#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
struct msr_regs_info {
struct saved_msr *array;
/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
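
/*
 * Illustrative sketch only (example_rdmsr() is a made-up name, not part of
 * this header): how the three helpers above combine into a plain 64-bit
 * read.  On 64-bit builds the value is assembled from two registers; on
 * 32-bit builds the "A" constraint hands back edx:eax directly.
 *
 *	static inline u64 example_rdmsr(unsigned int msr)
 *	{
 *		DECLARE_ARGS(val, low, high);
 *
 *		asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
 *		return EAX_EDX_VAL(val, low, high);
 *	}
 *
 * This is native_read_msr() below minus the exception handling and tracing.
 */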
#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif
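
/*
 * When CONFIG_TRACEPOINTS is enabled, the hooks above feed the msr:read_msr,
 * msr:write_msr and msr:rdpmc trace events.  A typical way to watch them
 * from userspace (illustrative; the path depends on where tracefs is
 * mounted):
 *
 *	# cd /sys/kernel/tracing
 *	# echo 1 > events/msr/read_msr/enable
 *	# cat trace_pipe
 */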
static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}
static inline unsigned long long native_read_msr_safe(unsigned int msr,
						       int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err]\n\t"
		     "xorl %%eax, %%eax\n\t"
		     "xorl %%edx, %%edx\n\t"
		     "jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}
/* Can be uninlined because referenced by paravirt */
static inline void notrace
__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}
/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__native_write_msr_notrace(msr, low, high);
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

static inline void
wrmsr_notrace(unsigned int msr, u32 low, u32 high)
{
	__native_write_msr_notrace(msr, low, high);
}
/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;
	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
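
/*
 * Illustrative sketch (example_rdmsr_regs() is a made-up helper, not part
 * of this header): regs[8] mirrors the general-purpose registers around
 * the MSR access, indexed eax, ecx, edx, ebx, sp, bp, si, di.  regs[1]
 * (ecx) carries the MSR index in; regs[0]/regs[2] (eax/edx) carry the
 * value out.
 *
 *	static int example_rdmsr_regs(u32 msr, u64 *val)
 *	{
 *		u32 gprs[8] = { 0 };
 *		int err;
 *
 *		gprs[1] = msr;
 *		err = rdmsr_safe_regs(gprs);
 *		*val = gprs[0] | ((u64)gprs[2] << 32);
 *		return err;
 *	}
 */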
/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * timestamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}
/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)
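
/*
 * Illustrative sketch (do_something() is a placeholder): timing a short
 * code sequence with the ordered variant so the reads cannot be
 * speculated across the region being measured.
 *
 *	u64 t0, t1;
 *
 *	t0 = rdtsc_ordered();
 *	do_something();
 *	t1 = rdtsc_ordered();
 *	pr_info("took %llu cycles\n", t1 - t0);
 */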
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */
#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
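
/*
 * Illustrative usage sketch (not part of this header); MSR_IA32_MISC_ENABLE
 * is assumed to come from <asm/msr-index.h>.  rdmsr() splits the value into
 * two u32 halves, rdmsrl()/wrmsrl() move it as a single u64:
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
 *	rdmsrl(MSR_IA32_MISC_ENABLE, val);
 *	wrmsrl(MSR_IA32_MISC_ENABLE, val);
 */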
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;
	*p = native_read_msr_safe(msr, &err);
	return err;
}
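
/*
 * Illustrative sketch (MSR_EXAMPLE is a placeholder, not a real index from
 * <asm/msr-index.h>): the _safe variants return -EIO instead of faulting
 * when the MSR is not implemented, so they are suitable for probing
 * optional MSRs.
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_EXAMPLE, &val))
 *		return -ENODEV;
 *	if (wrmsr_safe(MSR_EXAMPLE, (u32)val, (u32)(val >> 32)))
 *		pr_warn("MSR_EXAMPLE write failed\n");
 */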
#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
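
/*
 * Illustrative sketch (MSR_EXAMPLE and EXAMPLE_BIT are placeholders):
 * msr_set_bit()/msr_clear_bit() return a negative error on failure, 0 if
 * the bit was already in the requested state, and > 0 if the MSR was
 * actually written.
 *
 *	int ret = msr_set_bit(MSR_EXAMPLE, EXAMPLE_BIT);
 *
 *	if (ret < 0)
 *		pr_warn("could not toggle the bit\n");
 *	else if (ret > 0)
 *		pr_info("bit was clear and has been set\n");
 */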
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
	rdmsr(msr_no, *l, *h);
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
	return rdmsr_safe(msr_no, l, h);
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
	return wrmsr_safe(msr_no, l, h);
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
	return rdmsrl_safe(msr_no, q);
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
	return wrmsrl_safe(msr_no, q);
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
	return rdmsr_safe_regs(regs);
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
	return wrmsr_safe_regs(regs);
#endif  /* CONFIG_SMP */
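
/*
 * Illustrative sketch (MSR_EXAMPLE is a placeholder): the *_on_cpu()
 * helpers perform the access on a specific CPU, typically via a cross-call
 * on SMP and directly on UP builds.
 *
 *	u64 val;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		if (rdmsrl_safe_on_cpu(cpu, MSR_EXAMPLE, &val))
 *			continue;
 *		pr_info("cpu %d: %#llx\n", cpu, val);
 *	}
 */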
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */