#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
struct msr_regs_info {
	u32 *regs;
	int err;
};
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;
	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}
/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
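/*
 * Informal illustration (not used by the code): with the definitions above,
 * a read such as native_read_msr() below expands roughly as follows.
 *
 * On CONFIG_X86_64:
 *	unsigned long low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	return low | (high << 32);
 *
 * On 32-bit, where "A" really does mean edx:eax:
 *	unsigned long long val;
 *	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
 *	return val;
 */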
static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	return EAX_EDX_VAL(val, low, high);
}
static inline unsigned long long native_read_msr_safe(unsigned int msr,
						       int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	return EAX_EDX_VAL(val, low, high);
}
static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}
/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;

	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	return err;
}
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);
/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
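/*
 * Usage sketch (illustrative only; do_something() is a made-up placeholder):
 * a coarse cycle delta taken on one CPU, where the lack of ordering
 * guarantees is acceptable because only a rough estimate is wanted.
 *
 *	u64 start, cycles;
 *
 *	start = rdtsc();
 *	do_something();			// work being timed, may be reordered
 *	cycles = rdtsc() - start;	// approximate cost in TSC ticks
 */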
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access. The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads. An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * timestamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}
/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	return EAX_EDX_VAL(val, low, high);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */
#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)
static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}
#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))
static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)val, (u32)(val >> 32));
}
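/*
 * Usage sketch for the accessors above (illustrative only; reading
 * MSR_TSC_AUX here assumes the CPU actually implements it): rdmsr() fills
 * two plain u32 lvalues directly, no '&' needed, while rdmsrl()/wrmsrl()
 * operate on a single u64.
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsr(MSR_TSC_AUX, lo, hi);	// low/high halves assigned directly
 *	rdmsrl(MSR_TSC_AUX, val);	// same MSR as one 64-bit value
 *	wrmsrl(MSR_TSC_AUX, val);	// write it back unchanged
 */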
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}
/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
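/*
 * Usage sketch for the exception-handling variants (illustrative only; the
 * helper below is a made-up example): unlike the plain accessors, these
 * return an error instead of faulting when the MSR is not implemented, so
 * they can be used to probe for one.
 *
 *	static bool example_msr_present(u32 msr)
 *	{
 *		u64 val;
 *
 *		return rdmsrl_safe(msr, &val) == 0;	// 0 on success, -EIO on #GP
 *	}
 */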
#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
#endif	/* !CONFIG_PARAVIRT */
/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
#define write_tsc(low, high)  wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */