#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <linux/kernel.h>

#ifdef CONFIG_X86_32
#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */

struct task_struct; /* one of the stranger aspects of C forward declarations */
extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
                                                struct task_struct *next));
/*
 * Saving eflags is important: it not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last) do {                                \
        unsigned long esi, edi;                                         \
        asm volatile("pushfl\n\t"               /* save flags */        \
                     "pushl %%ebp\n\t"          /* save EBP */          \
                     "movl %%esp,%0\n\t"        /* save ESP */          \
                     "movl %5,%%esp\n\t"        /* restore ESP */       \
                     "movl $1f,%1\n\t"          /* save EIP */          \
                     "pushl %6\n\t"             /* restore EIP */       \
                     "jmp __switch_to\n"                                \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"           /* restore EBP */       \
                     "popfl"                    /* restore flags */     \
                     : "=m" (prev->thread.sp), "=m" (prev->thread.ip),  \
                       "=a" (last), "=S" (esi), "=D" (edi)              \
                     : "m" (next->thread.sp), "m" (next->thread.ip),    \
                       "2" (prev), "d" (next));                         \
} while (0)
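/*
 * Usage sketch (an editorial addition, not in the original header): the
 * scheduler's context_switch() is the intended caller. The "last"
 * argument exists because by the time a switched-away task resumes
 * here, its local "prev" may be stale, so __switch_to() hands back the
 * task we actually came from:
 *
 *	struct task_struct *last;
 *	switch_to(prev, next, last);
 *	// Runs again only when "prev" is switched back in; "last" then
 *	// names whichever task we just switched away from.
 */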
# include "system_32.h"
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"
/* Save and restore flags to clear a leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
	     "call __switch_to\n\t" \
	     ".globl thread_return\n" \
	     "thread_return:\n\t" \
	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
	     "movq %P[thread_info](%%rsi),%%r8\n\t" \
	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
	     "movq %%rax,%%rdi\n\t" \
	     "jc ret_from_fork\n\t" \
	     RESTORE_CONTEXT \
	     : "=a" (last) \
	     : [next] "S" (next), [prev] "D" (prev), \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
	       [tif_fork] "i" (TIF_FORK), \
	       [thread_info] "i" (offsetof(struct task_struct, stack)), \
	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
	     : "memory", "cc" __EXTRA_CLOBBER)
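/*
 * Editorial note (not in the original header): __switch_to() returns the
 * previous task in %rax, which "=a" (last) hands back to the caller,
 * mirroring the 32-bit variant above. The locked "btr" tests and clears
 * TIF_FORK on the incoming task; if it was set, the task is a freshly
 * forked child and control jumps to ret_from_fork instead of falling
 * through to RESTORE_CONTEXT.
 */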
# include "system_64.h"
#endif

#ifdef __KERNEL__
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
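/*
 * Layout reminder (an editorial addition): an 8-byte segment descriptor
 * scatters the 32-bit base across bytes 2-4 (bits 0-23) and byte 7
 * (bits 24-31), and the 20-bit limit across bytes 0-1 (bits 0-15) plus
 * the low nibble of byte 6 (bits 16-19). That is why _set_base() stores
 * to offsets 2, 4 and 7, and _set_limit() to offsets 0 and 6.
 */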
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %k1, %%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t"		\
		_ASM_PTR " 1b,3b\n"		\
		".previous"			\
		: :"r" (value), "r" (0))
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
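/*
 * Usage sketch (an editorial addition; "sel" is a hypothetical variable):
 *
 *	unsigned int sel;
 *	savesegment(gs, sel);	// stash the current %gs selector
 *	loadsegment(fs, sel);	// load %fs; a faulting selector is
 *				// replaced by 0 via the fixup above
 */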
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile("clts");
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;
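/*
 * Editorial note (not in the original header): every accessor below
 * names __force_order as a memory operand, so the compiler must keep
 * the accessors ordered relative to one another, e.g.
 *
 *	native_write_cr3(val);		// "m" (__force_order) input
 *	cr3 = native_read_cr3();	// "=m" (__force_order) output
 *
 * cannot be swapped, while unrelated loads and stores around them may
 * still be scheduled freely.
 */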
static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}
static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, %cr4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0		\n"
		     "2:			\n"
		     ".section __ex_table,\"a\"	\n"
		     ".long 1b,2b		\n"
		     ".previous			\n"
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}
static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}
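/*
 * Illustrative sketch (an editorial addition; X86_CR4_PGE is assumed to
 * come from <asm/processor-flags.h>): toggling CR4.PGE is the classic
 * way to flush global TLB entries:
 *
 *	unsigned long cr4 = native_read_cr4();
 *	native_write_cr4(cr4 & ~X86_CR4_PGE);	// clearing PGE flushes
 *	native_write_cr4(cr4);			// globals; then restore
 */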
static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */
/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
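/*
 * Usage sketch (an editorial addition): clts()/stts() implement lazy FPU
 * switching. With CR0.TS set (bit 3, hence the 8 above), the next FPU
 * instruction raises #NM, so FPU state is only restored on demand:
 *
 *	stts();		// switching away: arm the #NM trap
 *	...
 *	clts();		// #NM handler: permit FPU use, then restore state
 */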
#endif /* __KERNEL__ */
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
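/*
 * Usage sketch (an editorial addition; clflush_range() is hypothetical,
 * and boot_cpu_data.x86_clflush_size is assumed to hold the
 * CPUID-reported cache-line stride): flushing a buffer takes one clflush
 * per line, fenced on both sides because clflush is only ordered by
 * mfence:
 *
 *	static void clflush_range(void *vaddr, unsigned int size)
 *	{
 *		char *p, *end = (char *)vaddr + size;
 *
 *		mb();
 *		for (p = vaddr; p < end; p += boot_cpu_data.x86_clflush_size)
 *			clflush(p);
 *		mb();
 *	}
 */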
#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order store. wmb() ceases to be a
 * nop for these.
 */
#ifdef CONFIG_X86_32
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
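/*
 * Pairing sketch (an editorial addition): wmb() on the producer orders
 * the data store before the flag store; rmb() on the consumer orders the
 * flag load before the data load:
 *
 *	// CPU 0			// CPU 1
 *	data = 42;			while (!flag)
 *	wmb();					cpu_relax();
 *	flag = 1;			rmb();
 *					BUG_ON(data != 42);
 */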
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif