8 * The legacy x87 FPU state format, as saved by FSAVE and
9 * restored by the FRSTOR instructions:
12 u32 cwd; /* FPU Control Word */
13 u32 swd; /* FPU Status Word */
14 u32 twd; /* FPU Tag Word */
15 u32 fip; /* FPU IP Offset */
16 u32 fcs; /* FPU IP Selector */
17 u32 foo; /* FPU Operand Pointer Offset */
18 u32 fos; /* FPU Operand Pointer Selector */
20 /* 8*10 bytes for each FP-reg = 80 bytes: */
23 /* Software status information [not touched by FSAVE]: */
28 * The legacy fx SSE/MMX FPU state format, as saved by FXSAVE and
29 * restored by the FXRSTOR instructions. It's similar to the FSAVE
30 * format, but differs in some areas, plus has extensions at
31 * the end for the XMM registers.
34 u16 cwd; /* Control Word */
35 u16 swd; /* Status Word */
36 u16 twd; /* Tag Word */
37 u16 fop; /* Last Instruction Opcode */
40 u64 rip; /* Instruction Pointer */
41 u64 rdp; /* Data Pointer */
44 u32 fip; /* FPU IP Offset */
45 u32 fcs; /* FPU IP Selector */
46 u32 foo; /* FPU Operand Offset */
47 u32 fos; /* FPU Operand Selector */
50 u32 mxcsr; /* MXCSR Register State */
51 u32 mxcsr_mask; /* MXCSR Mask */
53 /* 8*16 bytes for each FP-reg = 128 bytes: */
56 /* 16*16 bytes for each XMM-reg = 256 bytes: */
66 } __attribute__((aligned(16)));
/* Default value for fxregs_state.mxcsr: */
#define MXCSR_DEFAULT		0x1f80
72 * Software based FPU emulation state. This is arbitrary really,
73 * it matches the x87 format to make it easier to understand:
83 /* 8*10 bytes for each FP-reg = 80 bytes: */
91 struct math_emu_info *info;
96 * List of XSAVE features Linux knows about:
102 * Values above here are "legacy states".
103 * Those below are "extended states".
111 XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
/*
 * Bit masks for the XSAVE features above. These masks are combined
 * into 64-bit xfeatures bitmaps (e.g. xstate_header.xfeatures), so
 * use 1ULL to keep the shifts well-defined even for feature numbers
 * of 31 and above (a plain 'int'-width shift would overflow there).
 */
#define XFEATURE_MASK_FP		(1ULL << XFEATURE_FP)
#define XFEATURE_MASK_SSE		(1ULL << XFEATURE_SSE)
#define XFEATURE_MASK_YMM		(1ULL << XFEATURE_YMM)
#define XFEATURE_MASK_BNDREGS		(1ULL << XFEATURE_BNDREGS)
#define XFEATURE_MASK_BNDCSR		(1ULL << XFEATURE_BNDCSR)
#define XFEATURE_MASK_OPMASK		(1ULL << XFEATURE_OPMASK)
#define XFEATURE_MASK_ZMM_Hi256		(1ULL << XFEATURE_ZMM_Hi256)
#define XFEATURE_MASK_Hi16_ZMM		(1ULL << XFEATURE_Hi16_ZMM)
#define XFEATURE_MASK_PT		(1ULL << XFEATURE_PT_UNIMPLEMENTED_SO_FAR)
#define XFEATURE_MASK_PKRU		(1ULL << XFEATURE_PKRU)

/* Convenience aggregates of related feature masks: */
#define XFEATURE_MASK_FPSSE		(XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
#define XFEATURE_MASK_AVX512		(XFEATURE_MASK_OPMASK \
					 | XFEATURE_MASK_ZMM_Hi256 \
					 | XFEATURE_MASK_Hi16_ZMM)

/* First feature stored in the XSAVE extended (non-legacy) area: */
#define FIRST_EXTENDED_XFEATURE	XFEATURE_YMM
148 * There are 16x 256-bit AVX registers named YMM0-YMM15.
149 * The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
150 * and are stored in 'struct fxregs_state::xmm_space[]' in the
153 * The high 128 bits are stored here.
156 struct reg_128_bit hi_ymm[16];
159 /* Intel MPX support: */
166 * State component 3 is used for the 4 128-bit bounds registers
168 struct mpx_bndreg_state {
169 struct mpx_bndreg bndreg[4];
173 * State component 4 is used for the 64-bit user-mode MPX
174 * configuration register BNDCFGU and the 64-bit MPX status
175 * register BNDSTATUS. We call the pair "BNDCSR".
183 * The BNDCSR state is padded out to be 64-bytes in size.
185 struct mpx_bndcsr_state {
187 struct mpx_bndcsr bndcsr;
188 u8 pad_to_64_bytes[64];
192 /* AVX-512 Components: */
195 * State component 5 is used for the 8 64-bit opmask registers
196 * k0-k7 (opmask state).
198 struct avx_512_opmask_state {
203 * State component 6 is used for the upper 256 bits of the
204 * registers ZMM0-ZMM15. These 16 256-bit values are denoted
205 * ZMM0_H-ZMM15_H (ZMM_Hi256 state).
207 struct avx_512_zmm_uppers_state {
208 struct reg_256_bit zmm_upper[16];
212 * State component 7 is used for the 16 512-bit registers
213 * ZMM16-ZMM31 (Hi16_ZMM state).
215 struct avx_512_hi16_state {
216 struct reg_512_bit hi16_zmm[16];
220 * State component 9: 32-bit PKRU register. The state is
221 * 8 bytes long but only 4 bytes are used currently.
228 struct xstate_header {
232 } __attribute__((packed));
/*
 * xstate_header.xcomp_bv[63] indicates that the extended_state_area
 * is in compacted format.
 */
#define XCOMP_BV_COMPACTED_FORMAT	((u64)1 << 63)
241 * This is our most modern FPU state format, as saved by the XSAVE
242 * and restored by the XRSTOR instructions.
244 * It consists of a legacy fxregs portion, an xstate header and
245 * subsequent areas as defined by the xstate header. Not all CPUs
246 * support all the extensions, so the size of the extended area
247 * can vary quite a bit between CPUs.
250 struct fxregs_state i387;
251 struct xstate_header header;
252 u8 extended_state_area[0];
253 } __attribute__ ((packed, aligned (64)));
256 * This is a union of all the possible FPU state formats
257 * put together, so that we can pick the right one at runtime.
259 * The size of the structure is determined by the largest
260 * member - which is the xsave area. The padding is there
261 * to ensure that statically-allocated task_structs (just
262 * the init_task today) have enough space.
265 struct fregs_state fsave;
266 struct fxregs_state fxsave;
267 struct swregs_state soft;
268 struct xregs_state xsave;
269 u8 __padding[PAGE_SIZE];
273 * Highest level per task FPU state data structure that
274 * contains the FPU register state plus various FPU
281 * Records the last CPU on which this context was loaded into
282 * FPU registers. (In the lazy-restore case we might be
283 * able to reuse FPU registers across multiple context switches
284 * this way, if no intermediate task used the FPU.)
286 * A value of -1 is used to indicate that the FPU state in context
287 * memory is newer than the FPU state in registers, and that the
288 * FPU state should be reloaded next time the task is run.
290 unsigned int last_cpu;
295 * This flag indicates whether this context is active: if the task
296 * is not running then we can restore from this context, if the task
297 * is running then we should save into this context.
299 unsigned char fpstate_active;
304 * This flag determines whether a given context is actively
305 * loaded into the FPU's registers and that those registers
306 * represent the task's current FPU state.
308 * Note the interaction with fpstate_active:
310 * # task does not use the FPU:
311 * fpstate_active == 0
313 * # task uses the FPU and regs are active:
314 * fpstate_active == 1 && fpregs_active == 1
316 * # the regs are inactive but still match fpstate:
317 * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
319 * The third state is what we use for the lazy restore optimization
320 * on lazy-switching CPUs.
322 unsigned char fpregs_active;
327 * This counter contains the number of consecutive context switches
328 * during which the FPU stays used. If this is over a threshold, the
329 * lazy FPU restore logic becomes eager, to save the trap overhead.
330 * This is an unsigned char so that after 256 iterations the counter
331 * wraps and the context switch behavior turns lazy again; this is to
332 * deal with bursty apps that only use the FPU for a short time:
334 unsigned char counter;
338 * In-memory copy of all FPU registers that we save/restore
339 * over context switches. If the task is using the FPU then
340 * the registers in the FPU are more recent than this state
341 * copy. If the task context-switches away then they get
342 * saved here and represent the FPU state.
344 * After context switches there may be a (short) time period
345 * during which the in-FPU hardware registers are unchanged
346 * and still perfectly match this state, if the tasks
347 * scheduled afterwards are not using the FPU.
349 * This is the 'lazy restore' window of optimization, which
350 * we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
352 * We detect whether a subsequent task uses the FPU via setting
353 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
355 * During this window, if the task gets scheduled again, we
356 * might be able to skip having to do a restore from this
357 * memory buffer to the hardware registers - at the cost of
358 * incurring the overhead of #NM fault traps.
360 * Note that on modern CPUs that support the XSAVEOPT (or other
361 * optimized XSAVE instructions), we don't use #NM traps anymore,
362 * as the hardware can track whether FPU registers need saving
363 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
364 * logic, which unconditionally saves/restores all FPU state
365 * across context switches. (if FPU state exists.)
367 union fpregs_state state;
369 * WARNING: 'state' is dynamically-sized. Do not put
370 * anything after it here.
374 #endif /* _ASM_X86_FPU_H */