1 /******************************************************************************
4 * Guest OS interface to x86 32-bit Xen.
6 * Copyright (c) 2004, K A Fraser
9 #ifndef __XEN_PUBLIC_ARCH_X86_32_H__
10 #define __XEN_PUBLIC_ARCH_X86_32_H__
/*
 * Guest-handle machinery: a "guest handle" wraps a guest virtual address
 * that is passed to the hypervisor in hypercall argument structures.
 *
 * Two alternative definitions appear below; in the full file they sit on
 * opposite sides of a preprocessor conditional that is not visible in this
 * chunk (NOTE(review): confirm the guard against the upstream header).
 *  - struct-wrapped form: typedef struct { type *p; } — gives each handle a
 *    distinct type so the compiler catches mismatched handle assignments.
 *  - raw-pointer form:    typedef type * — no type safety, plain pointer.
 */
13 #define __DEFINE_GUEST_HANDLE(name, type) \
14 typedef struct { type *p; } __guest_handle_ ## name
16 #define __DEFINE_GUEST_HANDLE(name, type) \
17 typedef type * __guest_handle_ ## name
/* Convenience wrappers: handle to "struct name", handle to plain type,
 * and the spelling of a handle's type name at use sites. */
20 #define DEFINE_GUEST_HANDLE_STRUCT(name) \
21 __DEFINE_GUEST_HANDLE(name, struct name)
22 #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
23 #define GUEST_HANDLE(name) __guest_handle_ ## name
/*
 * set_xen_guest_handle(hnd, val): store pointer 'val' into handle 'hnd'.
 *
 * Four variants follow; the #if/#else lines selecting among them are
 * missing from this chunk (NOTE(review): the first pair presumably pairs
 * struct-wrapped handles with (hnd).p, the second raw-pointer handles with
 * plain assignment — confirm against the full header).
 *
 * On 32-bit builds a handle field may still be 8 bytes wide (for 32-on-64
 * compat layouts): the whole 64-bit slot is zeroed first so the upper 32
 * bits are deterministic after the 32-bit pointer is written.  The
 * sizeof() test is a compile-time constant, so the dead branch folds away.
 */
27 #define set_xen_guest_handle(hnd, val) \
29 if (sizeof(hnd) == 8) \
30 *(uint64_t *)&(hnd) = 0; \
33 #elif defined(__x86_64__)
/* 64-bit native: handle is struct-wrapped; assign through the .p member. */
34 #define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
38 #define set_xen_guest_handle(hnd, val) \
40 if (sizeof(hnd) == 8) \
41 *(uint64_t *)&(hnd) = 0; \
44 #elif defined(__x86_64__)
/* 64-bit native, raw-pointer handle flavor: plain assignment. */
45 #define set_xen_guest_handle(hnd, val) do { (hnd) = val; } while (0)
50 /* Guest handles for primitive C types. */
/* Unsigned flavors need the two-argument form ("uchar" is not a C type name). */
51 __DEFINE_GUEST_HANDLE(uchar, unsigned char);
52 __DEFINE_GUEST_HANDLE(uint, unsigned int);
53 __DEFINE_GUEST_HANDLE(ulong, unsigned long);
/* Signed/void flavors: handle name and C type name coincide. */
54 DEFINE_GUEST_HANDLE(char);
55 DEFINE_GUEST_HANDLE(int);
56 DEFINE_GUEST_HANDLE(long);
57 DEFINE_GUEST_HANDLE(void);
61 * SEGMENT DESCRIPTOR TABLES
64 * A number of GDT entries are reserved by Xen. These are not situated at the
65 * start of the GDT because some stupid OSes export hard-coded selector values
66 * in their ABI. These hard-coded values are always near the start of the GDT,
67 * so Xen places itself out of the way, at the far end of the GDT.
/*
 * Xen's private region starts at GDT page 14:
 *   byte offset  = 14 * 4096       = 57344 (0xE000)
 *   entry index  = 57344 / 8       = 7168  (each descriptor is 8 bytes)
 */
69 #define FIRST_RESERVED_GDT_PAGE 14
70 #define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
71 #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
74 * These flat segments are in the Xen-private section of every GDT. Since these
75 * are also present in the initial GDT, many OSes will be able to avoid
76 * installing their own GDT.
/*
 * Selector anatomy: bits 15..3 = descriptor index, bit 2 = TI (0 = GDT),
 * bits 1..0 = RPL.  E.g. 0xe019 >> 3 = 7171 = FIRST_RESERVED_GDT_ENTRY + 3,
 * with RPL 1; the ring-3 selectors carry RPL 3.
 * NOTE(review): the "GDT index 259/260/..." comments below do not match
 * 0xe019 >> 3 = 7171; they look stale (from an older reserved-GDT layout) —
 * verify against the upstream header before relying on them.
 */
78 #define FLAT_RING1_CS 0xe019 /* GDT index 259 */
79 #define FLAT_RING1_DS 0xe021 /* GDT index 260 */
80 #define FLAT_RING1_SS 0xe021 /* GDT index 260 */
81 #define FLAT_RING3_CS 0xe02b /* GDT index 261 */
82 #define FLAT_RING3_DS 0xe033 /* GDT index 262 */
83 #define FLAT_RING3_SS 0xe033 /* GDT index 262 */
/*
 * On 32-bit Xen/x86 a paravirtualized kernel runs in ring 1 and user space
 * in ring 3, hence these aliases.  DS and SS share a selector in each ring.
 */
85 #define FLAT_KERNEL_CS FLAT_RING1_CS
86 #define FLAT_KERNEL_DS FLAT_RING1_DS
87 #define FLAT_KERNEL_SS FLAT_RING1_SS
88 #define FLAT_USER_CS FLAT_RING3_CS
89 #define FLAT_USER_DS FLAT_RING3_DS
90 #define FLAT_USER_SS FLAT_RING3_SS
92 /* And the trap vector is... */
/* Software interrupt 0x82 is the 32-bit hypercall entry into Xen. */
93 #define TRAP_INSTR "int $0x82"
96 * Virtual addresses beyond this are not modifiable by guest OSes. The
97 * machine->physical mapping table starts at this address, read-only.
/*
 * Two candidate values for the start of the Xen-reserved virtual range;
 * the #if line choosing between them is missing from this chunk
 * (NOTE(review): presumably a PAE vs non-PAE guard — confirm upstream).
 */
100 #define __HYPERVISOR_VIRT_START 0xF5800000
102 #define __HYPERVISOR_VIRT_START 0xFC000000
105 #ifndef HYPERVISOR_VIRT_START
/* mk_unsigned_long gives the constant the right type in both C and asm. */
106 #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
109 #ifndef machine_to_phys_mapping
/* Read-only M2P table exposed by Xen at the base of its reserved range. */
110 #define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
113 /* Maximum number of virtual CPUs in multi-processor guests. */
114 #define MAX_VIRT_CPUS 32
119 * Send an array of these to HYPERVISOR_set_trap_table()
/*
 * Accessors for trap_info.flags: bits 0-1 hold the descriptor privilege
 * level, bit 2 is the "leave interrupts enabled" flag.
 * NOTE(review): TI_SET_DPL/TI_SET_IF only OR bits in — they do not clear
 * previously-set bits, so they assume flags starts from 0.
 */
121 #define TI_GET_DPL(_ti) ((_ti)->flags & 3)
122 #define TI_GET_IF(_ti) ((_ti)->flags & 4)
123 #define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl))
124 #define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2))
/* One virtual IDT entry (the struct's opening/closing lines fall outside
 * this chunk). */
127 uint8_t vector; /* exception vector */
128 uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
129 uint16_t cs; /* code selector */
130 unsigned long address; /* code offset */
132 DEFINE_GUEST_HANDLE_STRUCT(trap_info);
/*
 * User-level register frame passed between guest and Xen.  Most general-
 * purpose register fields fall outside this chunk; only the Xen-private
 * bookkeeping fields are visible here.
 */
134 struct cpu_user_regs {
142 uint16_t error_code; /* private */
143 uint16_t entry_vector; /* private */
/* Snapshot of the event (virtual-IRQ) mask at upcall time. */
146 uint8_t saved_upcall_mask;
148 uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
156 DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs);
/* 64-bit even on 32-bit x86: RDTSC returns a 64-bit counter in EDX:EAX. */
158 typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
161 * The following is all CPU context. Note that the fpu_ctxt block is filled
162 * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
164 struct vcpu_guest_context {
165 /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
166 struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
/* Bit meanings for the 'flags' field below. */
167 #define VGCF_I387_VALID (1<<0)
168 #define VGCF_HVM_GUEST (1<<1)
169 #define VGCF_IN_KERNEL (1<<2)
170 unsigned long flags; /* VGCF_* flags */
171 struct cpu_user_regs user_regs; /* User-level CPU registers */
172 struct trap_info trap_ctxt[256]; /* Virtual IDT */
173 unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
174 unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
175 unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
176 unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
177 unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
/* Entry points Xen vectors to: normal event delivery and the failsafe
 * path taken when event delivery itself faults. */
178 unsigned long event_callback_cs; /* CS:EIP of event callback */
179 unsigned long event_callback_eip;
180 unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
181 unsigned long failsafe_callback_eip;
182 unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
184 DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context);
/*
 * Per-domain architecture state shared with Xen.  The closing braces of
 * these struct definitions fall outside this chunk.
 */
186 struct arch_shared_info {
187 unsigned long max_pfn; /* max pfn that appears in table */
188 /* Frame containing list of mfns containing list of mfns containing p2m. */
189 unsigned long pfn_to_mfn_frame_list_list;
190 unsigned long nmi_reason;
/* Per-VCPU architecture state; padded so struct vcpu_info stays 64 bytes. */
193 struct arch_vcpu_info {
195 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
/* CS:EIP pair describing a guest callback entry point. */
198 struct xen_callback {
202 #endif /* !__ASSEMBLY__ */
205 * Prefix forces emulation of some non-trapping instructions.
206 * Currently only CPUID.
/*
 * The byte sequence is ud2 (0x0f 0x0b) followed by ASCII "xen"
 * (0x78 0x65 0x6e); Xen recognizes it and emulates the following
 * instruction.  Two spellings follow — raw bytes for assembly sources and
 * a string for C inline asm; the #ifdef __ASSEMBLY__ / #else lines
 * selecting between them are missing from this chunk (NOTE(review):
 * confirm against the upstream header).
 */
209 #define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
210 #define XEN_CPUID XEN_EMULATE_PREFIX cpuid
212 #define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
213 #define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"