/*
 * VMI specific paravirt-ops implementation
 *
 * Copyright (C) 2005, VMware, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to zach@vmware.com
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <asm/vmi.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/vmi_time.h>
#include <asm/kmap_types.h>
#include <asm/setup.h>

/* Convenient for calling VMI functions indirectly in the ROM */
typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);

#define call_vrom_func(rom,func) \
   (((VROMFUNC *)(rom->func))())

#define call_vrom_long_func(rom,func,arg) \
   (((VROMLONGFUNC *)(rom->func)) (arg))

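/*
 * Note: the ROM entry points use the regparm conventions declared above;
 * call_vrom_long_func() is used below with the ROM's get_reloc entry, whose
 * u64 return value is interpreted as a packed struct vmi_relocation_info.
 */
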
static struct vrom_header *vmi_rom;
static int disable_pge;
static int disable_pse;
static int disable_sep;
static int disable_tsc;
static int disable_mtrr;
static int disable_noidle;
static int disable_vmi_timer;

/* Cached VMI operations */
static struct {
	void (*cpuid)(void /* non-c */);
	void (*_set_ldt)(u32 selector);
	void (*set_tr)(u32 selector);
	void (*write_idt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_gdt_entry)(struct desc_struct *, int, u32, u32);
	void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
	void (*set_kernel_stack)(u32 selector, u32 sp0);
	void (*allocate_page)(u32, u32, u32, u32, u32);
	void (*release_page)(u32, u32);
	void (*set_pte)(pte_t, pte_t *, unsigned);
	void (*update_pte)(pte_t *, unsigned);
	void (*set_linear_mapping)(int, void *, u32, u32);
	void (*_flush_tlb)(int);
	void (*set_initial_ap_state)(int, int);
	void (*halt)(void);
	void (*set_lazy_mode)(int mode);
} vmi_ops;

/* Cached VMI timer operations */
struct vmi_timer_ops vmi_timer_ops;

/*
 * VMI patching routines.
 */
#define MNEM_CALL 0xe8
#define MNEM_JMP  0xe9
#define MNEM_RET  0xc3

#define IRQ_PATCH_INT_MASK 0
#define IRQ_PATCH_DISABLE  5

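/*
 * Write the rel32 displacement of a 5-byte CALL/JMP: the target address
 * minus the address of the instruction following the 5-byte opcode (ip + 5).
 */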
static inline void patch_offset(void *insnbuf,
				unsigned long ip, unsigned long dest)
{
	*(unsigned long *)(insnbuf+1) = dest-ip-5;
}

static unsigned patch_internal(int call, unsigned len, void *insnbuf,
			       unsigned long ip)
{
	u64 reloc;
	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;

	reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
	switch(rel->type) {
		case VMI_RELOCATION_CALL_REL:
			BUG_ON(len < 5);
			*(char *)insnbuf = MNEM_CALL;
			patch_offset(insnbuf, ip, (unsigned long)rel->eip);
			return 5;

		case VMI_RELOCATION_JUMP_REL:
			BUG_ON(len < 5);
			*(char *)insnbuf = MNEM_JMP;
			patch_offset(insnbuf, ip, (unsigned long)rel->eip);
			return 5;

		case VMI_RELOCATION_NOP:
			/* obliterate the whole thing */
			return 0;

		case VMI_RELOCATION_NONE:
			/* leave native code in place */
			break;

		default:
			BUG();
	}
	return len;
}

/*
 * Apply patch if appropriate, return length of new instruction
 * sequence.  The callee does nop padding for us.
 */
static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
			  unsigned long ip, unsigned len)
{
	switch (type) {
		case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
			return patch_internal(VMI_CALL_DisableInterrupts, len,
					      insns, ip);
		case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
			return patch_internal(VMI_CALL_EnableInterrupts, len,
					      insns, ip);
		case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
			return patch_internal(VMI_CALL_SetInterruptMask, len,
					      insns, ip);
		case PARAVIRT_PATCH(pv_irq_ops.save_fl):
			return patch_internal(VMI_CALL_GetInterruptMask, len,
					      insns, ip);
		case PARAVIRT_PATCH(pv_cpu_ops.iret):
			return patch_internal(VMI_CALL_IRET, len, insns, ip);
		case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
			return patch_internal(VMI_CALL_SYSEXIT, len, insns, ip);
		default:
			break;
	}
	return len;
}

/* CPUID has non-C semantics, and paravirt-ops API doesn't match hardware ISA */
static void vmi_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	int override = 0;

	if (*ax == 1)
		override = 1;
	asm volatile ("call *%6"
		      : "=a" (*ax),
			"=b" (*bx),
			"=c" (*cx),
			"=d" (*dx)
		      : "0" (*ax), "2" (*cx), "r" (vmi_ops.cpuid));
	if (override) {
		if (disable_pse)
			*dx &= ~X86_FEATURE_PSE;
		if (disable_pge)
			*dx &= ~X86_FEATURE_PGE;
		if (disable_sep)
			*dx &= ~X86_FEATURE_SEP;
		if (disable_tsc)
			*dx &= ~X86_FEATURE_TSC;
		if (disable_mtrr)
			*dx &= ~X86_FEATURE_MTRR;
	}
}

static inline void vmi_maybe_load_tls(struct desc_struct *gdt, int nr, struct desc_struct *new)
{
	if (gdt[nr].a != new->a || gdt[nr].b != new->b)
		write_gdt_entry(gdt, nr, new, 0);
}

static void vmi_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);

	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1]);
	vmi_maybe_load_tls(gdt, GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2]);
}

static void vmi_set_ldt(const void *addr, unsigned entries)
{
	unsigned cpu = smp_processor_id();
	struct desc_struct desc;

	pack_descriptor(&desc, (unsigned long)addr,
			entries * sizeof(struct desc_struct) - 1,
			DESC_LDT, 0);
	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, &desc, DESC_LDT);
	vmi_ops._set_ldt(entries ? GDT_ENTRY_LDT*sizeof(struct desc_struct) : 0);
}

static void vmi_set_tr(void)
{
	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
}

static void vmi_write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	u32 *idt_entry = (u32 *)g;

	vmi_ops.write_idt_entry(dt, entry, idt_entry[0], idt_entry[1]);
}

static void vmi_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	u32 *gdt_entry = (u32 *)desc;

	vmi_ops.write_gdt_entry(dt, entry, gdt_entry[0], gdt_entry[1]);
}

static void vmi_write_ldt_entry(struct desc_struct *dt, int entry,
				const void *desc)
{
	u32 *ldt_entry = (u32 *)desc;

	vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]);
}

static void vmi_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
}

static void vmi_flush_tlb_user(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB);
}

static void vmi_flush_tlb_kernel(void)
{
	vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
}

/* Stub to do nothing at all; used for delays and unimplemented calls */
static void vmi_nop(void)
{
}

static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
{
	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
}

static void vmi_allocate_pmd(struct mm_struct *mm, unsigned long pfn)
{
	/*
	 * This call comes in very early, before mem_map is setup.
	 * It is called only for swapper_pg_dir, which already has
	 * a reference count.
	 */
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2, 0, 0, 0);
}

static void vmi_allocate_pmd_clone(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count)
{
	vmi_ops.allocate_page(pfn, VMI_PAGE_L2 | VMI_PAGE_CLONE, clonepfn, start, count);
}

static void vmi_release_pte(unsigned long pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L1);
}

static void vmi_release_pmd(unsigned long pfn)
{
	vmi_ops.release_page(pfn, VMI_PAGE_L2);
}

/*
 * We use the pgd_free hook for releasing the pgd page:
 */
static void vmi_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long pfn = __pa(pgd) >> PAGE_SHIFT;

	vmi_ops.release_page(pfn, VMI_PAGE_L2);
}

/*
 * Helper macros for MMU update flags.  We can defer updates until a flush
 * or page invalidation only if the update is to the current address space
 * (otherwise, there is no flush).  We must check against init_mm, since
 * this could be a kernel update, which usually passes init_mm, although
 * sometimes this check can be skipped if we know the particular function
 * is only called on user mode PTEs.  We could change the kernel to pass
 * current->active_mm here, but in particular, I was unsure if changing
 * mm/highmem.c to do this would still be correct on other architectures.
 */
#define is_current_as(mm, mustbeuser) ((mm) == current->active_mm ||    \
                                       (!mustbeuser && (mm) == &init_mm))
#define vmi_flags_addr(mm, addr, level, user)                           \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))
#define vmi_flags_addr_defer(mm, addr, level, user)                     \
        ((level) | (is_current_as(mm, user) ?                           \
                (VMI_PAGE_DEFER | VMI_PAGE_CURRENT_AS | ((addr) & VMI_PAGE_VA_MASK)) : 0))

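/*
 * For example, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0) evaluates to
 * VMI_PAGE_PT | VMI_PAGE_CURRENT_AS | (addr & VMI_PAGE_VA_MASK) when mm is
 * the current (or init) address space, and to just VMI_PAGE_PT otherwise.
 */
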
static void vmi_update_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_ops.update_pte(ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_update_pte_defer(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	vmi_ops.update_pte(ptep, vmi_flags_addr_defer(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pte(pte_t *ptep, pte_t pte)
{
	/* XXX because of set_pmd_pte, this can be called on PT or PD layers */
	vmi_ops.set_pte(pte, ptep, VMI_PAGE_PT);
}

static void vmi_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
#ifdef CONFIG_X86_PAE
	const pte_t pte = { .pte = pmdval.pmd };
#else
	const pte_t pte = { pmdval.pud.pgd.pgd };
#endif
	vmi_ops.set_pte(pte, (pte_t *)pmdp, VMI_PAGE_PD);
}

#ifdef CONFIG_X86_PAE

static void vmi_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	/*
	 * XXX This is called from set_pmd_pte, but at both PT
	 * and PD layers so the VMI_PAGE_PT flag is wrong.  But
	 * it is only called for large page mapping changes;
	 * the Xen backend doesn't support large pages, and the
	 * ESX backend doesn't depend on the flag.
	 */
	set_64bit((unsigned long long *)ptep, pte_val(pteval));
	vmi_ops.update_pte(ptep, VMI_PAGE_PT);
}

static void vmi_set_pud(pud_t *pudp, pud_t pudval)
{
	const pte_t pte = { .pte = pudval.pgd.pgd };

	vmi_ops.set_pte(pte, (pte_t *)pudp, VMI_PAGE_PDP);
}

static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	const pte_t pte = { .pte = 0 };

	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { .pte = 0 };

	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif	/* CONFIG_X86_PAE */

#ifdef CONFIG_SMP
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PERCPU;
	ap.gs = __KERNEL_STACK_CANARY;

	ap.eflags = 0;

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif

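/*
 * The numeric arguments to vmi_ops.set_lazy_mode() below are VMI lazy-mode
 * selectors; judging from the call sites, 0 flushes and disables batching,
 * 1 batches MMU updates, and 2 batches CPU state during a context switch.
 */
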
static void vmi_start_context_switch(struct task_struct *prev)
{
	paravirt_start_context_switch(prev);
	vmi_ops.set_lazy_mode(2);
}

static void vmi_end_context_switch(struct task_struct *next)
{
	vmi_ops.set_lazy_mode(0);
	paravirt_end_context_switch(next);
}

static void vmi_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
	vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy_mmu(void)
{
	vmi_ops.set_lazy_mode(0);
	paravirt_leave_lazy_mmu();
}

static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
				rom->api_version_maj,
				rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run... anyways, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
		manufacturer, product,
		rom->api_version_maj, rom->api_version_min,
		pci->rom_version_maj, pci->rom_version_min);

	/* Don't allow BSD/MIT here for now because we don't want to end up
	   with any binary only shim layers */
	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
		printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
			license);
		return 0;
	}

	return 1;
}

/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;

		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}

/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, MAXMEM_PFN, 0);
}

/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type == VMI_RELOCATION_CALL_REL)		\
		opname = (void *)rel->eip;			\
	else if (rel->type == VMI_RELOCATION_NOP)		\
		opname = (void *)vmi_nop;			\
	else if (rel->type != VMI_RELOCATION_NONE)		\
		printk(KERN_WARNING "VMI: Unknown relocation "	\
				    "type %d for " #vmicall"\n",\
					rel->type);		\
} while (0)

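/*
 * Typical use is e.g. para_fill(pv_cpu_ops.clts, CLTS): point the pv-op
 * directly at the ROM's CLTS entry, or at vmi_nop if the ROM reports the
 * call is a no-op for this backend.
 */
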
/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		opname = wrapper;				\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)

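/*
 * Typical use is e.g. para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr,
 * SetTR): install the vmi_set_tr() wrapper as the pv-op and cache the ROM's
 * SetTR entry point in vmi_ops.set_tr for the wrapper to call.
 */
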
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	/*
	 * Prevent page tables from being allocated in highmem, even if
	 * CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);

	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
	pv_info.name = "vmi [deprecated]";

	pv_init_ops.patch = vmi_patch;

	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *  same ABI as the native ISA, but Linux wants exceptions
	 *  from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);

	para_fill(pv_cpu_ops.clts, CLTS);
	para_fill(pv_cpu_ops.get_debugreg, GetDR);
	para_fill(pv_cpu_ops.set_debugreg, SetDR);
	para_fill(pv_cpu_ops.read_cr0, GetCR0);
	para_fill(pv_mmu_ops.read_cr2, GetCR2);
	para_fill(pv_mmu_ops.read_cr3, GetCR3);
	para_fill(pv_cpu_ops.read_cr4, GetCR4);
	para_fill(pv_cpu_ops.write_cr0, SetCR0);
	para_fill(pv_mmu_ops.write_cr2, SetCR2);
	para_fill(pv_mmu_ops.write_cr3, SetCR3);
	para_fill(pv_cpu_ops.write_cr4, SetCR4);

	para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
	para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
	para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
	para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);

	para_fill(pv_cpu_ops.wbinvd, WBINVD);
	para_fill(pv_cpu_ops.read_tsc, RDTSC);

	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value, wrap */
	para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

	para_fill(pv_cpu_ops.load_gdt, SetGDT);
	para_fill(pv_cpu_ops.load_idt, SetIDT);
	para_fill(pv_cpu_ops.store_gdt, GetGDT);
	para_fill(pv_cpu_ops.store_idt, GetIDT);
	para_fill(pv_cpu_ops.store_tr, GetTR);
	pv_cpu_ops.load_tls = vmi_load_tls;
	para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
		  write_ldt_entry, WriteLDTEntry);
	para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
		  write_gdt_entry, WriteGDTEntry);
	para_wrap(pv_cpu_ops.write_idt_entry, vmi_write_idt_entry,
		  write_idt_entry, WriteIDTEntry);
	para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
	para_fill(pv_cpu_ops.io_delay, IODelay);

	para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
		  set_lazy_mode, SetLazyMode);

	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
		  set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
	para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
	para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);

	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif

	if (vmi_ops.set_pte) {
		pv_mmu_ops.set_pte = vmi_set_pte;
		pv_mmu_ops.set_pte_at = vmi_set_pte_at;
		pv_mmu_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
		pv_mmu_ops.set_pud = vmi_set_pud;
		pv_mmu_ops.pte_clear = vmi_pte_clear;
		pv_mmu_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		pv_mmu_ops.pte_update = vmi_update_pte;
		pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
	}

	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		pv_mmu_ops.alloc_pte = vmi_allocate_pte;
		pv_mmu_ops.alloc_pmd = vmi_allocate_pmd;
		pv_mmu_ops.alloc_pmd_clone = vmi_allocate_pmd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		pv_mmu_ops.release_pte = vmi_release_pte;
		pv_mmu_ops.release_pmd = vmi_release_pmd;
		pv_mmu_ops.pgd_free = vmi_pgd_free;
	}

	/* Set linear is needed in all cases */
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);

	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	pv_cpu_ops.iret = (void *)0xbadbab0;

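	/*
	 * The bogus pointer values above are presumably placeholders: if
	 * paravirt patching ever failed to replace the iret/sysexit sites,
	 * the kernel would fault on an obviously invalid address instead of
	 * silently running unpatched code.
	 */
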
#ifdef CONFIG_SMP
	para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(apic->read, APICRead);
	para_fill(apic->write, APICWrite);
#endif

	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			vmi_get_function(VMI_CALL_CancelAlarm);
		x86_init.timers.timer_init = vmi_time_init;
#ifdef CONFIG_X86_LOCAL_APIC
		x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
		x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
#endif
		pv_time_ops.sched_clock = vmi_sched_clock;
		x86_platform.calibrate_tsc = vmi_tsc_khz;
		x86_platform.get_wallclock = vmi_get_wallclock;
		x86_platform.set_wallclock = vmi_set_wallclock;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_noidle = 1;
		disable_vmi_timer = 1;
	}

	para_fill(pv_irq_ops.safe_halt, Halt);

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
	apply_paravirt(__parainstructions, __parainstructions_end);

	vmi_bringup();

	return 1;
}

#undef para_fill

void __init vmi_init(void)
{
	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

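	/*
	 * reserve_top_address() above carves the ROM-specified region out of
	 * the top of the kernel's virtual address space so the hypervisor can
	 * keep that range for its own use.
	 */
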
#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
}

void __init vmi_activate(void)
{
	unsigned long flags;

	if (!vmi_rom)
		return;

	local_irq_save(flags);
	activate_vmi();
	local_irq_restore(flags & X86_EFLAGS_IF);
}

static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);