From: Gleb Natapov
Date: Tue, 16 Feb 2010 08:51:48 +0000 (+0200)
Subject: KVM: use desc_ptr struct instead of kvm private descriptor_table
X-Git-Tag: v2.6.35-rc1~436^2~269
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=89a27f4d0e042a2fa3391a76b652aec3e16ef200;p=karo-tx-linux.git

KVM: use desc_ptr struct instead of kvm private descriptor_table

x86 arch defines desc_ptr for idt/gdt pointers, no need to define
another structure in kvm code.

Signed-off-by: Gleb Natapov
Signed-off-by: Avi Kivity
---
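[Reference, not part of the patch itself: a sketch of the x86-provided
structure this commit switches to. The field names (size, address) are
confirmed by the hunks below; the exact definition and its home in
arch/x86/include/asm/desc_defs.h are quoted from memory and may differ in
minor details:

	/* descriptor-table register image, the operand format used by
	 * lgdt/sgdt and lidt/sidt */
	struct desc_ptr {
		unsigned short size;	/* "limit" in the removed kvm struct */
		unsigned long address;	/* "base" in the removed kvm struct */
	} __attribute__((packed)) ;

Because this layout matches the removed struct descriptor_table (u16 limit,
unsigned long base), the conversion below is a mechanical rename of the
struct type and its two fields.]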
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 06d9e79ca37d..cf392dfb8000 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -461,11 +461,6 @@ struct kvm_vcpu_stat {
 	u32 nmi_injections;
 };
 
-struct descriptor_table {
-	u16 limit;
-	unsigned long base;
-} __attribute__((packed));
-
 struct kvm_x86_ops {
 	int (*cpu_has_kvm_support)(void);          /* __init */
 	int (*disabled_by_bios)(void);             /* __init */
@@ -503,10 +498,10 @@ struct kvm_x86_ops {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
 	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
-	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
-	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
+	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
 	int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -724,12 +719,12 @@ static inline void kvm_load_ldt(u16 sel)
 	asm("lldt %0" : : "rm"(sel));
 }
 
-static inline void kvm_get_idt(struct descriptor_table *table)
+static inline void kvm_get_idt(struct desc_ptr *table)
 {
 	asm("sidt %0" : "=m"(*table));
 }
 
-static inline void kvm_get_gdt(struct descriptor_table *table)
+static inline void kvm_get_gdt(struct desc_ptr *table)
 {
 	asm("sgdt %0" : "=m"(*table));
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2ba58206812a..77fa2e3053b5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -319,7 +319,7 @@ static int svm_hardware_enable(void *garbage)
 
 	struct svm_cpu_data *sd;
 	uint64_t efer;
-	struct descriptor_table gdt_descr;
+	struct desc_ptr gdt_descr;
 	struct desc_struct *gdt;
 	int me = raw_smp_processor_id();
 
@@ -345,7 +345,7 @@ static int svm_hardware_enable(void *garbage)
 	sd->next_asid = sd->max_asid + 1;
 
 	kvm_get_gdt(&gdt_descr);
-	gdt = (struct desc_struct *)gdt_descr.base;
+	gdt = (struct desc_struct *)gdt_descr.address;
 	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
 	wrmsrl(MSR_EFER, efer | EFER_SVME);
@@ -936,36 +936,36 @@ static int svm_get_cpl(struct kvm_vcpu *vcpu)
 	return save->cpl;
 }
 
-static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	dt->limit = svm->vmcb->save.idtr.limit;
-	dt->base = svm->vmcb->save.idtr.base;
+	dt->size = svm->vmcb->save.idtr.limit;
+	dt->address = svm->vmcb->save.idtr.base;
 }
 
-static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->save.idtr.limit = dt->limit;
-	svm->vmcb->save.idtr.base = dt->base ;
+	svm->vmcb->save.idtr.limit = dt->size;
+	svm->vmcb->save.idtr.base = dt->address ;
 }
 
-static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	dt->limit = svm->vmcb->save.gdtr.limit;
-	dt->base = svm->vmcb->save.gdtr.base;
+	dt->size = svm->vmcb->save.gdtr.limit;
+	dt->address = svm->vmcb->save.gdtr.base;
 }
 
-static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->save.gdtr.limit = dt->limit;
-	svm->vmcb->save.gdtr.base = dt->base ;
+	svm->vmcb->save.gdtr.limit = dt->size;
+	svm->vmcb->save.gdtr.base = dt->address ;
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bc933cfb4e66..68f895b00450 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -600,11 +600,11 @@ static void reload_tss(void)
 	/*
 	 * VT restores TR but not its size.  Useless.
 	 */
-	struct descriptor_table gdt;
+	struct desc_ptr gdt;
 	struct desc_struct *descs;
 
 	kvm_get_gdt(&gdt);
-	descs = (void *)gdt.base;
+	descs = (void *)gdt.address;
 	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
 	load_TR_desc();
 }
@@ -758,7 +758,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	if (vcpu->cpu != cpu) {
-		struct descriptor_table dt;
+		struct desc_ptr dt;
 		unsigned long sysenter_esp;
 
 		vcpu->cpu = cpu;
@@ -768,7 +768,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
 		kvm_get_gdt(&dt);
-		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
+		vmcs_writel(HOST_GDTR_BASE, dt.address);   /* 22.2.4 */
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -1934,28 +1934,28 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 	*l = (ar >> 13) & 1;
 }
 
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-	dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
-	dt->base = vmcs_readl(GUEST_IDTR_BASE);
+	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+	dt->address = vmcs_readl(GUEST_IDTR_BASE);
 }
 
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-	vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
-	vmcs_writel(GUEST_IDTR_BASE, dt->base);
+	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+	vmcs_writel(GUEST_IDTR_BASE, dt->address);
 }
 
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-	dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
-	dt->base = vmcs_readl(GUEST_GDTR_BASE);
+	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+	dt->address = vmcs_readl(GUEST_GDTR_BASE);
 }
 
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-	vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
-	vmcs_writel(GUEST_GDTR_BASE, dt->base);
+	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+	vmcs_writel(GUEST_GDTR_BASE, dt->address);
 }
 
 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@ -2334,7 +2334,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	u32 junk;
 	u64 host_pat, tsc_this, tsc_base;
 	unsigned long a;
-	struct descriptor_table dt;
+	struct desc_ptr dt;
 	int i;
 	unsigned long kvm_vmx_return;
 	u32 exec_control;
@@ -2416,7 +2416,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
 	kvm_get_idt(&dt);
-	vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
+	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
 
 	asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
 	vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3c4ca98ad27f..274a8e39bca7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -225,7 +225,7 @@ static void drop_user_return_notifiers(void *ignore)
 
 unsigned long segment_base(u16 selector)
 {
-	struct descriptor_table gdt;
+	struct desc_ptr gdt;
 	struct desc_struct *d;
 	unsigned long table_base;
 	unsigned long v;
@@ -234,7 +234,7 @@ unsigned long segment_base(u16 selector)
 		return 0;
 
 	kvm_get_gdt(&gdt);
-	table_base = gdt.base;
+	table_base = gdt.address;
 
 	if (selector & 4) {           /* from ldt */
 		u16 ldt_selector = kvm_read_ldt();
@@ -3949,14 +3949,14 @@ static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-	struct descriptor_table dt = { limit, base };
+	struct desc_ptr dt = { limit, base };
 
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 }
 
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-	struct descriptor_table dt = { limit, base };
+	struct desc_ptr dt = { limit, base };
 
 	kvm_x86_ops->set_idt(vcpu, &dt);
 }
@@ -4581,7 +4581,7 @@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
-	struct descriptor_table dt;
+	struct desc_ptr dt;
 
 	vcpu_load(vcpu);
 
@@ -4596,11 +4596,11 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
 	kvm_x86_ops->get_idt(vcpu, &dt);
-	sregs->idt.limit = dt.limit;
-	sregs->idt.base = dt.base;
+	sregs->idt.limit = dt.size;
+	sregs->idt.base = dt.address;
 	kvm_x86_ops->get_gdt(vcpu, &dt);
-	sregs->gdt.limit = dt.limit;
-	sregs->gdt.base = dt.base;
+	sregs->gdt.limit = dt.size;
+	sregs->gdt.base = dt.address;
 
 	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
@@ -4672,7 +4672,7 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 
 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 					  u16 selector,
-					  struct descriptor_table *dtable)
+					  struct desc_ptr *dtable)
 {
 	if (selector & 1 << 2) {
 		struct kvm_segment kvm_seg;
@@ -4680,10 +4680,10 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
 
 		if (kvm_seg.unusable)
-			dtable->limit = 0;
+			dtable->size = 0;
 		else
-			dtable->limit = kvm_seg.limit;
-		dtable->base = kvm_seg.base;
+			dtable->size = kvm_seg.limit;
+		dtable->address = kvm_seg.base;
 	} else
 		kvm_x86_ops->get_gdt(vcpu, dtable);
 }
@@ -4693,7 +4693,7 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
-	struct descriptor_table dtable;
+	struct desc_ptr dtable;
 	u16 index = selector >> 3;
 	int ret;
 	u32 err;
@@ -4701,7 +4701,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 	get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-	if (dtable.limit < index * 8 + 7) {
+	if (dtable.size < index * 8 + 7) {
 		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
@@ -4718,14 +4718,14 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
-	struct descriptor_table dtable;
+	struct desc_ptr dtable;
 	u16 index = selector >> 3;
 
 	get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-	if (dtable.limit < index * 8 + 7)
+	if (dtable.size < index * 8 + 7)
 		return 1;
-	return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+	return kvm_write_guest_virt(dtable.address + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
 }
 
 static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
@@ -5204,15 +5204,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 {
 	int mmu_reset_needed = 0;
 	int pending_vec, max_bits;
-	struct descriptor_table dt;
+	struct desc_ptr dt;
 
 	vcpu_load(vcpu);
 
-	dt.limit = sregs->idt.limit;
-	dt.base = sregs->idt.base;
+	dt.size = sregs->idt.limit;
+	dt.address = sregs->idt.base;
 	kvm_x86_ops->set_idt(vcpu, &dt);
-	dt.limit = sregs->gdt.limit;
-	dt.base = sregs->gdt.base;
+	dt.size = sregs->gdt.limit;
+	dt.address = sregs->gdt.base;
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;