KVM: SVM: Add clean-bits infrastructure code
author     Roedel, Joerg <Joerg.Roedel@amd.com>
           Fri, 3 Dec 2010 12:15:21 +0000 (13:15 +0100)
committer  Avi Kivity <avi@redhat.com>
           Wed, 12 Jan 2011 09:30:22 +0000 (11:30 +0200)
This patch adds the infrastructure for the implementation of
the individual clean-bits.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
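
Background for the sketch below: each bit in the new "clean" field tells the hardware that the corresponding VMCB area has not been modified since the last VMRUN, so a cached copy may be reused. mark_all_dirty() clears the whole field, mark_all_clean() sets every defined bit except those in VMCB_ALWAYS_DIRTY_MASK, and mark_dirty() clears a single bit after software touches the area it covers. The following is a minimal sketch of how a follow-up patch might define one such bit and pair it with these helpers; the enum entry VMCB_NPT_EXAMPLE and the wrapper function are illustrative and not part of this commit:

/*
 * Hypothetical follow-up: a clean bit covering the nested-paging
 * fields (the name VMCB_NPT_EXAMPLE is illustrative only).
 */
enum {
	VMCB_NPT_EXAMPLE,	/* would cover e.g. control.nested_cr3 */
	VMCB_DIRTY_MAX,
};

static void svm_write_nested_cr3_example(struct vcpu_svm *svm, u64 cr3)
{
	/* Modify a field that this clean bit covers ... */
	svm->vmcb->control.nested_cr3 = cr3;

	/* ... then clear the bit so hardware reloads that VMCB area. */
	mark_dirty(svm->vmcb, VMCB_NPT_EXAMPLE);
}
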
arch/x86/include/asm/svm.h
arch/x86/kvm/svm.c

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 11dbca7a582a32bc302fd9094be31a8e44e12e8d..235dd732c336f7d623adcae1e1fb2f4d03b18bee 100644
@@ -79,7 +79,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
        u32 event_inj_err;
        u64 nested_cr3;
        u64 lbr_ctl;
-       u64 reserved_5;
+       u32 clean;
+       u32 reserved_5;
        u64 next_rip;
        u8 reserved_6[816];
 };
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 50387860a53ca261c8c73b35c10590a384beb0d5..e73cbc3c49f9fab2d342b294edd5e403ee299a37 100644
@@ -185,6 +185,28 @@ static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
 
+enum {
+       VMCB_DIRTY_MAX,
+};
+
+#define VMCB_ALWAYS_DIRTY_MASK 0U
+
+static inline void mark_all_dirty(struct vmcb *vmcb)
+{
+       vmcb->control.clean = 0;
+}
+
+static inline void mark_all_clean(struct vmcb *vmcb)
+{
+       vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+                              & ~VMCB_ALWAYS_DIRTY_MASK;
+}
+
+static inline void mark_dirty(struct vmcb *vmcb, int bit)
+{
+       vmcb->control.clean &= ~(1 << bit);
+}
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
        return container_of(vcpu, struct vcpu_svm, vcpu);
@@ -973,6 +995,8 @@ static void init_vmcb(struct vcpu_svm *svm)
                set_intercept(svm, INTERCEPT_PAUSE);
        }
 
+       mark_all_dirty(svm->vmcb);
+
        enable_gif(svm);
 }
 
@@ -1089,6 +1113,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        if (unlikely(cpu != vcpu->cpu)) {
                svm->asid_generation = 0;
+               mark_all_dirty(svm->vmcb);
        }
 
 #ifdef CONFIG_X86_64
@@ -2140,6 +2165,8 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;
 
+       mark_all_dirty(svm->vmcb);
+
        nested_svm_unmap(page);
 
        nested_svm_uninit_mmu_context(&svm->vcpu);
@@ -2351,6 +2378,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 
        enable_gif(svm);
 
+       mark_all_dirty(svm->vmcb);
+
        return true;
 }
 
@@ -3490,6 +3519,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
        if (unlikely(svm->vmcb->control.exit_code ==
                     SVM_EXIT_EXCP_BASE + MC_VECTOR))
                svm_handle_mce(svm);
+
+       mark_all_clean(svm->vmcb);
 }
 
 #undef R