KVM: PPC: Book3S HV: Simplify dynamic micro-threading code
author Paul Mackerras <paulus@ozlabs.org>
Thu, 22 Jun 2017 05:08:42 +0000 (15:08 +1000)
committer Paul Mackerras <paulus@ozlabs.org>
Sat, 1 Jul 2017 08:59:01 +0000 (18:59 +1000)
Since commit b009031f74da ("KVM: PPC: Book3S HV: Take out virtual
core piggybacking code", 2016-09-15), we only have at most one
vcore per subcore.  Previously, the fact that there might be more
than one vcore per subcore meant that we had the notion of a
"master vcore", which was the vcore that controlled thread 0 of
the subcore.  We also needed a list per subcore in the core_info
struct to record which vcores belonged to each subcore.  Now that
there can only be one vcore in the subcore, we can replace the
list with a simple pointer and get rid of the notion of the
master vcore (and in fact treat every vcore as a master vcore).

We can also get rid of the subcore_vm[] field in the core_info
struct since it is never read.

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_asm.h
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_builtin.c
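
The diff below is the real change. As a rough, self-contained illustration of the shape of the new bookkeeping (simplified stand-in types only, not the kernel's actual structures or functions), a subcore's vcore tracking goes from a per-subcore list headed by a "master" vcore to a plain array of vcore pointers:

/*
 * Illustrative sketch only: simplified stand-ins for the kernel's
 * core_info bookkeeping, not the real KVM structures or functions.
 */
#include <stdio.h>

#define MAX_SUBCORES 4

struct vcore {
	int id;			/* stand-in for struct kvmppc_vcore */
};

/*
 * Old scheme (removed by this patch): each subcore kept a list of
 * vcores and designated one of them the "master".  Since a subcore
 * can now hold at most one vcore, the list collapses to a single
 * pointer, and every vcore is effectively its own master.
 */
struct core_info_sketch {
	int n_subcores;
	struct vcore *vc[MAX_SUBCORES];	/* one vcore (or NULL) per subcore */
};

int main(void)
{
	struct vcore a = { .id = 0 }, b = { .id = 1 };
	struct core_info_sketch cip = { .n_subcores = 2, .vc = { &a, &b } };

	/* Walking the subcores is now a plain array scan, no list traversal. */
	for (int sub = 0; sub < cip.n_subcores; ++sub)
		printf("subcore %d runs vcore %d\n", sub, cip.vc[sub]->id);
	return 0;
}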

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 2bf35017ffc0e40c0771378eb93160012c77957e..b8d5b8e35244edcad25c695df6e02f4b16d333b1 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -86,7 +86,6 @@ struct kvmppc_vcore {
        u16 last_cpu;
        u8 vcore_state;
        u8 in_guest;
-       struct kvmppc_vcore *master_vcore;
        struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
        struct list_head preempt_list;
        spinlock_t lock;
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index b148496ffe36da31c81f18020a783798c9e8c1c3..7cea76f11c26c66fc52e08aae5b37e6534ac79a8 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -81,7 +81,7 @@ struct kvm_split_mode {
        u8              subcore_size;
        u8              do_nap;
        u8              napped[MAX_SMT_THREADS];
-       struct kvmppc_vcore *master_vcs[MAX_SUBCORES];
+       struct kvmppc_vcore *vc[MAX_SUBCORES];
 };
 
 /*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c4ada89be6580beef30f8975d62bd1c3b6e37516..03d6c7f9b5476bfe97a75ed263db172523117765 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2171,7 +2171,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
 {
        int cpu;
        struct paca_struct *tpaca;
-       struct kvmppc_vcore *mvc = vc->master_vcore;
        struct kvm *kvm = vc->kvm;
 
        cpu = vc->pcpu;
@@ -2181,7 +2180,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
                        vcpu->arch.timer_running = 0;
                }
                cpu += vcpu->arch.ptid;
-               vcpu->cpu = mvc->pcpu;
+               vcpu->cpu = vc->pcpu;
                vcpu->arch.thread_cpu = cpu;
 
                /*
@@ -2207,10 +2206,10 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
        }
        tpaca = &paca[cpu];
        tpaca->kvm_hstate.kvm_vcpu = vcpu;
-       tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
+       tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
        /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
        smp_wmb();
-       tpaca->kvm_hstate.kvm_vcore = mvc;
+       tpaca->kvm_hstate.kvm_vcore = vc;
        if (cpu != smp_processor_id())
                kvmppc_ipi_thread(cpu);
 }
@@ -2339,8 +2338,7 @@ struct core_info {
        int             max_subcore_threads;
        int             total_threads;
        int             subcore_threads[MAX_SUBCORES];
-       struct kvm      *subcore_vm[MAX_SUBCORES];
-       struct list_head vcs[MAX_SUBCORES];
+       struct kvmppc_vcore *vc[MAX_SUBCORES];
 };
 
 /*
@@ -2351,17 +2349,12 @@ static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
 
 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
 {
-       int sub;
-
        memset(cip, 0, sizeof(*cip));
        cip->n_subcores = 1;
        cip->max_subcore_threads = vc->num_threads;
        cip->total_threads = vc->num_threads;
        cip->subcore_threads[0] = vc->num_threads;
-       cip->subcore_vm[0] = vc->kvm;
-       for (sub = 0; sub < MAX_SUBCORES; ++sub)
-               INIT_LIST_HEAD(&cip->vcs[sub]);
-       list_add_tail(&vc->preempt_list, &cip->vcs[0]);
+       cip->vc[0] = vc;
 }
 
 static bool subcore_config_ok(int n_subcores, int n_threads)
@@ -2381,9 +2374,8 @@ static bool subcore_config_ok(int n_subcores, int n_threads)
        return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
 }
 
-static void init_master_vcore(struct kvmppc_vcore *vc)
+static void init_vcore_to_run(struct kvmppc_vcore *vc)
 {
-       vc->master_vcore = vc;
        vc->entry_exit_map = 0;
        vc->in_guest = 0;
        vc->napping_threads = 0;
@@ -2408,9 +2400,9 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
        ++cip->n_subcores;
        cip->total_threads += vc->num_threads;
        cip->subcore_threads[sub] = vc->num_threads;
-       cip->subcore_vm[sub] = vc->kvm;
-       init_master_vcore(vc);
-       list_move_tail(&vc->preempt_list, &cip->vcs[sub]);
+       cip->vc[sub] = vc;
+       init_vcore_to_run(vc);
+       list_del_init(&vc->preempt_list);
 
        return true;
 }
@@ -2515,7 +2507,6 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
                        wake_up(&vcpu->arch.cpu_run);
                }
        }
-       list_del_init(&vc->preempt_list);
        if (!is_master) {
                if (still_running > 0) {
                        kvmppc_vcore_preempt(vc);
@@ -2587,7 +2578,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        int i;
        int srcu_idx;
        struct core_info core_info;
-       struct kvmppc_vcore *pvc, *vcnext;
+       struct kvmppc_vcore *pvc;
        struct kvm_split_mode split_info, *sip;
        int split, subcore_size, active;
        int sub;
@@ -2610,7 +2601,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        /*
         * Initialize *vc.
         */
-       init_master_vcore(vc);
+       init_vcore_to_run(vc);
        vc->preempt_tb = TB_NIL;
 
        /*
@@ -2670,9 +2661,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                split_info.ldbar = mfspr(SPRN_LDBAR);
                split_info.subcore_size = subcore_size;
                for (sub = 0; sub < core_info.n_subcores; ++sub)
-                       split_info.master_vcs[sub] =
-                               list_first_entry(&core_info.vcs[sub],
-                                       struct kvmppc_vcore, preempt_list);
+                       split_info.vc[sub] = core_info.vc[sub];
                /* order writes to split_info before kvm_split_mode pointer */
                smp_wmb();
        }
@@ -2704,24 +2693,23 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                thr = subcore_thread_map[sub];
                thr0_done = false;
                active |= 1 << thr;
-               list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
-                       pvc->pcpu = pcpu + thr;
-                       for_each_runnable_thread(i, vcpu, pvc) {
-                               kvmppc_start_thread(vcpu, pvc);
-                               kvmppc_create_dtl_entry(vcpu, pvc);
-                               trace_kvm_guest_enter(vcpu);
-                               if (!vcpu->arch.ptid)
-                                       thr0_done = true;
-                               active |= 1 << (thr + vcpu->arch.ptid);
-                       }
-                       /*
-                        * We need to start the first thread of each subcore
-                        * even if it doesn't have a vcpu.
-                        */
-                       if (pvc->master_vcore == pvc && !thr0_done)
-                               kvmppc_start_thread(NULL, pvc);
-                       thr += pvc->num_threads;
+               pvc = core_info.vc[sub];
+               pvc->pcpu = pcpu + thr;
+               for_each_runnable_thread(i, vcpu, pvc) {
+                       kvmppc_start_thread(vcpu, pvc);
+                       kvmppc_create_dtl_entry(vcpu, pvc);
+                       trace_kvm_guest_enter(vcpu);
+                       if (!vcpu->arch.ptid)
+                               thr0_done = true;
+                       active |= 1 << (thr + vcpu->arch.ptid);
                }
+               /*
+                * We need to start the first thread of each subcore
+                * even if it doesn't have a vcpu.
+                */
+               if (!thr0_done)
+                       kvmppc_start_thread(NULL, pvc);
+               thr += pvc->num_threads;
        }
 
        /*
@@ -2748,8 +2736,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        trace_kvmppc_run_core(vc, 0);
 
        for (sub = 0; sub < core_info.n_subcores; ++sub)
-               list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
-                       spin_unlock(&pvc->lock);
+               spin_unlock(&core_info.vc[sub]->lock);
 
        guest_enter();
 
@@ -2802,10 +2789,10 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        smp_mb();
        guest_exit();
 
-       for (sub = 0; sub < core_info.n_subcores; ++sub)
-               list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
-                                        preempt_list)
-                       post_guest_process(pvc, pvc == vc);
+       for (sub = 0; sub < core_info.n_subcores; ++sub) {
+               pvc = core_info.vc[sub];
+               post_guest_process(pvc, pvc == vc);
+       }
 
        spin_lock(&vc->lock);
        preempt_enable();
@@ -3026,15 +3013,14 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         */
        if (!signal_pending(current)) {
                if (vc->vcore_state == VCORE_PIGGYBACK) {
-                       struct kvmppc_vcore *mvc = vc->master_vcore;
-                       if (spin_trylock(&mvc->lock)) {
-                               if (mvc->vcore_state == VCORE_RUNNING &&
-                                   !VCORE_IS_EXITING(mvc)) {
+                       if (spin_trylock(&vc->lock)) {
+                               if (vc->vcore_state == VCORE_RUNNING &&
+                                   !VCORE_IS_EXITING(vc)) {
                                        kvmppc_create_dtl_entry(vcpu, vc);
                                        kvmppc_start_thread(vcpu, vc);
                                        trace_kvm_guest_enter(vcpu);
                                }
-                               spin_unlock(&mvc->lock);
+                               spin_unlock(&vc->lock);
                        }
                } else if (vc->vcore_state == VCORE_RUNNING &&
                           !VCORE_IS_EXITING(vc)) {
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index ee4c2558c3058717da1f6299fd6a0e0aebc75360..90644db9d38e21c82427d949061d585d60c4558f 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -307,7 +307,7 @@ void kvmhv_commence_exit(int trap)
                return;
 
        for (i = 0; i < MAX_SUBCORES; ++i) {
-               vc = sip->master_vcs[i];
+               vc = sip->vc[i];
                if (!vc)
                        break;
                do {