KVM: x86/vPMU: rename a few PMU functions
Author:     Wei Huang <wehuang@redhat.com>
AuthorDate: Fri, 19 Jun 2015 11:44:45 +0000 (13:44 +0200)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Fri, 19 Jun 2015 15:16:29 +0000 (17:16 +0200)
Before introducing a pmu.h header for them, make the naming more
consistent.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/pmu.c
arch/x86/kvm/x86.c
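
For quick reference, the renames this commit performs, as read from the
diffs below, are:

  kvm_pmu_cpuid_update()  ->  kvm_pmu_refresh()
  kvm_pmu_msr()           ->  kvm_pmu_is_valid_msr()
  kvm_pmu_check_pmc()     ->  kvm_pmu_is_valid_msr_idx()
  kvm_pmu_read_pmc()      ->  kvm_pmu_rdpmc()
  kvm_handle_pmu_event()  ->  kvm_pmu_handle_event()
  kvm_deliver_pmi()       ->  kvm_pmu_deliver_pmi()

and, internal to arch/x86/kvm/pmu.c:

  pmc_enabled()           ->  pmc_is_enabled()
  trigger_pmi()           ->  kvm_pmi_trigger_fn()
  read_pmc()              ->  pmc_read_counter()
  stop_counter()          ->  pmc_stop_counter()
  reprogram_counter()     ->  pmc_reprogram_counter()
  fixed_en_pmi()          ->  fixed_ctrl_field()
  reprogram_idx()         ->  reprogram_counter()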

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f2d60cce7595f31decfc382daa9311d77da65e69..d92d7edc016b186a680bbb8f209241c2bbc0e032 100644
@@ -1198,14 +1198,14 @@ int kvm_is_in_guest(void);
 void kvm_pmu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
 void kvm_pmu_reset(struct kvm_vcpu *vcpu);
-void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
-bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
+void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
-int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
-int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
-void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
-void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc);
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 
 int __x86_set_memory_region(struct kvm *kvm,
                            const struct kvm_userspace_memory_region *mem);
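
The declarations above show the convention the renames move toward:
kvm_pmu_*() entry points take a struct kvm_vcpu and act on that vCPU's
whole PMU, while the pmc_*() helpers renamed in pmu.c below act on a
single counter. A minimal stand-alone sketch of that split, using
simplified stand-in types rather than the real KVM structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the KVM structures (not the real layouts). */
struct kvm_pmc  { int idx; uint64_t counter; };
struct kvm_pmu  { uint64_t global_ctrl; struct kvm_pmc gp_counters[4]; };
struct kvm_vcpu { struct kvm_pmu pmu; };

/* pmc_*() helpers act on one counter ... */
static bool pmc_is_enabled(const struct kvm_pmu *pmu, const struct kvm_pmc *pmc)
{
	return pmu->global_ctrl & (1ull << pmc->idx);
}

/* ... kvm_pmu_*() entry points act on the vCPU's whole PMU. */
static void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	for (int i = 0; i < 4; i++)
		if (pmc_is_enabled(&vcpu->pmu, &vcpu->pmu.gp_counters[i]))
			vcpu->pmu.gp_counters[i].counter++; /* placeholder work */
}

int main(void)
{
	struct kvm_vcpu vcpu = { .pmu = { .global_ctrl = 0x3 } };

	for (int i = 0; i < 4; i++)
		vcpu.pmu.gp_counters[i].idx = i;

	kvm_pmu_handle_event(&vcpu);
	/* global_ctrl enables counters 0 and 1 only: prints 1, then 0. */
	printf("counter0=%llu counter2=%llu\n",
	       (unsigned long long)vcpu.pmu.gp_counters[0].counter,
	       (unsigned long long)vcpu.pmu.gp_counters[2].counter);
	return 0;
}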
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9dadf8d67873b85433df57f50baf4e602bbcc44b..9d69f76aa0fa0f2155632b9349f866e1a2f429b4 100644
@@ -111,7 +111,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
        /* Update physical-address width */
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
 
-       kvm_pmu_cpuid_update(vcpu);
+       kvm_pmu_refresh(vcpu);
        return 0;
 }
 
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 29fbf9dfdc549f47f1e189f58c5d5adfeb7a5fd1..d6a4506f62a869876e69d26ea56ba6c5ab15fcaa 100644
@@ -52,7 +52,7 @@ static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
        return pmu->counter_bitmask[pmc->type];
 }
 
-static inline bool pmc_enabled(struct kvm_pmc *pmc)
+static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
@@ -87,20 +87,20 @@ static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
                return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
 }
 
-void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
        if (vcpu->arch.apic)
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 }
 
-static void trigger_pmi(struct irq_work *irq_work)
+static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
 {
        struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
                        irq_work);
        struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
                        arch.pmu);
 
-       kvm_deliver_pmi(vcpu);
+       kvm_pmu_deliver_pmi(vcpu);
 }
 
 static void kvm_perf_overflow(struct perf_event *perf_event,
@@ -138,7 +138,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
        }
 }
 
-static u64 read_pmc(struct kvm_pmc *pmc)
+static u64 pmc_read_counter(struct kvm_pmc *pmc)
 {
        u64 counter, enabled, running;
 
@@ -153,16 +153,16 @@ static u64 read_pmc(struct kvm_pmc *pmc)
        return counter & pmc_bitmask(pmc);
 }
 
-static void stop_counter(struct kvm_pmc *pmc)
+static void pmc_stop_counter(struct kvm_pmc *pmc)
 {
        if (pmc->perf_event) {
-               pmc->counter = read_pmc(pmc);
+               pmc->counter = pmc_read_counter(pmc);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
 }
 
-static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
+static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                unsigned config, bool exclude_user, bool exclude_kernel,
                bool intr, bool in_tx, bool in_tx_cp)
 {
@@ -224,9 +224,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 
        pmc->eventsel = eventsel;
 
-       stop_counter(pmc);
+       pmc_stop_counter(pmc);
 
-       if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
+       if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
                return;
 
        event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
@@ -246,7 +246,7 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        if (type == PERF_TYPE_RAW)
                config = eventsel & X86_RAW_EVENT_MASK;
 
-       reprogram_counter(pmc, type, config,
+       pmc_reprogram_counter(pmc, type, config,
                        !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                        !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                        eventsel & ARCH_PERFMON_EVENTSEL_INT,
@@ -259,19 +259,19 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
        unsigned en = en_pmi & 0x3;
        bool pmi = en_pmi & 0x8;
 
-       stop_counter(pmc);
+       pmc_stop_counter(pmc);
 
-       if (!en || !pmc_enabled(pmc))
+       if (!en || !pmc_is_enabled(pmc))
                return;
 
-       reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+       pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
                        arch_events[fixed_pmc_events[idx]].event_type,
                        !(en & 0x2), /* exclude user */
                        !(en & 0x1), /* exclude kernel */
                        pmi, false, false);
 }
 
-static inline u8 fixed_en_pmi(u64 ctrl, int idx)
+static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
 {
        return (ctrl >> (idx * 4)) & 0xf;
 }
@@ -281,10 +281,10 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
        int i;
 
        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-               u8 en_pmi = fixed_en_pmi(data, i);
+               u8 en_pmi = fixed_ctrl_field(data, i);
                struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
 
-               if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
+               if (fixed_ctrl_field(pmu->fixed_ctr_ctrl, i) == en_pmi)
                        continue;
 
                reprogram_fixed_counter(pmc, en_pmi, i);
@@ -293,7 +293,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
        pmu->fixed_ctr_ctrl = data;
 }
 
-static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+static void reprogram_counter(struct kvm_pmu *pmu, int idx)
 {
        struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
 
@@ -305,7 +305,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
        else {
                int fidx = idx - INTEL_PMC_IDX_FIXED;
                reprogram_fixed_counter(pmc,
-                               fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
+                               fixed_ctrl_field(pmu->fixed_ctr_ctrl, fidx), fidx);
        }
 }
 
@@ -317,10 +317,10 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
        pmu->global_ctrl = data;
 
        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
-               reprogram_idx(pmu, bit);
+               reprogram_counter(pmu, bit);
 }
 
-bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int ret;
@@ -362,7 +362,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
        default:
                if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
                                (pmc = get_fixed_pmc(pmu, index))) {
-                       *data = read_pmc(pmc);
+                       *data = pmc_read_counter(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
                        *data = pmc->eventsel;
@@ -415,7 +415,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                (pmc = get_fixed_pmc(pmu, index))) {
                        if (!msr_info->host_initiated)
                                data = (s64)(s32)data;
-                       pmc->counter += data - read_pmc(pmc);
+                       pmc->counter += data - pmc_read_counter(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
@@ -429,7 +429,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        return 1;
 }
 
-int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
+int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
 {
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool fixed = pmc & (1u << 30);
@@ -438,7 +438,7 @@ int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
                (fixed && pmc >= pmu->nr_arch_fixed_counters);
 }
 
-int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
 {
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool fast_mode = pmc & (1u << 31);
@@ -452,7 +452,7 @@ int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
        if (fixed && pmc >= pmu->nr_arch_fixed_counters)
                return 1;
        counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-       ctr = read_pmc(&counters[pmc]);
+       ctr = pmc_read_counter(&counters[pmc]);
        if (fast_mode)
                ctr = (u32)ctr;
        *data = ctr;
@@ -460,7 +460,7 @@ int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
        return 0;
 }
 
-void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_cpuid_entry2 *entry;
@@ -527,8 +527,8 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
        }
-       init_irq_work(&pmu->irq_work, trigger_pmi);
-       kvm_pmu_cpuid_update(vcpu);
+       init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
+       kvm_pmu_refresh(vcpu);
 }
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
@@ -539,12 +539,12 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
        irq_work_sync(&pmu->irq_work);
        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];
-               stop_counter(pmc);
+               pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }
 
        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
-               stop_counter(&pmu->fixed_counters[i]);
+               pmc_stop_counter(&pmu->fixed_counters[i]);
 
        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                pmu->global_ovf_ctrl = 0;
@@ -555,7 +555,7 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
        kvm_pmu_reset(vcpu);
 }
 
-void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 {
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        u64 bitmask;
@@ -571,6 +571,6 @@ void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
                        continue;
                }
 
-               reprogram_idx(pmu, bit);
+               reprogram_counter(pmu, bit);
        }
 }
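
A side note on the kvm_pmu_rdpmc() hunks above: the RDPMC index taken
from ECX is decoded bitwise, with bit 30 selecting the fixed-counter
space and bit 31 requesting a truncated 32-bit "fast" read. A small
stand-alone sketch of that decoding (illustration only, not kernel
code; the masking of the two flag bits is assumed here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the bit tests visible in kvm_pmu_rdpmc() and
 * kvm_pmu_is_valid_msr_idx() in the diff above. */
static void decode_rdpmc_index(uint32_t ecx)
{
	bool fast  = ecx & (1u << 31);    /* truncate the result to 32 bits */
	bool fixed = ecx & (1u << 30);    /* fixed vs. general-purpose space */
	uint32_t idx = ecx & ~(3u << 30); /* remaining bits: counter number */

	printf("counter %u, %s, %s read\n", idx,
	       fixed ? "fixed" : "general-purpose",
	       fast ? "32-bit" : "full-width");
}

int main(void)
{
	decode_rdpmc_index(0);               /* GP counter 0, full width */
	decode_rdpmc_index((1u << 30) | 1);  /* fixed counter 1 */
	decode_rdpmc_index((1u << 31) | 2);  /* GP counter 2, fast mode */
	return 0;
}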
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6574fa36cb65dd5f5f8e10aae7c81825c9d27309..c34b52c828ea0c0ad4b1b7e91277601947d1f8ad 100644
@@ -913,7 +913,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu)
        u64 data;
        int err;
 
-       err = kvm_pmu_read_pmc(vcpu, ecx, &data);
+       err = kvm_pmu_rdpmc(vcpu, ecx, &data);
        if (err)
                return err;
        kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
@@ -2231,7 +2231,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                pr = true;
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
-               if (kvm_pmu_msr(vcpu, msr))
+               if (kvm_pmu_is_valid_msr(vcpu, msr))
                        return kvm_pmu_set_msr(vcpu, msr_info);
 
                if (pr || data != 0)
@@ -2277,7 +2277,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
-               if (kvm_pmu_msr(vcpu, msr))
+               if (kvm_pmu_is_valid_msr(vcpu, msr))
                        return kvm_pmu_set_msr(vcpu, msr_info);
                if (!ignore_msrs) {
                        vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
@@ -2435,7 +2435,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_P6_PERFCTR1:
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
-               if (kvm_pmu_msr(vcpu, msr_info->index))
+               if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
                        return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
                msr_info->data = 0;
                break;
@@ -2561,7 +2561,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.osvw.status;
                break;
        default:
-               if (kvm_pmu_msr(vcpu, msr_info->index))
+               if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
                        return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
                if (!ignore_msrs) {
                        vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
@@ -4966,13 +4966,13 @@ static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
                              u32 pmc)
 {
-       return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
+       return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
 }
 
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
                             u32 pmc, u64 *pdata)
 {
-       return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
+       return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
 }
 
 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
@@ -6542,9 +6542,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (kvm_check_request(KVM_REQ_NMI, vcpu))
                        process_nmi(vcpu);
                if (kvm_check_request(KVM_REQ_PMU, vcpu))
-                       kvm_handle_pmu_event(vcpu);
+                       kvm_pmu_handle_event(vcpu);
                if (kvm_check_request(KVM_REQ_PMI, vcpu))
-                       kvm_deliver_pmi(vcpu);
+                       kvm_pmu_deliver_pmi(vcpu);
                if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
                        vcpu_scan_ioapic(vcpu);
                if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
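
Finally, the vcpu_enter_guest() hunk above shows where the renamed
handlers sit in the run loop: pending PMU work is recorded as request
bits on the vCPU and drained before reentering the guest. A hedged
sketch of that request pattern, with stand-in types and bodies:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the KVM request machinery (simplified, not kernel code). */
enum { KVM_REQ_PMU, KVM_REQ_PMI };

struct kvm_vcpu { uint64_t requests; };

static void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	vcpu->requests |= 1ull << req;
}

static bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (!(vcpu->requests & (1ull << req)))
		return false;
	vcpu->requests &= ~(1ull << req); /* test-and-clear */
	return true;
}

static void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) { (void)vcpu; puts("handle PMU event"); }
static void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)  { (void)vcpu; puts("deliver PMI"); }

/* Simplified shape of the dispatch seen in vcpu_enter_guest() above. */
static void vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_PMU, vcpu))
		kvm_pmu_handle_event(vcpu);
	if (kvm_check_request(KVM_REQ_PMI, vcpu))
		kvm_pmu_deliver_pmi(vcpu);
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	kvm_make_request(KVM_REQ_PMU, &vcpu);
	kvm_make_request(KVM_REQ_PMI, &vcpu);
	vcpu_enter_guest(&vcpu); /* prints both messages, clearing the bits */
	return 0;
}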