3 * Local APIC virtualization
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright (C) 2007 Novell
7 * Copyright (C) 2007 Intel
8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
11 * Dor Laor <dor.laor@qumranet.com>
12 * Gregory Haskins <ghaskins@novell.com>
13 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
15 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
21 #include <linux/kvm_host.h>
22 #include <linux/kvm.h>
24 #include <linux/highmem.h>
25 #include <linux/smp.h>
26 #include <linux/hrtimer.h>
28 #include <linux/module.h>
29 #include <linux/math64.h>
30 #include <linux/slab.h>
31 #include <asm/processor.h>
34 #include <asm/current.h>
35 #include <asm/apicdef.h>
36 #include <asm/delay.h>
37 #include <linux/atomic.h>
38 #include <linux/jump_label.h>
39 #include "kvm_cache_regs.h"
46 #ifdef CONFIG_X86_64
47 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
48 #else
49 #define mod_64(x, y) ((x) % (y))
50 #endif
57 #define APIC_BUS_CYCLE_NS 1
59 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
60 #define apic_debug(fmt, arg...)
62 #define APIC_LVT_NUM 6
63 /* 0x14 is the version for Xeon and Pentium (SDM 8.4.8) */
64 #define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16))
65 #define LAPIC_MMIO_LENGTH (1 << 12)
66 /* the following defines are not in apicdef.h */
67 #define APIC_SHORT_MASK 0xc0000
68 #define APIC_DEST_NOSHORT 0x0
69 #define APIC_DEST_MASK 0x800
70 #define MAX_APIC_VECTOR 256
71 #define APIC_VECTORS_PER_REG 32
73 #define APIC_BROADCAST 0xFF
74 #define X2APIC_BROADCAST 0xFFFFFFFFul
76 #define VEC_POS(v) ((v) & (32 - 1))
77 #define REG_POS(v) (((v) >> 5) << 4)
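/*
 * A worked example of the two macros above: for vector 0x31,
 * REG_POS(0x31) = 0x10 (the second 32-bit IRR/ISR/TMR register) and
 * VEC_POS(0x31) = 17, i.e. bit 17 within that register.
 */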
79 static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
81 *((u32 *) (apic->regs + reg_off)) = val;
84 static inline int apic_test_vector(int vec, void *bitmap)
86 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
89 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
91 struct kvm_lapic *apic = vcpu->arch.apic;
93 return apic_test_vector(vector, apic->regs + APIC_ISR) ||
94 apic_test_vector(vector, apic->regs + APIC_IRR);
97 static inline void apic_set_vector(int vec, void *bitmap)
99 set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
102 static inline void apic_clear_vector(int vec, void *bitmap)
104 clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
107 static inline int __apic_test_and_set_vector(int vec, void *bitmap)
109 return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
112 static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
114 return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
117 struct static_key_deferred apic_hw_disabled __read_mostly;
118 struct static_key_deferred apic_sw_disabled __read_mostly;
120 static inline int apic_enabled(struct kvm_lapic *apic)
122 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
125 #define LVT_MASK \
126 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
128 #define LINT_MASK \
129 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
130 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
132 /* The logical map is definitely wrong if we have multiple
133 * modes at the same time. (Physical map is always right.)
135 static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
137 return !(map->mode & (map->mode - 1));
141 apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
145 BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
146 BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
147 BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
148 lid_bits = map->mode;
150 *cid = dest_id >> lid_bits;
151 *lid = dest_id & ((1 << lid_bits) - 1);
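/*
 * A worked example: in xAPIC cluster mode map->mode is 4, so a logical
 * dest_id of 0x23 splits into cid = 0x2 (the cluster) and lid = 0x3
 * (a mask covering CPUs 0 and 1 within that cluster).
 */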
154 static void recalculate_apic_map(struct kvm *kvm)
156 struct kvm_apic_map *new, *old = NULL;
157 struct kvm_vcpu *vcpu;
160 new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
162 mutex_lock(&kvm->arch.apic_map_lock);
167 kvm_for_each_vcpu(i, vcpu, kvm) {
168 struct kvm_lapic *apic = vcpu->arch.apic;
172 if (!kvm_apic_present(vcpu))
175 aid = kvm_apic_id(apic);
176 ldr = kvm_apic_get_reg(apic, APIC_LDR);
178 if (aid < ARRAY_SIZE(new->phys_map))
179 new->phys_map[aid] = apic;
181 if (apic_x2apic_mode(apic)) {
182 new->mode |= KVM_APIC_MODE_X2APIC;
184 ldr = GET_APIC_LOGICAL_ID(ldr);
185 if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
186 new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
188 new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
191 if (!kvm_apic_logical_map_valid(new))
194 apic_logical_id(new, ldr, &cid, &lid);
196 if (lid && cid < ARRAY_SIZE(new->logical_map))
197 new->logical_map[cid][ffs(lid) - 1] = apic;
200 old = rcu_dereference_protected(kvm->arch.apic_map,
201 lockdep_is_held(&kvm->arch.apic_map_lock));
202 rcu_assign_pointer(kvm->arch.apic_map, new);
203 mutex_unlock(&kvm->arch.apic_map_lock);
208 kvm_make_scan_ioapic_request(kvm);
211 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
213 bool enabled = val & APIC_SPIV_APIC_ENABLED;
215 apic_set_reg(apic, APIC_SPIV, val);
217 if (enabled != apic->sw_enabled) {
218 apic->sw_enabled = enabled;
220 static_key_slow_dec_deferred(&apic_sw_disabled);
221 recalculate_apic_map(apic->vcpu->kvm);
223 static_key_slow_inc(&apic_sw_disabled.key);
227 static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id)
229 apic_set_reg(apic, APIC_ID, id << 24);
230 recalculate_apic_map(apic->vcpu->kvm);
233 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
235 apic_set_reg(apic, APIC_LDR, id);
236 recalculate_apic_map(apic->vcpu->kvm);
239 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u8 id)
241 u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
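/* e.g. x2apic id 0x26 yields ldr 0x00020040: cluster 2, logical bit 6 */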
243 apic_set_reg(apic, APIC_ID, id << 24);
244 apic_set_reg(apic, APIC_LDR, ldr);
245 recalculate_apic_map(apic->vcpu->kvm);
248 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
250 return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
253 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
255 return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
258 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
260 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
263 static inline int apic_lvtt_period(struct kvm_lapic *apic)
265 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
268 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
270 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
273 static inline int apic_lvt_nmi_mode(u32 lvt_val)
275 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
278 void kvm_apic_set_version(struct kvm_vcpu *vcpu)
280 struct kvm_lapic *apic = vcpu->arch.apic;
281 struct kvm_cpuid_entry2 *feat;
282 u32 v = APIC_VERSION;
284 if (!lapic_in_kernel(vcpu))
287 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
288 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
289 v |= APIC_LVR_DIRECTED_EOI;
290 apic_set_reg(apic, APIC_LVR, v);
293 static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
294 LVT_MASK, /* part LVTT mask, timer mode mask added at runtime */
295 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
296 LVT_MASK | APIC_MODE_MASK, /* LVTPC */
297 LINT_MASK, LINT_MASK, /* LVT0-1 */
298 LVT_MASK /* LVTERR */
301 static int find_highest_vector(void *bitmap)
306 for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
307 vec >= 0; vec -= APIC_VECTORS_PER_REG) {
308 reg = bitmap + REG_POS(vec);
310 return fls(*reg) - 1 + vec;
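/*
 * e.g. with only vector 0x31 pending, the scan stops at the second
 * register (REG_POS 0x10) and returns fls(1 << 17) - 1 + 32 = 0x31.
 */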
316 static u8 count_vectors(void *bitmap)
322 for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
323 reg = bitmap + REG_POS(vec);
324 count += hweight32(*reg);
330 void __kvm_apic_update_irr(u32 *pir, void *regs)
334 for (i = 0; i <= 7; i++) {
335 pir_val = xchg(&pir[i], 0);
337 *((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
340 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
342 void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
344 struct kvm_lapic *apic = vcpu->arch.apic;
346 __kvm_apic_update_irr(pir, apic->regs);
348 kvm_make_request(KVM_REQ_EVENT, vcpu);
350 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
352 static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
354 apic_set_vector(vec, apic->regs + APIC_IRR);
356 * irr_pending must be true if any interrupt is pending; set it after
357 * APIC_IRR to avoid race with apic_clear_irr
359 apic->irr_pending = true;
362 static inline int apic_search_irr(struct kvm_lapic *apic)
364 return find_highest_vector(apic->regs + APIC_IRR);
367 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
372 * Note that irr_pending is just a hint. It will always be
373 * true when virtual interrupt delivery is enabled.
375 if (!apic->irr_pending)
378 if (apic->vcpu->arch.apicv_active)
379 kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
380 result = apic_search_irr(apic);
381 ASSERT(result == -1 || result >= 16);
386 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
388 struct kvm_vcpu *vcpu;
392 if (unlikely(vcpu->arch.apicv_active)) {
393 /* try to update RVI */
394 apic_clear_vector(vec, apic->regs + APIC_IRR);
395 kvm_make_request(KVM_REQ_EVENT, vcpu);
397 apic->irr_pending = false;
398 apic_clear_vector(vec, apic->regs + APIC_IRR);
399 if (apic_search_irr(apic) != -1)
400 apic->irr_pending = true;
404 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
406 struct kvm_vcpu *vcpu;
408 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
414 * With APIC virtualization enabled, all caching is disabled
415 * because the processor can modify ISR under the hood. Instead, just set SVI.
418 if (unlikely(vcpu->arch.apicv_active))
419 kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
422 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
424 * ISR (in service register) bit is set when injecting an interrupt.
425 * The highest vector is injected. Thus the latest bit set matches
426 * the highest bit in ISR.
428 apic->highest_isr_cache = vec;
432 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
437 * Note that isr_count is always 1, and highest_isr_cache
438 * is always -1, with APIC virtualization enabled.
440 if (!apic->isr_count)
442 if (likely(apic->highest_isr_cache != -1))
443 return apic->highest_isr_cache;
445 result = find_highest_vector(apic->regs + APIC_ISR);
446 ASSERT(result == -1 || result >= 16);
451 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
453 struct kvm_vcpu *vcpu;
454 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
460 * We do get here with APIC virtualization enabled if the guest
461 * uses the Hyper-V APIC enlightenment. In this case we may need
462 * to trigger a new interrupt delivery by writing the SVI field;
463 * on the other hand isr_count and highest_isr_cache are unused
464 * and must be left alone.
466 if (unlikely(vcpu->arch.apicv_active))
467 kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
468 apic_find_highest_isr(apic));
471 BUG_ON(apic->isr_count < 0);
472 apic->highest_isr_cache = -1;
476 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
478 /* This may race with setting of irr in __apic_accept_irq() and the
479 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
480 * will cause vmexit immediately and the value will be recalculated
481 * on the next vmentry.
483 return apic_find_highest_irr(vcpu->arch.apic);
486 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
487 int vector, int level, int trig_mode,
488 unsigned long *dest_map);
490 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
491 unsigned long *dest_map)
493 struct kvm_lapic *apic = vcpu->arch.apic;
495 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
496 irq->level, irq->trig_mode, dest_map);
499 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
502 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
506 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
509 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
513 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
515 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
518 static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
521 if (pv_eoi_get_user(vcpu, &val) < 0)
522 apic_debug("Can't read EOI MSR value: 0x%llx\n",
523 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
527 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
529 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
530 apic_debug("Can't set EOI MSR value: 0x%llx\n",
531 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
534 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
537 static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
539 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
540 apic_debug("Can't clear EOI MSR value: 0x%llx\n",
541 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
544 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
547 static void apic_update_ppr(struct kvm_lapic *apic)
549 u32 tpr, isrv, ppr, old_ppr;
552 old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
553 tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
554 isr = apic_find_highest_isr(apic);
555 isrv = (isr != -1) ? isr : 0;
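/*
 * PPR is the higher of TPR and the priority class of the highest
 * in-service vector; e.g. TPR 0x30 with vector 0x51 in service gives
 * PPR 0x50 (the class is vector >> 4).
 */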
557 if ((tpr & 0xf0) >= (isrv & 0xf0))
562 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
563 apic, ppr, isr, isrv);
565 if (old_ppr != ppr) {
566 apic_set_reg(apic, APIC_PROCPRI, ppr);
568 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
572 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
574 apic_set_reg(apic, APIC_TASKPRI, tpr);
575 apic_update_ppr(apic);
578 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
580 if (apic_x2apic_mode(apic))
581 return mda == X2APIC_BROADCAST;
583 return GET_APIC_DEST_FIELD(mda) == APIC_BROADCAST;
586 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
588 if (kvm_apic_broadcast(apic, mda))
591 if (apic_x2apic_mode(apic))
592 return mda == kvm_apic_id(apic);
594 return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic));
597 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
601 if (kvm_apic_broadcast(apic, mda))
604 logical_id = kvm_apic_get_reg(apic, APIC_LDR);
606 if (apic_x2apic_mode(apic))
607 return ((logical_id >> 16) == (mda >> 16))
608 && (logical_id & mda & 0xffff) != 0;
610 logical_id = GET_APIC_LOGICAL_ID(logical_id);
611 mda = GET_APIC_DEST_FIELD(mda);
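/* e.g. in cluster mode LDR 0x23 (cluster 2, mask 0x3) matches MDA 0x21 */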
613 switch (kvm_apic_get_reg(apic, APIC_DFR)) {
615 return (logical_id & mda) != 0;
616 case APIC_DFR_CLUSTER:
617 return ((logical_id >> 4) == (mda >> 4))
618 && (logical_id & mda & 0xf) != 0;
620 apic_debug("Bad DFR vcpu %d: %08x\n",
621 apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
626 /* The KVM APIC implementation has two quirks:
627 * - dest always begins at 0 while xAPIC MDA has offset 24,
628 * - IOxAPIC messages have to be delivered (directly) to x2APIC.
630 static u32 kvm_apic_mda(unsigned int dest_id, struct kvm_lapic *source,
631 struct kvm_lapic *target)
633 bool ipi = source != NULL;
634 bool x2apic_mda = apic_x2apic_mode(ipi ? source : target);
636 if (!ipi && dest_id == APIC_BROADCAST && x2apic_mda)
637 return X2APIC_BROADCAST;
639 return x2apic_mda ? dest_id : SET_APIC_DEST_FIELD(dest_id);
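/*
 * For example, an IOAPIC message with dest_id 0xab becomes MDA
 * 0xab000000 for an xAPIC target but stays 0xab for an x2APIC target,
 * and an IOAPIC broadcast (0xff) to an x2APIC target widens to
 * X2APIC_BROADCAST.
 */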
642 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
643 int short_hand, unsigned int dest, int dest_mode)
645 struct kvm_lapic *target = vcpu->arch.apic;
646 u32 mda = kvm_apic_mda(dest, source, target);
648 apic_debug("target %p, source %p, dest 0x%x, "
649 "dest_mode 0x%x, short_hand 0x%x\n",
650 target, source, dest, dest_mode, short_hand);
653 switch (short_hand) {
654 case APIC_DEST_NOSHORT:
655 if (dest_mode == APIC_DEST_PHYSICAL)
656 return kvm_apic_match_physical_addr(target, mda);
658 return kvm_apic_match_logical_addr(target, mda);
660 return target == source;
661 case APIC_DEST_ALLINC:
663 case APIC_DEST_ALLBUT:
664 return target != source;
666 apic_debug("kvm: apic: Bad dest shorthand value %x\n",
672 int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
673 const unsigned long *bitmap, u32 bitmap_size)
678 mod = vector % dest_vcpus;
680 for (i = 0; i <= mod; i++) {
681 idx = find_next_bit(bitmap, bitmap_size, idx + 1);
682 BUG_ON(idx == bitmap_size);
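/*
 * e.g. vector 0x61 (97) hashed over 3 destination vCPUs gives
 * 97 % 3 = 1, selecting the second set bit in @bitmap.
 */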
688 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
689 struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
691 struct kvm_apic_map *map;
692 unsigned long bitmap = 1;
693 struct kvm_lapic **dst;
695 bool ret, x2apic_ipi;
699 if (irq->shorthand == APIC_DEST_SELF) {
700 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
707 x2apic_ipi = src && apic_x2apic_mode(src);
708 if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
713 map = rcu_dereference(kvm->arch.apic_map);
720 if (irq->dest_mode == APIC_DEST_PHYSICAL) {
721 if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
724 dst = &map->phys_map[irq->dest_id];
728 if (!kvm_apic_logical_map_valid(map)) {
733 apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
735 if (cid >= ARRAY_SIZE(map->logical_map))
738 dst = map->logical_map[cid];
740 if (!kvm_lowest_prio_delivery(irq))
743 if (!kvm_vector_hashing_enabled()) {
745 for_each_set_bit(i, &bitmap, 16) {
750 else if (kvm_apic_compare_prio(dst[i]->vcpu,
754 bitmap = (l >= 0) ? 1 << l : 0;
757 unsigned int dest_vcpus;
759 dest_vcpus = hweight16(bitmap);
763 idx = kvm_vector_to_index(irq->vector,
764 dest_vcpus, &bitmap, 16);
767 * We may find a hardware-disabled LAPIC here; if that
768 * is the case, print out an error message once for each
771 if (!dst[idx] && !kvm->arch.disabled_lapic_found) {
772 kvm->arch.disabled_lapic_found = true;
774 "Disabled LAPIC found during irq injection\n");
778 bitmap = (idx >= 0) ? 1 << idx : 0;
783 for_each_set_bit(i, &bitmap, 16) {
788 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
796 * This routine tries to handle interrupts in posted mode, here is how
797 * it deals with different cases:
798 * - For single-destination interrupts, handle it in posted mode
799 * - Else if vector hashing is enabled and it is a lowest-priority
800 * interrupt, handle it in posted mode and use the following mechanism
801 * to find the destination vCPU.
802 * 1. For lowest-priority interrupts, store all the possible
803 * destination vCPUs in an array.
804 * 2. Use "guest vector % max number of destination vCPUs" to find
805 * the right destination vCPU in the array for the lowest-priority
807 * - Otherwise, use remapped mode to inject the interrupt.
809 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
810 struct kvm_vcpu **dest_vcpu)
812 struct kvm_apic_map *map;
814 struct kvm_lapic *dst = NULL;
820 map = rcu_dereference(kvm->arch.apic_map);
825 if (irq->dest_mode == APIC_DEST_PHYSICAL) {
826 if (irq->dest_id == 0xFF)
829 if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
832 dst = map->phys_map[irq->dest_id];
833 if (dst && kvm_apic_present(dst->vcpu))
834 *dest_vcpu = dst->vcpu;
839 unsigned long bitmap = 1;
842 if (!kvm_apic_logical_map_valid(map))
845 apic_logical_id(map, irq->dest_id, &cid, (u16 *)&bitmap);
847 if (cid >= ARRAY_SIZE(map->logical_map))
850 if (kvm_vector_hashing_enabled() &&
851 kvm_lowest_prio_delivery(irq)) {
853 unsigned int dest_vcpus;
855 dest_vcpus = hweight16(bitmap);
859 idx = kvm_vector_to_index(irq->vector, dest_vcpus,
863 * We may find a hardware-disabled LAPIC here; if that
864 * is the case, print out an error message once for each
867 dst = map->logical_map[cid][idx];
868 if (!dst && !kvm->arch.disabled_lapic_found) {
869 kvm->arch.disabled_lapic_found = true;
871 "Disabled LAPIC found during irq injection\n");
875 *dest_vcpu = dst->vcpu;
877 for_each_set_bit(i, &bitmap, 16) {
878 dst = map->logical_map[cid][i];
883 if (dst && kvm_apic_present(dst->vcpu))
884 *dest_vcpu = dst->vcpu;
897 * Add a pending IRQ into lapic.
898 * Return 1 if successfully added and 0 if discarded.
900 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
901 int vector, int level, int trig_mode,
902 unsigned long *dest_map)
905 struct kvm_vcpu *vcpu = apic->vcpu;
907 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
909 switch (delivery_mode) {
911 vcpu->arch.apic_arb_prio++;
913 if (unlikely(trig_mode && !level))
916 /* FIXME add logic for vcpu on reset */
917 if (unlikely(!apic_enabled(apic)))
923 __set_bit(vcpu->vcpu_id, dest_map);
925 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
927 apic_set_vector(vector, apic->regs + APIC_TMR);
929 apic_clear_vector(vector, apic->regs + APIC_TMR);
932 if (vcpu->arch.apicv_active)
933 kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
935 apic_set_irr(vector, apic);
937 kvm_make_request(KVM_REQ_EVENT, vcpu);
944 vcpu->arch.pv.pv_unhalted = 1;
945 kvm_make_request(KVM_REQ_EVENT, vcpu);
951 kvm_make_request(KVM_REQ_SMI, vcpu);
957 kvm_inject_nmi(vcpu);
962 if (!trig_mode || level) {
964 /* assumes that there are only KVM_APIC_INIT/SIPI */
965 apic->pending_events = (1UL << KVM_APIC_INIT);
966 /* make sure pending_events is visible before sending the request */
969 kvm_make_request(KVM_REQ_EVENT, vcpu);
972 apic_debug("Ignoring de-assert INIT to vcpu %d\n",
977 case APIC_DM_STARTUP:
978 apic_debug("SIPI to vcpu %d vector 0x%02x\n",
979 vcpu->vcpu_id, vector);
981 apic->sipi_vector = vector;
982 /* make sure sipi_vector is visible for the receiver */
984 set_bit(KVM_APIC_SIPI, &apic->pending_events);
985 kvm_make_request(KVM_REQ_EVENT, vcpu);
991 * Should only be called by kvm_apic_local_deliver() with LVT0,
992 * before NMI watchdog was enabled. Already handled by
993 * kvm_apic_accept_pic_intr().
998 printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
1005 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
1007 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1010 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1012 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1015 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1019 /* Eoi the ioapic only if the ioapic doesn't own the vector. */
1020 if (!kvm_ioapic_handles_vector(apic, vector))
1023 /* Request a KVM exit to inform the userspace IOAPIC. */
1024 if (irqchip_split(apic->vcpu->kvm)) {
1025 apic->vcpu->arch.pending_ioapic_eoi = vector;
1026 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1030 if (apic_test_vector(vector, apic->regs + APIC_TMR))
1031 trigger_mode = IOAPIC_LEVEL_TRIG;
1033 trigger_mode = IOAPIC_EDGE_TRIG;
1035 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1038 static int apic_set_eoi(struct kvm_lapic *apic)
1040 int vector = apic_find_highest_isr(apic);
1042 trace_kvm_eoi(apic, vector);
1045 * Not every write to EOI has a corresponding ISR bit set;
1046 * one example is when the kernel checks the timer on setup_IO_APIC
1051 apic_clear_isr(vector, apic);
1052 apic_update_ppr(apic);
1054 if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
1055 kvm_hv_synic_send_eoi(apic->vcpu, vector);
1057 kvm_ioapic_send_eoi(apic, vector);
1058 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1063 * this interface assumes a trap-like exit, which has already finished
1064 * the desired side effects, including the vISR and vPPR updates.
1066 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1068 struct kvm_lapic *apic = vcpu->arch.apic;
1070 trace_kvm_eoi(apic, vector);
1072 kvm_ioapic_send_eoi(apic, vector);
1073 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1075 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1077 static void apic_send_ipi(struct kvm_lapic *apic)
1079 u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
1080 u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
1081 struct kvm_lapic_irq irq;
1083 irq.vector = icr_low & APIC_VECTOR_MASK;
1084 irq.delivery_mode = icr_low & APIC_MODE_MASK;
1085 irq.dest_mode = icr_low & APIC_DEST_MASK;
1086 irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1087 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1088 irq.shorthand = icr_low & APIC_SHORT_MASK;
1089 irq.msi_redir_hint = false;
1090 if (apic_x2apic_mode(apic))
1091 irq.dest_id = icr_high;
1093 irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
1095 trace_kvm_apic_ipi(icr_low, irq.dest_id);
1097 apic_debug("icr_high 0x%x, icr_low 0x%x, "
1098 "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
1099 "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
1100 "msi_redir_hint 0x%x\n",
1101 icr_high, icr_low, irq.shorthand, irq.dest_id,
1102 irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
1103 irq.vector, irq.msi_redir_hint);
1105 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1108 static u32 apic_get_tmcct(struct kvm_lapic *apic)
1114 ASSERT(apic != NULL);
1116 /* if initial count is 0, current count should also be 0 */
1117 if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
1118 apic->lapic_timer.period == 0)
1121 remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
1122 if (ktime_to_ns(remaining) < 0)
1123 remaining = ktime_set(0, 0);
1125 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1126 tmcct = div64_u64(ns,
1127 (APIC_BUS_CYCLE_NS * apic->divide_count));
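/*
 * e.g. 4000 ns remaining with divide_count 4 and a 1 ns bus cycle
 * reads back as a current count (TMCCT) of 1000.
 */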
1132 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1134 struct kvm_vcpu *vcpu = apic->vcpu;
1135 struct kvm_run *run = vcpu->run;
1137 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1138 run->tpr_access.rip = kvm_rip_read(vcpu);
1139 run->tpr_access.is_write = write;
1142 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1144 if (apic->vcpu->arch.tpr_access_reporting)
1145 __report_tpr_access(apic, write);
1148 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1152 if (offset >= LAPIC_MMIO_LENGTH)
1157 if (apic_x2apic_mode(apic))
1158 val = kvm_apic_id(apic);
1160 val = kvm_apic_id(apic) << 24;
1163 apic_debug("Access APIC ARBPRI register which is for P6\n");
1166 case APIC_TMCCT: /* Timer CCR */
1167 if (apic_lvtt_tscdeadline(apic))
1170 val = apic_get_tmcct(apic);
1173 apic_update_ppr(apic);
1174 val = kvm_apic_get_reg(apic, offset);
1177 report_tpr_access(apic, false);
1180 val = kvm_apic_get_reg(apic, offset);
1187 static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1189 return container_of(dev, struct kvm_lapic, dev);
1192 static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1195 unsigned char alignment = offset & 0xf;
1197 /* this bitmask has a bit cleared for each reserved register */
1198 static const u64 rmask = 0x43ff01ffffffe70cULL;
1200 if ((alignment + len) > 4) {
1201 apic_debug("KVM_APIC_READ: alignment error %x %d\n",
1206 if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
1207 apic_debug("KVM_APIC_READ: read reserved register %x\n",
1212 result = __apic_read(apic, offset & ~0xf);
1214 trace_kvm_apic_read(offset, result);
1220 memcpy(data, (char *)&result + alignment, len);
1223 printk(KERN_ERR "Local APIC read with len = %x, "
1224 "should be 1, 2, or 4 instead\n", len);
1230 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1232 return kvm_apic_hw_enabled(apic) &&
1233 addr >= apic->base_address &&
1234 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1237 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1238 gpa_t address, int len, void *data)
1240 struct kvm_lapic *apic = to_lapic(this);
1241 u32 offset = address - apic->base_address;
1243 if (!apic_mmio_in_range(apic, address))
1246 apic_reg_read(apic, offset, len, data);
1251 static void update_divide_count(struct kvm_lapic *apic)
1253 u32 tmp1, tmp2, tdcr;
1255 tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
1256 tmp1 = tdcr & 0xf;
1257 tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1258 apic->divide_count = 0x1 << (tmp2 & 0x7);
1260 apic_debug("timer divide count is 0x%x\n",
1261 apic->divide_count);
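/*
 * The divide value is encoded in TDCR bits 0, 1 and 3: e.g. TDCR 0x0
 * gives tmp2 = 1, divide-by-2, while TDCR 0xb gives tmp2 = 8 and
 * 1 << (8 & 7) == 1, divide-by-1.
 */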
1264 static void apic_update_lvtt(struct kvm_lapic *apic)
1266 u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) &
1267 apic->lapic_timer.timer_mode_mask;
1269 if (apic->lapic_timer.timer_mode != timer_mode) {
1270 apic->lapic_timer.timer_mode = timer_mode;
1271 hrtimer_cancel(&apic->lapic_timer.timer);
1275 static void apic_timer_expired(struct kvm_lapic *apic)
1277 struct kvm_vcpu *vcpu = apic->vcpu;
1278 wait_queue_head_t *q = &vcpu->wq;
1279 struct kvm_timer *ktimer = &apic->lapic_timer;
1281 if (atomic_read(&apic->lapic_timer.pending))
1284 atomic_inc(&apic->lapic_timer.pending);
1285 kvm_set_pending_timer(vcpu);
1287 if (waitqueue_active(q))
1288 wake_up_interruptible(q);
1290 if (apic_lvtt_tscdeadline(apic))
1291 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1295 * On APICv, this test will cause a busy wait
1296 * during a higher-priority task.
1299 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1301 struct kvm_lapic *apic = vcpu->arch.apic;
1302 u32 reg = kvm_apic_get_reg(apic, APIC_LVTT);
1304 if (kvm_apic_hw_enabled(apic)) {
1305 int vec = reg & APIC_VECTOR_MASK;
1306 void *bitmap = apic->regs + APIC_ISR;
1308 if (vcpu->arch.apicv_active)
1309 bitmap = apic->regs + APIC_IRR;
1311 if (apic_test_vector(vec, bitmap))
1317 void wait_lapic_expire(struct kvm_vcpu *vcpu)
1319 struct kvm_lapic *apic = vcpu->arch.apic;
1320 u64 guest_tsc, tsc_deadline;
1322 if (!lapic_in_kernel(vcpu))
1325 if (apic->lapic_timer.expired_tscdeadline == 0)
1328 if (!lapic_timer_int_injected(vcpu))
1331 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1332 apic->lapic_timer.expired_tscdeadline = 0;
1333 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1334 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1336 /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
1337 if (guest_tsc < tsc_deadline)
1338 __delay(tsc_deadline - guest_tsc);
1341 static void start_apic_timer(struct kvm_lapic *apic)
1345 atomic_set(&apic->lapic_timer.pending, 0);
1347 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
1348 /* lapic timer in oneshot or periodic mode */
1349 now = apic->lapic_timer.timer.base->get_time();
1350 apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
1351 * APIC_BUS_CYCLE_NS * apic->divide_count;
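/*
 * e.g. TMICT 1000000 with divide_count 16 and the 1 ns bus cycle
 * above yields a 16 ms period.
 */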
1353 if (!apic->lapic_timer.period)
1356 * Do not allow the guest to program periodic timers with a small
1357 * interval, since the hrtimers are not throttled by the host scheduler.
1360 if (apic_lvtt_period(apic)) {
1361 s64 min_period = min_timer_period_us * 1000LL;
1363 if (apic->lapic_timer.period < min_period) {
1364 pr_info_ratelimited(
1365 "kvm: vcpu %i: requested %lld ns "
1366 "lapic timer period limited to %lld ns\n",
1367 apic->vcpu->vcpu_id,
1368 apic->lapic_timer.period, min_period);
1369 apic->lapic_timer.period = min_period;
1373 hrtimer_start(&apic->lapic_timer.timer,
1374 ktime_add_ns(now, apic->lapic_timer.period),
1377 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
1379 "timer initial count 0x%x, period %lldns, "
1380 "expire @ 0x%016" PRIx64 ".\n", __func__,
1381 APIC_BUS_CYCLE_NS, ktime_to_ns(now),
1382 kvm_apic_get_reg(apic, APIC_TMICT),
1383 apic->lapic_timer.period,
1384 ktime_to_ns(ktime_add_ns(now,
1385 apic->lapic_timer.period)));
1386 } else if (apic_lvtt_tscdeadline(apic)) {
1387 /* lapic timer in tsc deadline mode */
1388 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
1391 struct kvm_vcpu *vcpu = apic->vcpu;
1392 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1393 unsigned long flags;
1395 if (unlikely(!tscdeadline || !this_tsc_khz))
1398 local_irq_save(flags);
1400 now = apic->lapic_timer.timer.base->get_time();
1401 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1402 if (likely(tscdeadline > guest_tsc)) {
1403 ns = (tscdeadline - guest_tsc) * 1000000ULL;
1404 do_div(ns, this_tsc_khz);
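/*
 * e.g. a deadline 2,000,000 ticks ahead on a 2 GHz guest TSC
 * (this_tsc_khz = 2,000,000) becomes a 1,000,000 ns hrtimer,
 * shortened by lapic_timer_advance_ns below.
 */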
1405 expire = ktime_add_ns(now, ns);
1406 expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
1407 hrtimer_start(&apic->lapic_timer.timer,
1408 expire, HRTIMER_MODE_ABS);
1410 apic_timer_expired(apic);
1412 local_irq_restore(flags);
1416 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1418 bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1420 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1421 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1422 if (lvt0_in_nmi_mode) {
1423 apic_debug("Receive NMI setting on APIC_LVT0 "
1424 "for cpu %d\n", apic->vcpu->vcpu_id);
1425 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1427 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1431 static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1435 trace_kvm_apic_write(reg, val);
1438 case APIC_ID: /* Local APIC ID */
1439 if (!apic_x2apic_mode(apic))
1440 kvm_apic_set_id(apic, val >> 24);
1446 report_tpr_access(apic, true);
1447 apic_set_tpr(apic, val & 0xff);
1455 if (!apic_x2apic_mode(apic))
1456 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
1462 if (!apic_x2apic_mode(apic)) {
1463 apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
1464 recalculate_apic_map(apic->vcpu->kvm);
1471 if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
1472 mask |= APIC_SPIV_DIRECTED_EOI;
1473 apic_set_spiv(apic, val & mask);
1474 if (!(val & APIC_SPIV_APIC_ENABLED)) {
1478 for (i = 0; i < APIC_LVT_NUM; i++) {
1479 lvt_val = kvm_apic_get_reg(apic,
1480 APIC_LVTT + 0x10 * i);
1481 apic_set_reg(apic, APIC_LVTT + 0x10 * i,
1482 lvt_val | APIC_LVT_MASKED);
1484 apic_update_lvtt(apic);
1485 atomic_set(&apic->lapic_timer.pending, 0);
1491 /* No delay here, so we always clear the pending bit */
1492 apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
1493 apic_send_ipi(apic);
1497 if (!apic_x2apic_mode(apic))
1499 apic_set_reg(apic, APIC_ICR2, val);
1503 apic_manage_nmi_watchdog(apic, val);
1508 /* TODO: Check vector */
1509 if (!kvm_apic_sw_enabled(apic))
1510 val |= APIC_LVT_MASKED;
1512 val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
1513 apic_set_reg(apic, reg, val);
1518 if (!kvm_apic_sw_enabled(apic))
1519 val |= APIC_LVT_MASKED;
1520 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
1521 apic_set_reg(apic, APIC_LVTT, val);
1522 apic_update_lvtt(apic);
1526 if (apic_lvtt_tscdeadline(apic))
1529 hrtimer_cancel(&apic->lapic_timer.timer);
1530 apic_set_reg(apic, APIC_TMICT, val);
1531 start_apic_timer(apic);
1536 apic_debug("KVM_WRITE:TDCR %x\n", val);
1537 apic_set_reg(apic, APIC_TDCR, val);
1538 update_divide_count(apic);
1542 if (apic_x2apic_mode(apic) && val != 0) {
1543 apic_debug("KVM_WRITE:ESR not zero %x\n", val);
1549 if (apic_x2apic_mode(apic)) {
1550 apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
1559 apic_debug("Local APIC Write to read-only register %x\n", reg);
1563 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1564 gpa_t address, int len, const void *data)
1566 struct kvm_lapic *apic = to_lapic(this);
1567 unsigned int offset = address - apic->base_address;
1570 if (!apic_mmio_in_range(apic, address))
1574 * APIC registers must be aligned on a 128-bit boundary.
1575 * 32/64/128-bit registers must be accessed through 32-bit loads and stores.
1578 if (len != 4 || (offset & 0xf)) {
1579 /* Don't shout loud, $infamous_os would cause only noise. */
1580 apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
1586 /* too common printing */
1587 if (offset != APIC_EOI)
1588 apic_debug("%s: offset 0x%x with length 0x%x, and value is "
1589 "0x%x\n", __func__, offset, len, val);
1591 apic_reg_write(apic, offset & 0xff0, val);
1596 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
1598 apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
1600 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
1602 /* emulate APIC access in a trap manner */
1603 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
1607 /* hw has done the conditional check and inst decode */
1610 apic_reg_read(vcpu->arch.apic, offset, 4, &val);
1612 /* TODO: optimize to just emulate side effect w/o one more write */
1613 apic_reg_write(vcpu->arch.apic, offset, val);
1615 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
1617 void kvm_free_lapic(struct kvm_vcpu *vcpu)
1619 struct kvm_lapic *apic = vcpu->arch.apic;
1621 if (!vcpu->arch.apic)
1624 hrtimer_cancel(&apic->lapic_timer.timer);
1626 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
1627 static_key_slow_dec_deferred(&apic_hw_disabled);
1629 if (!apic->sw_enabled)
1630 static_key_slow_dec_deferred(&apic_sw_disabled);
1633 free_page((unsigned long)apic->regs);
1639 *----------------------------------------------------------------------
1640 * LAPIC interface
1641 *----------------------------------------------------------------------
1644 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
1646 struct kvm_lapic *apic = vcpu->arch.apic;
1648 if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
1649 apic_lvtt_period(apic))
1652 return apic->lapic_timer.tscdeadline;
1655 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
1657 struct kvm_lapic *apic = vcpu->arch.apic;
1659 if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
1660 apic_lvtt_period(apic))
1663 hrtimer_cancel(&apic->lapic_timer.timer);
1664 apic->lapic_timer.tscdeadline = data;
1665 start_apic_timer(apic);
1668 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
1670 struct kvm_lapic *apic = vcpu->arch.apic;
1672 apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
1673 | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
1676 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
1680 tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
1682 return (tpr & 0xf0) >> 4;
1685 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1687 u64 old_value = vcpu->arch.apic_base;
1688 struct kvm_lapic *apic = vcpu->arch.apic;
1691 value |= MSR_IA32_APICBASE_BSP;
1692 vcpu->arch.apic_base = value;
1696 vcpu->arch.apic_base = value;
1698 /* update jump label if enable bit changes */
1699 if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
1700 if (value & MSR_IA32_APICBASE_ENABLE)
1701 static_key_slow_dec_deferred(&apic_hw_disabled);
1703 static_key_slow_inc(&apic_hw_disabled.key);
1704 recalculate_apic_map(vcpu->kvm);
1707 if ((old_value ^ value) & X2APIC_ENABLE) {
1708 if (value & X2APIC_ENABLE) {
1709 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
1710 kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
1712 kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
1715 apic->base_address = apic->vcpu->arch.apic_base &
1716 MSR_IA32_APICBASE_BASE;
1718 if ((value & MSR_IA32_APICBASE_ENABLE) &&
1719 apic->base_address != APIC_DEFAULT_PHYS_BASE)
1720 pr_warn_once("APIC base relocation is unsupported by KVM");
1722 /* with FSB delivery interrupt, we can restart APIC functionality */
1723 apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
1724 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
1728 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
1730 struct kvm_lapic *apic;
1733 apic_debug("%s\n", __func__);
1736 apic = vcpu->arch.apic;
1737 ASSERT(apic != NULL);
1739 /* Stop the timer in case it's a reset to an active apic */
1740 hrtimer_cancel(&apic->lapic_timer.timer);
1743 kvm_apic_set_id(apic, vcpu->vcpu_id);
1744 kvm_apic_set_version(apic->vcpu);
1746 for (i = 0; i < APIC_LVT_NUM; i++)
1747 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1748 apic_update_lvtt(apic);
1749 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
1750 apic_set_reg(apic, APIC_LVT0,
1751 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1752 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
1754 apic_set_reg(apic, APIC_DFR, 0xffffffffU);
1755 apic_set_spiv(apic, 0xff);
1756 apic_set_reg(apic, APIC_TASKPRI, 0);
1757 if (!apic_x2apic_mode(apic))
1758 kvm_apic_set_ldr(apic, 0);
1759 apic_set_reg(apic, APIC_ESR, 0);
1760 apic_set_reg(apic, APIC_ICR, 0);
1761 apic_set_reg(apic, APIC_ICR2, 0);
1762 apic_set_reg(apic, APIC_TDCR, 0);
1763 apic_set_reg(apic, APIC_TMICT, 0);
1764 for (i = 0; i < 8; i++) {
1765 apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
1766 apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
1767 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
1769 apic->irr_pending = vcpu->arch.apicv_active;
1770 apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
1771 apic->highest_isr_cache = -1;
1772 update_divide_count(apic);
1773 atomic_set(&apic->lapic_timer.pending, 0);
1774 if (kvm_vcpu_is_bsp(vcpu))
1775 kvm_lapic_set_base(vcpu,
1776 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
1777 vcpu->arch.pv_eoi.msr_val = 0;
1778 apic_update_ppr(apic);
1780 vcpu->arch.apic_arb_prio = 0;
1781 vcpu->arch.apic_attention = 0;
1783 apic_debug("%s: vcpu=%p, id=%d, base_msr="
1784 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
1785 vcpu, kvm_apic_id(apic),
1786 vcpu->arch.apic_base, apic->base_address);
1790 *----------------------------------------------------------------------
1791 * timer interface
1792 *----------------------------------------------------------------------
1795 static bool lapic_is_periodic(struct kvm_lapic *apic)
1797 return apic_lvtt_period(apic);
1800 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
1802 struct kvm_lapic *apic = vcpu->arch.apic;
1804 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
1805 return atomic_read(&apic->lapic_timer.pending);
1810 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
1812 u32 reg = kvm_apic_get_reg(apic, lvt_type);
1813 int vector, mode, trig_mode;
1815 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
1816 vector = reg & APIC_VECTOR_MASK;
1817 mode = reg & APIC_MODE_MASK;
1818 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
1819 return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
1825 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
1827 struct kvm_lapic *apic = vcpu->arch.apic;
1830 kvm_apic_local_deliver(apic, APIC_LVT0);
1833 static const struct kvm_io_device_ops apic_mmio_ops = {
1834 .read = apic_mmio_read,
1835 .write = apic_mmio_write,
1838 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
1840 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
1841 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
1843 apic_timer_expired(apic);
1845 if (lapic_is_periodic(apic)) {
1846 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
1847 return HRTIMER_RESTART;
1849 return HRTIMER_NORESTART;
1852 int kvm_create_lapic(struct kvm_vcpu *vcpu)
1854 struct kvm_lapic *apic;
1856 ASSERT(vcpu != NULL);
1857 apic_debug("apic_init %d\n", vcpu->vcpu_id);
1859 apic = kzalloc(sizeof(*apic), GFP_KERNEL);
1863 vcpu->arch.apic = apic;
1865 apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
1867 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
1869 goto nomem_free_apic;
1873 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
1875 apic->lapic_timer.timer.function = apic_timer_fn;
1878 * APIC is created enabled. This will prevent kvm_lapic_set_base from
1879 * thinking that APIC state has changed.
1881 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
1882 kvm_lapic_set_base(vcpu,
1883 APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);
1885 static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
1886 kvm_lapic_reset(vcpu, false);
1887 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
1896 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
1898 struct kvm_lapic *apic = vcpu->arch.apic;
1901 if (!apic_enabled(apic))
1904 apic_update_ppr(apic);
1905 highest_irr = apic_find_highest_irr(apic);
1906 if ((highest_irr == -1) ||
1907 ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
1912 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
1914 u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
1917 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
1919 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
1920 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
1925 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
1927 struct kvm_lapic *apic = vcpu->arch.apic;
1929 if (atomic_read(&apic->lapic_timer.pending) > 0) {
1930 kvm_apic_local_deliver(apic, APIC_LVTT);
1931 if (apic_lvtt_tscdeadline(apic))
1932 apic->lapic_timer.tscdeadline = 0;
1933 atomic_set(&apic->lapic_timer.pending, 0);
1937 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
1939 int vector = kvm_apic_has_interrupt(vcpu);
1940 struct kvm_lapic *apic = vcpu->arch.apic;
1946 * We get here even with APIC virtualization enabled, if doing
1947 * nested virtualization and L1 runs with the "acknowledge interrupt
1948 * on exit" mode. Then we cannot inject the interrupt via RVI,
1949 * because the process would deliver it through the IDT.
1952 apic_set_isr(vector, apic);
1953 apic_update_ppr(apic);
1954 apic_clear_irr(vector, apic);
1956 if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
1957 apic_clear_isr(vector, apic);
1958 apic_update_ppr(apic);
1964 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
1965 struct kvm_lapic_state *s)
1967 struct kvm_lapic *apic = vcpu->arch.apic;
1969 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
1970 /* set SPIV separately to get count of SW disabled APICs right */
1971 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
1972 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
1973 /* call kvm_apic_set_id() to put apic into apic_map */
1974 kvm_apic_set_id(apic, kvm_apic_id(apic));
1975 kvm_apic_set_version(vcpu);
1977 apic_update_ppr(apic);
1978 hrtimer_cancel(&apic->lapic_timer.timer);
1979 apic_update_lvtt(apic);
1980 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
1981 update_divide_count(apic);
1982 start_apic_timer(apic);
1983 apic->irr_pending = true;
1984 apic->isr_count = vcpu->arch.apicv_active ?
1985 1 : count_vectors(apic->regs + APIC_ISR);
1986 apic->highest_isr_cache = -1;
1987 if (vcpu->arch.apicv_active) {
1988 kvm_x86_ops->hwapic_irr_update(vcpu,
1989 apic_find_highest_irr(apic));
1990 kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
1991 apic_find_highest_isr(apic));
1993 kvm_make_request(KVM_REQ_EVENT, vcpu);
1994 if (ioapic_in_kernel(vcpu->kvm))
1995 kvm_rtc_eoi_tracking_restore_one(vcpu);
1997 vcpu->arch.apic_arb_prio = 0;
2000 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2002 struct hrtimer *timer;
2004 if (!lapic_in_kernel(vcpu))
2007 timer = &vcpu->arch.apic->lapic_timer.timer;
2008 if (hrtimer_cancel(timer))
2009 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
2013 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2015 * Detect whether guest triggered PV EOI since the
2016 * last entry. If yes, set EOI on the guest's behalf.
2017 * Clear PV EOI in guest memory in any case.
2019 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2020 struct kvm_lapic *apic)
2025 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2026 * and KVM_PV_EOI_ENABLED in guest memory as follows:
2028 * KVM_APIC_PV_EOI_PENDING is unset:
2029 * -> host disabled PV EOI.
2030 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2031 * -> host enabled PV EOI, guest did not execute EOI yet.
2032 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2033 * -> host enabled PV EOI, guest executed EOI.
2035 BUG_ON(!pv_eoi_enabled(vcpu));
2036 pending = pv_eoi_get_pending(vcpu);
2038 * Clear pending bit in any case: it will be set again on vmentry.
2039 * While this might not be ideal from a performance point of view,
2040 * this makes sure PV EOI is only enabled when we know it's safe.
2042 pv_eoi_clr_pending(vcpu);
2045 vector = apic_set_eoi(apic);
2046 trace_kvm_pv_eoi(apic, vector);
2049 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2053 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2054 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2056 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2059 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2063 apic_set_tpr(vcpu->arch.apic, data & 0xff);
2067 * apic_sync_pv_eoi_to_guest - called before vmentry
2069 * Detect whether it's safe to enable PV EOI and enable it if yes.
2072 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2073 struct kvm_lapic *apic)
2075 if (!pv_eoi_enabled(vcpu) ||
2076 /* IRR set or many bits in ISR: could be nested. */
2077 apic->irr_pending ||
2078 /* Cache not set: could be safe but we don't bother. */
2079 apic->highest_isr_cache == -1 ||
2080 /* Need EOI to update ioapic. */
2081 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2083 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2084 * so we need not do anything here.
2089 pv_eoi_set_pending(apic->vcpu);
2092 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2095 int max_irr, max_isr;
2096 struct kvm_lapic *apic = vcpu->arch.apic;
2098 apic_sync_pv_eoi_to_guest(vcpu, apic);
2100 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2103 tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
2104 max_irr = apic_find_highest_irr(apic);
2107 max_isr = apic_find_highest_isr(apic);
2110 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
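/*
 * vapic layout, byte-wise: TPR in byte 0, the in-service priority
 * class (max_isr & 0xf0) in byte 1, and the highest pending vector
 * in byte 3.
 */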
2112 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2116 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2119 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2120 &vcpu->arch.apic->vapic_cache,
2121 vapic_addr, sizeof(u32)))
2123 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2125 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2128 vcpu->arch.apic->vapic_addr = vapic_addr;
2132 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2134 struct kvm_lapic *apic = vcpu->arch.apic;
2135 u32 reg = (msr - APIC_BASE_MSR) << 4;
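/* e.g. MSR 0x808 maps to reg 0x80 (APIC_TASKPRI) and 0x830 to APIC_ICR */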
2137 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2140 if (reg == APIC_ICR2)
2143 /* if this is an ICR write, write ICR2 (the high dword) before the command */
2144 if (reg == APIC_ICR)
2145 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2146 return apic_reg_write(apic, reg, (u32)data);
2149 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2151 struct kvm_lapic *apic = vcpu->arch.apic;
2152 u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2154 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2157 if (reg == APIC_DFR || reg == APIC_ICR2) {
2158 apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
2163 if (apic_reg_read(apic, reg, 4, &low))
2165 if (reg == APIC_ICR)
2166 apic_reg_read(apic, APIC_ICR2, 4, &high);
2168 *data = (((u64)high) << 32) | low;
2173 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2175 struct kvm_lapic *apic = vcpu->arch.apic;
2177 if (!lapic_in_kernel(vcpu))
2180 /* if this is an ICR write, write ICR2 (the high dword) before the command */
2181 if (reg == APIC_ICR)
2182 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2183 return apic_reg_write(apic, reg, (u32)data);
2186 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2188 struct kvm_lapic *apic = vcpu->arch.apic;
2191 if (!lapic_in_kernel(vcpu))
2194 if (apic_reg_read(apic, reg, 4, &low))
2196 if (reg == APIC_ICR)
2197 apic_reg_read(apic, APIC_ICR2, 4, &high);
2199 *data = (((u64)high) << 32) | low;
2204 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
2206 u64 addr = data & ~KVM_MSR_ENABLED;
2207 if (!IS_ALIGNED(addr, 4))
2210 vcpu->arch.pv_eoi.msr_val = data;
2211 if (!pv_eoi_enabled(vcpu))
2213 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
2217 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2219 struct kvm_lapic *apic = vcpu->arch.apic;
2223 if (!lapic_in_kernel(vcpu) || !apic->pending_events)
2227 * INITs are latched while in SMM. Because an SMM CPU cannot
2228 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
2229 * and delay processing of INIT until the next RSM.
2232 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2233 if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
2234 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2238 pe = xchg(&apic->pending_events, 0);
2239 if (test_bit(KVM_APIC_INIT, &pe)) {
2240 kvm_lapic_reset(vcpu, true);
2241 kvm_vcpu_reset(vcpu, true);
2242 if (kvm_vcpu_is_bsp(apic->vcpu))
2243 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2245 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2247 if (test_bit(KVM_APIC_SIPI, &pe) &&
2248 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2249 /* evaluate pending_events before reading the vector */
2251 sipi_vector = apic->sipi_vector;
2252 apic_debug("vcpu %d received sipi with vector # %x\n",
2253 vcpu->vcpu_id, sipi_vector);
2254 kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2255 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2259 void kvm_lapic_init(void)
2261 /* do not patch jump label more than once per second */
2262 jump_label_rate_limit(&apic_hw_disabled, HZ);
2263 jump_label_rate_limit(&apic_sw_disabled, HZ);