/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"	/* assumed local header defining trace_vgic_update_irq_pending() */
#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
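/*
 * Global state shared by the GICv2 and GICv3 back-ends. It presumably
 * lives in the .hyp.text section so that it is also mapped at EL2, where
 * the world-switch code needs to access it.
 */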
struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
/*
 * Locking order is always:
 *   vgic_cpu->ap_list_lock
 *     vgic_irq->irq_lock
 *
 * (that is, always take the ap_list_lock before the struct vgic_irq lock).
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */
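/*
 * Taking the ap_list_locks in ascending vcpu_id order avoids an ABBA
 * deadlock when two VCPUs migrate interrupts towards each other at the
 * same time (see vgic_prune_ap_list(), which takes two of these locks).
 */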
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* SPIs */
	if (intid <= VGIC_MAX_SPI)
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

	/* LPIs are not yet covered */
	if (intid >= VGIC_MIN_LPI)
		return NULL;

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq->pending) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/* If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}
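/*
 * Note that the oracle only computes a target; the actual queuing happens
 * in vgic_queue_irq_unlock(), which re-runs the oracle after re-taking the
 * locks to catch any state change that happened in between.
 */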
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irqa->pending;
	pendb = irqb->enabled && irqb->pending;

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;

out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}
/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
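/*
 * list_sort() is a stable merge sort, so returning 0 from vgic_irq_cmp()
 * really does preserve the relative order of equal-priority interrupts.
 */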
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
{
	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}
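/*
 * For example, injecting level=1 on a level-triggered line that is already
 * high is ignored, as is injecting level=0 (a falling edge) on an
 * edge-triggered line.
 */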
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock(&irq->irq_lock);

	/* someone can do stuff here, which we re-check below */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	kvm_vcpu_kick(vcpu);

	return true;
}
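/*
 * The unlock/relock dance above follows from the locking order: the
 * ap_list_lock must be taken before the irq_lock, but we enter with only
 * the irq_lock held, so it has to be dropped first and the oracle result
 * revalidated afterwards.
 */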
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int intid, bool level,
				   bool mapped_irq)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	if (irq->hw != mapped_irq)
		return -EINVAL;

	spin_lock(&irq->irq_lock);

	if (!vgic_validate_injection(irq, level)) {
		/* Nothing to see here, move along... */
		spin_unlock(&irq->irq_lock);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL) {
		irq->line_level = level;
		irq->pending = level || irq->soft_pending;
	} else {
		irq->pending = true;
	}

	vgic_queue_irq_unlock(kvm, irq);

	return 0;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level)
{
	return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
}
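/*
 * This entry point is only for purely virtual interrupts (mapped_irq ==
 * false); interrupts mapped to a hardware IRQ (irq->hw) are expected to be
 * injected through a separate mapped-IRQ variant of this call.
 */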
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

retry:
	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);
}
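/*
 * The helpers below simply dispatch to the GICv2 or GICv3 specific
 * implementation, depending on the type of GIC detected on the host.
 */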
static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_process_maintenance(vcpu);
	else
		vgic_v3_process_maintenance(vcpu);
}
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}
/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}
static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}
static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}
/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source)
			count += hweight8(irq->source);
		else
			count++;
		spin_unlock(&irq->irq_lock);
	}

	return count;
}
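/*
 * On GICv2, an SGI carries a bitmap of source CPUs and each set bit must
 * be delivered as a separate list register entry, which is why a single
 * SGI can count for more than one above (hweight8() counts the set bits).
 */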
/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
		vgic_set_underflow(vcpu);
		vgic_sort_ap_list(vcpu);
	}

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		if (unlikely(vgic_target_oracle(irq) != vcpu))
			goto next;

		/*
		 * If we get an SGI with multiple sources, try to get
		 * them in all at once.
		 */
		do {
			vgic_populate_lr(vcpu, irq, count++);
		} while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr)
			break;
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}
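/*
 * When there are more interrupts on the ap_list than list registers, the
 * list is sorted so the highest-priority (and active) interrupts get an LR,
 * and the underflow maintenance interrupt is enabled so that the remaining
 * ones can be flushed once LRs free up.
 */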
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	vgic_process_maintenance_interrupt(vcpu);
	vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq->pending && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);

	return pending;
}