/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;

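/*
 * The vcpu is being scheduled out: the hardware active state of the vtimer
 * interrupt can no longer be trusted, so drop the cached "active clear"
 * state (see kvm_timer_flush_hwstate()).
 */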
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
        vcpu_vtimer(vcpu)->active_cleared_last = false;
}

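/* Read the raw physical counter through the host arch timer's timecounter. */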
static u64 kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
        return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
        timer->armed = true;
        hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}

static void timer_disarm(struct arch_timer_cpu *timer)
{
        if (timer_is_armed(timer)) {
                hrtimer_cancel(&timer->timer);
                cancel_work_sync(&timer->expired);
                timer->armed = false;
        }
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

        /*
         * We disable the timer in the world switch and let it be
         * handled by kvm_timer_sync_hwstate(). Getting a timer
         * interrupt at this point is a sure sign of some major
         * breakage.
         */
        pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
        return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);

        /*
         * If the vcpu is blocked we want to wake it up so that it will see
         * the timer has expired when entering the guest.
         */
        kvm_vcpu_wake_up(vcpu);
}

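/*
 * Return the number of nanoseconds until the guest's virtual timer expires,
 * based on CVAL and the guest view of the counter (physical counter minus
 * CNTVOFF), or 0 if it has already expired.
 */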
static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
{
        u64 cval, now;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        cval = vtimer->cnt_cval;
        now = kvm_phys_timer_read() - vtimer->cntvoff;

        if (now < cval) {
                u64 ns;

                ns = cyclecounter_cyc2ns(timecounter->cc,
                                         cval - now,
                                         timecounter->mask,
                                         &timecounter->frac);
                return ns;
        }

        return 0;
}

static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If we should have slept longer, restart it.
         */
        ns = kvm_timer_compute_delta(vcpu);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        schedule_work(&timer->expired);
        return HRTIMER_NORESTART;
}

static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        return !(vtimer->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
                (vtimer->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}

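/*
 * Returns true when the virtual timer output should be asserted: the timer
 * is enabled and unmasked, and the guest view of the counter has reached CVAL.
 */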
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        u64 cval, now;

        if (!kvm_timer_irq_can_fire(vcpu))
                return false;

        cval = vtimer->cnt_cval;
        now = kvm_phys_timer_read() - vtimer->cntvoff;

        return cval <= now;
}

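/*
 * Record the new timer output level in the vtimer context and forward it to
 * the VGIC as an interrupt level change; the vgic must already be initialized.
 */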
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
{
        int ret;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        BUG_ON(!vgic_initialized(vcpu->kvm));

        vtimer->active_cleared_last = false;
        vtimer->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, vtimer->irq.irq,
                                   vtimer->irq.level);

        ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                  vtimer->irq.irq,
                                  vtimer->irq.level);
        WARN_ON(ret);
}

/*
 * Check if there was a change in the timer state (should we raise or lower
 * the line level to the GIC).
 */
static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        /*
         * If userspace modified the timer registers via SET_ONE_REG before
         * the vgic was initialized, we mustn't set the vtimer->irq.level value
         * because the guest would never see the interrupt. Instead wait
         * until we call this function from kvm_timer_flush_hwstate.
         */
        if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
                return -ENODEV;

        if (kvm_timer_should_fire(vcpu) != vtimer->irq.level)
                kvm_timer_update_irq(vcpu, !vtimer->irq.level);

        return 0;
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        BUG_ON(timer_is_armed(timer));

        /*
         * No need to schedule a background timer if the guest timer has
         * already expired, because kvm_vcpu_block will return before putting
         * the thread to sleep.
         */
        if (kvm_timer_should_fire(vcpu))
                return;

        /*
         * If the timer is not capable of raising interrupts (disabled or
         * masked), then there's no more work for us to do.
         */
        if (!kvm_timer_irq_can_fire(vcpu))
                return;

        /* The timer has not yet expired, schedule a background timer */
        timer_arm(timer, kvm_timer_compute_delta(vcpu));
}

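/*
 * Cancel the background timer once the vcpu is no longer blocked; the timer
 * is handled again through kvm_timer_flush_hwstate()/kvm_timer_sync_hwstate().
 */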
void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        timer_disarm(timer);
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the host,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        bool phys_active;
        int ret;

        if (kvm_timer_update_state(vcpu))
                return;

        /*
         * If we enter the guest with the virtual input level to the VGIC
         * asserted, then we have already told the VGIC what we need to, and
         * we don't need to exit from the guest until the guest deactivates
         * the already injected interrupt, so therefore we should set the
         * hardware active state to prevent unnecessary exits from the guest.
         *
         * Also, if we enter the guest with the virtual timer interrupt active,
         * then it must be active on the physical distributor, because we set
         * the HW bit and the guest must be able to deactivate the virtual and
         * physical interrupt at the same time.
         *
         * Conversely, if the virtual input level is deasserted and the virtual
         * interrupt is not active, then always clear the hardware active state
         * to ensure that hardware interrupts from the timer trigger a guest
         * exit.
         */
        phys_active = vtimer->irq.level ||
                      kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);

        /*
         * We want to avoid hitting the (re)distributor as much as
         * possible, as this is a potentially expensive MMIO access
         * (not to mention locks in the irq layer), and a solution for
         * this is to cache the "active" state in memory.
         *
         * Things to consider: we cannot cache an "active set" state,
         * because the HW can change this behind our back (it becomes
         * "clear" in the HW). We must then restrict the caching to
         * the "clear" state.
         *
         * The cache is invalidated on:
         * - vcpu put, indicating that the HW cannot be trusted to be
         *   in a sane state on the next vcpu load,
         * - any change in the interrupt state
         *
         * Usage conditions:
         * - cached value is "active clear"
         * - value to be programmed is "active clear"
         */
        if (vtimer->active_cleared_last && !phys_active)
                return;

        ret = irq_set_irqchip_state(host_vtimer_irq,
                                    IRQCHIP_STATE_ACTIVE,
                                    phys_active);
        WARN_ON(ret);

        vtimer->active_cleared_last = !phys_active;
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the guest,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        BUG_ON(timer_is_armed(timer));

        /*
         * The guest could have modified the timer registers or the timer
         * could have expired, update the timer state.
         */
        kvm_timer_update_state(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                         const struct kvm_irq_level *irq)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        /*
         * The vcpu timer irq number cannot be determined in
         * kvm_timer_vcpu_init() because it is called much before
         * kvm_vcpu_set_target(). To handle this, we determine
         * vcpu timer irq number when the vcpu is reset.
         */
        vtimer->irq.irq = irq->irq;

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7. We provide an implementation that always
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
        vtimer->cnt_ctl = 0;
        kvm_timer_update_state(vcpu);

        return 0;
}

/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
        int i;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        mutex_lock(&kvm->lock);
        kvm_for_each_vcpu(i, tmp, kvm)
                vcpu_vtimer(tmp)->cntvoff = cntvoff;

        /*
         * When called from the vcpu create path, the CPU being created is not
         * included in the loop above, so we just set it here as well.
         */
        vcpu_vtimer(vcpu)->cntvoff = cntvoff;
        mutex_unlock(&kvm->lock);
}

void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        /* Synchronize cntvoff across all vtimers of a VM. */
        update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());

        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
        hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
}

static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}

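/*
 * Userspace (KVM_SET_ONE_REG) accessor: note that writing
 * KVM_REG_ARM_TIMER_CNT recomputes CNTVOFF for all vcpus so that the guest
 * observes the requested counter value.
 */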
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                vtimer->cnt_ctl = value;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                vtimer->cnt_cval = value;
                break;
        default:
                return -1;
        }

        kvm_timer_update_state(vcpu);
        return 0;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return vtimer->cnt_ctl;
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vtimer->cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return vtimer->cnt_cval;
        }
        return (u64)-1;
}

static int kvm_timer_starting_cpu(unsigned int cpu)
{
        kvm_timer_init_interrupt(NULL);
        return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(host_vtimer_irq);
        return 0;
}

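/*
 * One-time host setup: fetch the timecounter and the virtual timer PPI from
 * the arch timer driver, request the PPI as a per-CPU interrupt, and register
 * CPU hotplug callbacks that enable/disable it on each CPU.
 */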
int kvm_timer_hyp_init(void)
{
        struct arch_timer_kvm_info *info;
        int err;

        info = arch_timer_get_kvm_info();
        timecounter = &info->timecounter;

        if (!timecounter->cc) {
                kvm_err("kvm_arch_timer: uninitialized timecounter\n");
                return -ENODEV;
        }

        if (info->virtual_irq <= 0) {
                kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
                        info->virtual_irq);
                return -ENODEV;
        }
        host_vtimer_irq = info->virtual_irq;

        host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
        if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
            host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
                kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
                        host_vtimer_irq);
                host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
        }

        err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        host_vtimer_irq, err);
                return err;
        }

        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);

        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
                          "kvm/arm/timer:starting", kvm_timer_starting_cpu,
                          kvm_timer_dying_cpu);
        return 0;
}

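/*
 * Tear down the per-vcpu timer state: cancel any pending background timer
 * and unmap the vtimer interrupt from its physical counterpart in the vgic.
 */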
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        timer_disarm(timer);
        kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
}

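/*
 * Wire the guest's vtimer interrupt to the underlying hardware interrupt
 * (done once per VCPU) so that the GIC can track its active state in
 * hardware across world switches.
 */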
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct irq_desc *desc;
        struct irq_data *data;
        int phys_irq;
        int ret;

        if (timer->enabled)
                return 0;

        /*
         * Find the physical IRQ number corresponding to the host_vtimer_irq
         */
        desc = irq_to_desc(host_vtimer_irq);
        if (WARN_ON(!desc)) {
                kvm_err("%s: no interrupt descriptor\n", __func__);
                return -EINVAL;
        }

        data = irq_desc_get_irq_data(desc);
        while (data->parent_data)
                data = data->parent_data;

        phys_irq = data->hwirq;

        /*
         * Tell the VGIC that the virtual interrupt is tied to a
         * physical interrupt. We do that once per VCPU.
         */
        ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
        if (ret)
                return ret;

        timer->enabled = 1;

        return 0;
}

/*
 * On VHE system, we only need to configure trap on physical timer and counter
 * accesses in EL0 and EL1 once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
 * and this makes those bits have no effect for the host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
        /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
        u32 cnthctl_shift = 10;
        u64 val;

        /*
         * Disallow physical timer access for the guest.
         * Physical counter access is allowed.
         */
        val = read_sysreg(cnthctl_el2);
        val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
        val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
        write_sysreg(val, cnthctl_el2);
}