/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */
#define pr_fmt(fmt) "pit: " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

/* Compute with 96 bit intermediate result: (a*b)/c */
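/*
 * Roughly: a is split into 32-bit halves (a = high * 2^32 + low), so that
 * a * b = rh * 2^32 + rl with rh = high * b and rl = low * b, each of which
 * fits in 64 bits; rh is divided first and its remainder is folded into the
 * low word, which preserves the full 96-bit intermediate result.
 */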
static u64 muldiv64(u64 a, u32 b, u32 c)
        rl = (u64)u.l.low * (u64)b;
        rh = (u64)u.l.high * (u64)b;
        res.l.high = div64_u64(rh, c);
        res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);

static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
                /* XXX: just disable/enable counting */
                /* Restart counting on rising edge. */
                c->count_load_time = ktime_get();

static int pit_get_gate(struct kvm *kvm, int channel)
        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        return kvm->arch.vpit->pit_state.channels[channel].gate;

static s64 __kpit_elapsed(struct kvm *kvm)
        struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

        if (!ps->pit_timer.period)

        /*
         * The Counter does not stop when it reaches zero. In
         * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
         * the highest count, either FFFF hex for binary counting
         * or 9999 for BCD counting, and continues counting.
         * Modes 2 and 3 are periodic; the Counter reloads
         * itself with the initial count and continues counting.
         */
        remaining = hrtimer_get_remaining(&ps->pit_timer.timer);
        elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
        elapsed = mod_64(elapsed, ps->pit_timer.period);

static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
                return __kpit_elapsed(kvm);
        return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));

static int pit_get_count(struct kvm *kvm, int channel)
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        t = kpit_elapsed(kvm, c, channel);
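        /*
         * KVM_PIT_FREQ is the i8254 input clock (about 1.193182 MHz), so d
         * below is the elapsed time expressed in counter ticks.
         */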
        d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
                counter = (c->count - d) & 0xffff;
                /* XXX: may be incorrect for odd counts */
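                /*
                 * Mode 3 (square wave) effectively decrements the counter by
                 * two on each input clock, so 2*d ticks are counted against
                 * the reload value.
                 */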
                counter = c->count - (mod_64((2 * d), c->count));
                counter = c->count - mod_64(d, c->count);

static int pit_get_out(struct kvm *kvm, int channel)
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        t = kpit_elapsed(kvm, c, channel);
        d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
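        /*
         * Derive the level of the OUT pin from the elapsed ticks and the
         * programmed mode: the one-shot modes compare d against the initial
         * count, while the periodic modes (2 and 3) look at the position
         * within the current period.
         */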
                out = (d >= c->count);
                out = (d < c->count);
                out = ((mod_64(d, c->count) == 0) && (d != 0));
                out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
                out = (d == c->count);

static void pit_latch_count(struct kvm *kvm, int channel)
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        if (!c->count_latched) {
                c->latched_count = pit_get_count(kvm, channel);
                c->count_latched = c->rw_mode;

static void pit_latch_status(struct kvm *kvm, int channel)
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        if (!c->status_latched) {
                /* TODO: Return NULL COUNT (bit 6). */
                c->status = ((pit_get_out(kvm, channel) << 7) |
                c->status_latched = 1;

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
        struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,

        spin_lock(&ps->inject_lock);
        value = atomic_dec_return(&ps->pit_timer.pending);
                /* spurious acks can be generated if, for example, the
                 * PIC is being reset. Handle it gracefully here
                 */
                atomic_inc(&ps->pit_timer.pending);
                /* in this case, we had multiple outstanding pit interrupts
                 * that we needed to inject. Reinject
                 */
                queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
        spin_unlock(&ps->inject_lock);

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;
        struct hrtimer *timer;

        if (!kvm_vcpu_is_bsp(vcpu) || !pit)

        timer = &pit->pit_state.pit_timer.timer;
        if (hrtimer_cancel(timer))
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);

static void destroy_pit_timer(struct kvm_pit *pit)
        hrtimer_cancel(&pit->pit_state.pit_timer.timer);
        flush_kthread_work(&pit->expired);

static bool kpit_is_periodic(struct kvm_timer *ktimer)
        struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
        return ps->is_periodic;

static struct kvm_timer_ops kpit_ops = {
        .is_periodic = kpit_is_periodic,

static void pit_do_work(struct kthread_work *work)
        struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
        struct kvm *kvm = pit->kvm;
        struct kvm_vcpu *vcpu;
        struct kvm_kpit_state *ps = &pit->pit_state;
        /*
         * Try to inject pending interrupts when the last one has been acked.
         */
        spin_lock(&ps->inject_lock);
        spin_unlock(&ps->inject_lock);
                kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
                kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);

                /*
                 * Provides NMI watchdog support via Virtual Wire mode.
                 * The route is: PIT -> PIC -> LVT0 in NMI mode.
                 *
                 * Note: Our Virtual Wire implementation is simplified, only
                 * propagating PIT interrupts to all VCPUs when they have set
                 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
                 * VCPU0, and only if its LVT0 is in EXTINT mode.
                 */
                if (kvm->arch.vapics_in_nmi_mode > 0)
                        kvm_for_each_vcpu(i, vcpu, kvm)
                                kvm_apic_nmi_wd_deliver(vcpu);

static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
        struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
        struct kvm_pit *pt = ktimer->kvm->arch.vpit;

        if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
                atomic_inc(&ktimer->pending);
                queue_kthread_work(&pt->worker, &pt->expired);

        if (ktimer->t_ops->is_periodic(ktimer)) {
                hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
                return HRTIMER_RESTART;
                return HRTIMER_NORESTART;

static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
        struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
        struct kvm_timer *pt = &ps->pit_timer;

        if (!irqchip_in_kernel(kvm) || ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)

        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
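        /* interval: time for 'val' ticks of the ~1.193182 MHz PIT clock, in ns */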
        pr_debug("create pit timer, interval is %llu nsec\n", interval);

        /* TODO: the new value only takes effect after the timer is retriggered */
        hrtimer_cancel(&pt->timer);
        flush_kthread_work(&ps->pit->expired);
        pt->period = interval;
        ps->is_periodic = is_period;

        pt->timer.function = pit_timer_fn;
        pt->t_ops = &kpit_ops;
        pt->kvm = ps->pit->kvm;

        atomic_set(&pt->pending, 0);

        hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),

static void pit_load_count(struct kvm *kvm, int channel, u32 val)
        struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

        WARN_ON(!mutex_is_locked(&ps->lock));

        pr_debug("load_count val is %d, channel is %d\n", val, channel);
        /*
         * The largest possible initial count is 0; this is equivalent
         * to 2^16 for binary counting and 10^4 for BCD counting.
         */
        ps->channels[channel].count = val;
                ps->channels[channel].count_load_time = ktime_get();

        /*
         * Two types of timer: mode 1 is one-shot, mode 2 is periodic;
         * otherwise delete the timer.
         */
        switch (ps->channels[0].mode) {
        /* FIXME: enhance mode 4 precision */
                create_pit_timer(kvm, val, 0);
                create_pit_timer(kvm, val, 1);
                destroy_pit_timer(kvm->arch.vpit);

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
        if (hpet_legacy_start) {
                /* save existing mode for later reenablement */
                saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
                kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
                pit_load_count(kvm, channel, val);
                kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
                pit_load_count(kvm, channel, val);

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
        return container_of(dev, struct kvm_pit, dev);

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
        return container_of(dev, struct kvm_pit, speaker_dev);

static inline int pit_in_range(gpa_t addr)
        return ((addr >= KVM_PIT_BASE_ADDRESS) &&
                (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));

static int pit_ioport_write(struct kvm_io_device *this,
                            gpa_t addr, int len, const void *data)
        struct kvm_pit *pit = dev_to_pit(this);
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        struct kvm_kpit_channel_state *s;
        u32 val = *(u32 *) data;

        if (!pit_in_range(addr))
        addr &= KVM_PIT_CHANNEL_MASK;

        mutex_lock(&pit_state->lock);

                pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
                         (unsigned int)addr, len, val);

                        /* Read-Back Command. */
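                        /*
                         * Read-back control word 11xx xxx0: bit 5 clear latches
                         * the count, bit 4 clear latches the status, and bits
                         * 3-1 select which of the three counters are affected.
                         */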
                        for (channel = 0; channel < 3; channel++) {
                                s = &pit_state->channels[channel];
                                if (val & (2 << channel)) {
                                        pit_latch_count(kvm, channel);
                                        pit_latch_status(kvm, channel);
                        /* Select Counter <channel>. */
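                        /*
                         * Control word: bits 7-6 select the counter, bits 5-4
                         * the access mode (0 = latch, 1 = LSB, 2 = MSB,
                         * 3 = LSB then MSB), bits 3-1 the mode, bit 0 BCD.
                         */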
                        s = &pit_state->channels[channel];
                        access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
                                pit_latch_count(kvm, channel);
                                s->read_state = access;
                                s->write_state = access;
                                s->mode = (val >> 1) & 7;
                s = &pit_state->channels[addr];
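                /*
                 * A 16-bit count is written according to the programmed access
                 * mode: LSB only, MSB only, or LSB followed by MSB (the WORD0
                 * state latches the low byte until the high byte arrives).
                 */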
                switch (s->write_state) {
                        pit_load_count(kvm, addr, val);
                        pit_load_count(kvm, addr, val << 8);
                        s->write_latch = val;
                        s->write_state = RW_STATE_WORD1;
                        pit_load_count(kvm, addr, s->write_latch | (val << 8));
                        s->write_state = RW_STATE_WORD0;

        mutex_unlock(&pit_state->lock);

static int pit_ioport_read(struct kvm_io_device *this,
                           gpa_t addr, int len, void *data)
        struct kvm_pit *pit = dev_to_pit(this);
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        struct kvm_kpit_channel_state *s;

        if (!pit_in_range(addr))
        addr &= KVM_PIT_CHANNEL_MASK;

        s = &pit_state->channels[addr];

        mutex_lock(&pit_state->lock);

        if (s->status_latched) {
                s->status_latched = 0;
        } else if (s->count_latched) {
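                /*
                 * A latched count is returned one byte at a time; for a
                 * 16-bit latch the LSB is delivered first and the latch state
                 * is downgraded to MSB for the following read.
                 */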
                switch (s->count_latched) {
                        ret = s->latched_count & 0xff;
                        s->count_latched = 0;
                        ret = s->latched_count >> 8;
                        s->count_latched = 0;
                        ret = s->latched_count & 0xff;
                        s->count_latched = RW_STATE_MSB;
                switch (s->read_state) {
                        count = pit_get_count(kvm, addr);
                        count = pit_get_count(kvm, addr);
                        ret = (count >> 8) & 0xff;
                        count = pit_get_count(kvm, addr);
                        s->read_state = RW_STATE_WORD1;
                        count = pit_get_count(kvm, addr);
                        ret = (count >> 8) & 0xff;
                        s->read_state = RW_STATE_WORD0;

        if (len > sizeof(ret))
        memcpy(data, (char *)&ret, len);

        mutex_unlock(&pit_state->lock);

static int speaker_ioport_write(struct kvm_io_device *this,
                                gpa_t addr, int len, const void *data)
        struct kvm_pit *pit = speaker_to_pit(this);
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        u32 val = *(u32 *) data;

        if (addr != KVM_SPEAKER_BASE_ADDRESS)

        mutex_lock(&pit_state->lock);
        pit_state->speaker_data_on = (val >> 1) & 1;
        pit_set_gate(kvm, 2, val & 1);
        mutex_unlock(&pit_state->lock);

static int speaker_ioport_read(struct kvm_io_device *this,
                               gpa_t addr, int len, void *data)
        struct kvm_pit *pit = speaker_to_pit(this);
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        unsigned int refresh_clock;

        if (addr != KVM_SPEAKER_BASE_ADDRESS)

        /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
        refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

        mutex_lock(&pit_state->lock);
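        /*
         * Port 0x61 as returned below: bit 0 is the channel 2 gate, bit 1 the
         * speaker data enable, bit 4 the refresh clock toggle and bit 5 the
         * current output of PIT channel 2.
         */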
        ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
               (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
        if (len > sizeof(ret))
        memcpy(data, (char *)&ret, len);
        mutex_unlock(&pit_state->lock);

void kvm_pit_reset(struct kvm_pit *pit)
        struct kvm_kpit_channel_state *c;

        mutex_lock(&pit->pit_state.lock);
        pit->pit_state.flags = 0;
        for (i = 0; i < 3; i++) {
                c = &pit->pit_state.channels[i];
                pit_load_count(pit->kvm, i, 0);
        mutex_unlock(&pit->pit_state.lock);

        atomic_set(&pit->pit_state.pit_timer.pending, 0);
        pit->pit_state.irq_ack = 1;

static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
        struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

                atomic_set(&pit->pit_state.pit_timer.pending, 0);
                pit->pit_state.irq_ack = 1;

static const struct kvm_io_device_ops pit_dev_ops = {
        .read = pit_ioport_read,
        .write = pit_ioport_write,

static const struct kvm_io_device_ops speaker_dev_ops = {
        .read = speaker_ioport_read,
        .write = speaker_ioport_write,

/* Caller must hold slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
        struct kvm_kpit_state *pit_state;

        pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);

        pit->irq_source_id = kvm_request_irq_source_id(kvm);
        if (pit->irq_source_id < 0) {

        mutex_init(&pit->pit_state.lock);
        mutex_lock(&pit->pit_state.lock);
        spin_lock_init(&pit->pit_state.inject_lock);

        pid = get_pid(task_tgid(current));
        pid_nr = pid_vnr(pid);

        init_kthread_worker(&pit->worker);
        pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
                                       "kvm-pit/%d", pid_nr);
        if (IS_ERR(pit->worker_task)) {
                mutex_unlock(&pit->pit_state.lock);
                kvm_free_irq_source_id(kvm, pit->irq_source_id);
        init_kthread_work(&pit->expired, pit_do_work);

        kvm->arch.vpit = pit;

        pit_state = &pit->pit_state;
        pit_state->pit = pit;
        hrtimer_init(&pit_state->pit_timer.timer,
                     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        pit_state->irq_ack_notifier.gsi = 0;
        pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
        kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
        pit_state->pit_timer.reinject = true;
        mutex_unlock(&pit->pit_state.lock);

        pit->mask_notifier.func = pit_mask_notifier;
        kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

        kvm_iodevice_init(&pit->dev, &pit_dev_ops);
        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
                                      KVM_PIT_MEM_LENGTH, &pit->dev);

        if (flags & KVM_PIT_SPEAKER_DUMMY) {
                kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
                ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
                                              KVM_SPEAKER_BASE_ADDRESS, 4,
                        goto fail_unregister;

        kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);

        kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
        kvm_free_irq_source_id(kvm, pit->irq_source_id);
        kthread_stop(pit->worker_task);

void kvm_free_pit(struct kvm *kvm)
        struct hrtimer *timer;

        if (kvm->arch.vpit) {
                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev);
                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
                                          &kvm->arch.vpit->speaker_dev);
                kvm_unregister_irq_mask_notifier(kvm, 0,
                                                 &kvm->arch.vpit->mask_notifier);
                kvm_unregister_irq_ack_notifier(kvm,
                                                &kvm->arch.vpit->pit_state.irq_ack_notifier);
                mutex_lock(&kvm->arch.vpit->pit_state.lock);
                timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
                hrtimer_cancel(timer);
                flush_kthread_work(&kvm->arch.vpit->expired);
                kthread_stop(kvm->arch.vpit->worker_task);
                kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
                mutex_unlock(&kvm->arch.vpit->pit_state.lock);
                kfree(kvm->arch.vpit);