/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                     int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
                          u64 data, bool host)
{
        int vector;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
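        /* Vectors 0-15 are reserved for exceptions; only the host may set them. */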
        if (vector < 16 && !host)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior.  The bitmaps are
         * updated here, and atomically queried on fast paths.
         */

        atomic64_set(&synic->sint[sint], data);

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        if (vcpu_id >= atomic_read(&kvm->online_vcpus))
                return NULL;
        vcpu = kvm_get_vcpu(kvm, vcpu_id);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
                                        u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *msg;
        struct hv_message_page *msg_page;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page)) {
                vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
                         gpa);
                return;
        }
        msg_page = kmap_atomic(page);

        msg = &msg_page->sint_message[sint];
        msg->header.message_flags.msg_pending = 0;

        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx, stimers_pending;

        trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

        if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
                synic_clear_sint_msg_pending(synic, sint);

        /* Try to deliver pending Hyper-V SynIC timer messages */
        stimers_pending = 0;
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending &&
                    (stimer->config & HV_STIMER_ENABLE) &&
                    HV_STIMER_SINT(stimer->config) == sint) {
                        set_bit(stimer->index,
                                hv_vcpu->stimer_pending_bitmap);
                        stimers_pending++;
                }
        }
        if (stimers_pending)
                kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if (data & HV_SYNIC_SIEFP_ENABLE)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if (data & HV_SYNIC_SIMP_ENABLE)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
        int ret;

        if (!synic->active)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.dest_id = kvm_apic_id(vcpu->arch.apic);
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, NULL, &irq, NULL);
        trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
        return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vcpu_id);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vcpu_id);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}

static u64 get_time_ref_counter(struct kvm *kvm)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        u64 tsc;

        /*
         * The guest has not set up the TSC page or the clock isn't
         * stable, fall back to get_kvmclock_ns.
         */
        if (!hv->tsc_ref.tsc_sequence)
                return div_u64(get_kvmclock_ns(kvm), 100);

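        /* The reference counter is in 100ns units: ref = tsc * scale / 2^64 + offset. */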
        vcpu = kvm_get_vcpu(kvm, 0);
        tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
                + hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
                                    stimer->index);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
                                     stimer->index);
        stimer_mark_pending(stimer, true);

        return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config & HV_STIMER_PERIODIC) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                trace_kvm_hv_stimer_start_periodic(
                                        stimer_to_vcpu(stimer)->vcpu_id,
                                        stimer->index,
                                        time_now, stimer->exp_time);

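                /*
                 * The reference time is in 100ns units while hrtimer works in
                 * nanoseconds, hence the multiplication by 100.
                 */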
                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * specification v4(15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_pending(stimer, false);
                return 0;
        }

        trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
                                           stimer->index,
                                           time_now, stimer->count);

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, config, host);

        stimer_cleanup(stimer);
        if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
                config &= ~HV_STIMER_ENABLE;
        stimer->config = config;
        stimer_mark_pending(stimer, false);
        return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
                                      stimer->index, count, host);

        stimer_cleanup(stimer);
        stimer->count = count;
        if (stimer->count == 0)
                stimer->config &= ~HV_STIMER_ENABLE;
        else if (stimer->config & HV_STIMER_AUTOENABLE)
                stimer->config |= HV_STIMER_ENABLE;
        stimer_mark_pending(stimer, false);
        return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config;
        return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *dst_msg;
        int r;
        struct hv_message_page *msg_page;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                return -EFAULT;

        msg_page = kmap_atomic(page);
        dst_msg = &msg_page->sint_message[sint];
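        /*
         * If the message slot is still in use, flag it as pending so the
         * guest will signal EOM; delivery is then retried from
         * kvm_hv_notify_acked_sint().
         */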
        if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
                         src_msg->header.message_type) != HVMSG_NONE) {
                dst_msg->header.message_flags.msg_pending = 1;
                r = -EAGAIN;
        } else {
                memcpy(&dst_msg->u.payload, &src_msg->u.payload,
                       src_msg->header.payload_size);
                dst_msg->header.message_type = src_msg->header.message_type;
                dst_msg->header.payload_size = src_msg->header.payload_size;
                r = synic_set_irq(synic, sint);
                if (r >= 1)
                        r = 0;
                else if (r == 0)
                        r = -EFAULT;
        }
        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        return r;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        int r;

        stimer->msg_pending = true;
        r = stimer_send_msg(stimer);
        trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, r);
        if (!r) {
                stimer->msg_pending = false;
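                /* A one-shot timer is disarmed once its message is delivered. */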
                if (!(stimer->config & HV_STIMER_PERIODIC))
                        stimer->config &= ~HV_STIMER_ENABLE;
        }
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now, exp_time;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config & HV_STIMER_ENABLE) {
                                exp_time = stimer->exp_time;

                                if (exp_time) {
                                        time_now =
                                                get_time_ref_counter(vcpu->kvm);
                                        if (time_now >= exp_time)
                                                stimer_expiration(stimer);
                                }

                                if ((stimer->config & HV_STIMER_ENABLE) &&
                                    stimer->count)
                                        stimer_start(stimer);
                                else
                                        stimer_cleanup(stimer);
                        }
                }
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
        /*
         * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
         * so deactivate APICv.
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        vcpu_to_synic(vcpu)->active = true;
        return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
                r = true;
                break;
        }

        return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        *pdata = hv->hv_crash_param[index];
        return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                          hv->hv_crash_param[0],
                          hv->hv_crash_param[1],
                          hv->hv_crash_param[2],
                          hv->hv_crash_param[3],
                          hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        hv->hv_crash_param[index] = data;
        return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
                                        HV_REFERENCE_TSC_PAGE *tsc_ref)
{
        u64 max_mul;

        if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
                return false;

        /*
         * check if the scale would overflow; if so, we fall back to the time ref counter
         *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
         *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
         *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
         */
        max_mul = 100ull << (32 - hv_clock->tsc_shift);
        if (hv_clock->tsc_to_system_mul >= max_mul)
                return false;

        /*
         * Otherwise compute the scale and offset according to the formulas
         * derived above.
         */
        tsc_ref->tsc_scale =
                mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
                                hv_clock->tsc_to_system_mul,
                                100);

        tsc_ref->tsc_offset = hv_clock->system_time;
        do_div(tsc_ref->tsc_offset, 100);
        tsc_ref->tsc_offset -=
                mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
        return true;
}

void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        u32 tsc_seq;
        u64 gfn;

        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;

        mutex_lock(&kvm->arch.hyperv.hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;

        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
         * change in the master clock, do not bother with caching.
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
                goto out_unlock;

        /*
         * While we're computing and writing the parameters, force the
         * guest to use the time reference count MSR.
         */
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                goto out_unlock;

        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
                goto out_unlock;

        /* Ensure sequence is zero before writing the rest of the struct.  */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
                goto out_unlock;

        /*
         * Now switch to the TSC page mechanism by writing the sequence.
         */
        tsc_seq++;
        if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
                tsc_seq = 1;

        /* Write the struct entirely before the non-zero sequence.  */
        smp_wmb();

        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
        mutex_unlock(&kvm->arch.hyperv.hv_lock);
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
                if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);

        return div_u64(utime + stime, 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
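                /* Config and count MSRs are interleaved, hence the divide by two. */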
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = get_time_ref_counter(kvm);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                int r;
                struct kvm_vcpu *v;

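                /* Report the vCPU's position in the vcpu array as its VP index. */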
                kvm_for_each_vcpu(r, v, vcpu->kvm) {
                        if (v == vcpu) {
                                data = r;
                                break;
                        }
                }
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_APIC_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
                                         pdata);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
                                        pdata);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        bool longmode;

        longmode = is_64_bit_mode(vcpu);
        if (longmode)
                kvm_register_write(vcpu, VCPU_REGS_RAX, result);
        else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
        }
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
        return 1;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * A hypercall generates #UD from non-zero CPL or real mode,
         * per the Hyper-V spec.
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        longmode = is_64_bit_mode(vcpu);

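        /*
         * In 32-bit mode the 64-bit hypercall arguments are passed in
         * register pairs: EDX:EAX (input), EBX:ECX (input GPA) and
         * EDI:ESI (output GPA).
         */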
        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        /* Hypercall continuation is not supported yet */
        if (rep_cnt || rep_idx) {
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                goto set_result;
        }

        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu);
                break;
        case HVCALL_POST_MESSAGE:
        case HVCALL_SIGNAL_EVENT:
                /* don't bother userspace if it has no way to handle it */
                if (!vcpu_to_synic(vcpu)->active) {
                        res = HV_STATUS_INVALID_HYPERCALL_CODE;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = param;
                vcpu->run->hyperv.u.hcall.params[0] = ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

set_result:
        ret = res | (((u64)rep_done & 0xfff) << 32);
        kvm_hv_hypercall_set_result(vcpu, ret);
        return 1;
}