/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}
static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}
static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	if (vector < 16 && !host)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */

	atomic64_set(&synic->sint[sint], data);

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
	return 0;
}
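
/*
 * Editorial sketch (not from the original file): the payoff of the two
 * bitmaps is on delivery/EOI fast paths, which can test a vector with a
 * single lockless bit operation instead of scanning all 16 SINTs,
 * roughly:
 *
 *	if (test_bit(vector, vcpu_to_synic(vcpu)->vec_bitmap))
 *		kvm_hv_synic_send_eoi(vcpu, vector);
 *
 * The actual consumers live in the local APIC code.
 */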
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (vpidx < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
			return vcpu;
	return NULL;
}
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu)
		return NULL;
	synic = vcpu_to_synic(vcpu);
	return (synic->active) ? synic : NULL;
}
static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
					u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *msg;
	struct hv_message_page *msg_page;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page)) {
		vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
			 gpa);
		return;
	}
	msg_page = kmap_atomic(page);

	msg = &msg_page->sint_message[sint];
	msg->header.message_flags.msg_pending = 0;

	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx, stimers_pending;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
		synic_clear_sint_msg_pending(synic, sint);

	/* Try to deliver pending Hyper-V SynIC timer messages */
	stimers_pending = 0;
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending &&
		    (stimer->config & HV_STIMER_ENABLE) &&
		    HV_STIMER_SINT(stimer->config) == sint) {
			set_bit(stimer->index,
				hv_vcpu->stimer_pending_bitmap);
			stimers_pending++;
		}
	}
	if (stimers_pending)
		kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int ret;

	if (!synic->active)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
	int ret;

	if (!synic->active)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}
void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}
static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * If the guest has not set up the TSC page, or the clock isn't
	 * stable, fall back to get_kvmclock_ns().
	 */
	if (!hv->tsc_ref.tsc_sequence)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) +
		hv->tsc_ref.tsc_offset;
}
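
/*
 * Editorial note: the return expression above is the Hyper-V reference
 * time formula from the TLFS, time = ((tsc * scale) >> 64) + offset,
 * with the result in 100ns units. For example, with scale = 2^64 / 100,
 * a 1 GHz TSC advances the counter by one unit every 100 ticks.
 */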
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}
static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}
static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config & HV_STIMER_PERIODIC) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * Specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}
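
/*
 * Worked example (editorial): take a periodic timer with
 * stimer->count = 1000 (100us in 100ns units) whose expiration was
 * missed by time_now - exp_time = 2500 units. div64_u64_rem() gives
 * remainder = 500, so the next expiration is set to time_now + 500:
 * the timer stays phase-aligned with its original period instead of
 * drifting by the amount of the delay.
 */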
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
		config &= ~HV_STIMER_ENABLE;
	stimer->config = config;
	stimer_mark_pending(stimer, false);
	return 0;
}
static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config &= ~HV_STIMER_ENABLE;
	else if (stimer->config & HV_STIMER_AUTOENABLE)
		stimer->config |= HV_STIMER_ENABLE;
	stimer_mark_pending(stimer, false);
	return 0;
}
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *dst_msg;
	int r;
	struct hv_message_page *msg_page;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	msg_page = kmap_atomic(page);
	dst_msg = &msg_page->sint_message[sint];
	if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
			 src_msg->header.message_type) != HVMSG_NONE) {
		dst_msg->header.message_flags.msg_pending = 1;
		r = -EAGAIN;
	} else {
		memcpy(&dst_msg->u.payload, &src_msg->u.payload,
		       src_msg->header.payload_size);
		dst_msg->header.message_type = src_msg->header.message_type;
		dst_msg->header.payload_size = src_msg->header.payload_size;
		r = synic_set_irq(synic, sint);
		if (r >= 1)
			r = 0;
		else if (r == 0)
			r = -EFAULT;
	}
	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	return r;
}
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(vcpu_to_synic(vcpu),
				 HV_STIMER_SINT(stimer->config), msg);
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r;

	stimer->msg_pending = true;
	r = stimer_send_msg(stimer);
	trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config & HV_STIMER_PERIODIC))
			stimer->config &= ~HV_STIMER_ENABLE;
	}
}
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config & HV_STIMER_ENABLE) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config & HV_STIMER_ENABLE) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);
}
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}
static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}
void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
}
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

	/*
	 * Hyper-V SynIC auto-EOI SINTs are not compatible with APICv,
	 * so deactivate APICv.
	 */
	kvm_vcpu_deactivate_apicv(vcpu);
	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	return 0;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		r = true;
		break;
	}

	return r;
}
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}
static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			  hv->hv_crash_param[0],
			  hv->hv_crash_param[1],
			  hv->hv_crash_param[2],
			  hv->hv_crash_param[3],
			  hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}
static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
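
/*
 * Worked example (editorial note, not from the original file): for a
 * 1 GHz guest TSC, kvmclock can use tsc_to_system_mul = 2^31 and
 * tsc_shift = 1 (each tick is 2^31 * 2^(1-32) = 1 ns), which gives
 * scale = 2^31 * 2^(32+1) / 100 = 2^64 / 100. Then
 * ticks * scale / 2^64 = ticks / 100: the reference counter advances
 * one 100ns unit every 100 TSC ticks, as expected for 1 GHz.
 */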
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					HV_REFERENCE_TSC_PAGE *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * Check if scale would overflow; if so, use the time ref counter:
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}
void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return;

	mutex_lock(&kvm->arch.hyperv.hv_lock);
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_unlock;

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_unlock;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_unlock;

	/* Ensure sequence is zero before writing the rest of the struct. */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_unlock;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence. */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	kvm_write_guest(kvm, gfn_to_gpa(gfn),
			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
	mutex_unlock(&kvm->arch.hyperv.hv_lock);
}
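
/*
 * Illustrative sketch (editorial, based on the TLFS; not part of this
 * file): the zero-then-rewrite dance above exists because the guest
 * consumes the page with a seqlock-style retry loop, roughly:
 *
 *	do {
 *		seq = tsc_page->tsc_sequence;
 *		if (!seq)
 *			break;		// fall back to TIME_REF_COUNT MSR
 *		smp_rmb();
 *		scale = tsc_page->tsc_scale;
 *		offset = tsc_page->tsc_offset;
 *		smp_rmb();
 *	} while (seq != tsc_page->tsc_sequence);
 *	time = mul_u64_u64_shr(rdtsc(), scale, 64) + offset;
 *
 * so a zero sequence safely parks readers while the body is rewritten.
 */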
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		hv->hv_tsc_emulation_status = data;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}
/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		if (!host)
			return 1;
		hv->vp_index = (u32)data;
		break;
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
					data, host);
	}
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		data = hv->hv_reenlightenment_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		data = hv->hv_tsc_emulation_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		data = hv->hv_tsc_emulation_status;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		data = hv->vp_index;
		break;
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
					pdata);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
		break;
	case HV_X64_MSR_APIC_FREQUENCY:
		data = APIC_BUS_FREQUENCY;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata);
}
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_mode(vcpu);
	if (longmode)
		kvm_register_write(vcpu, VCPU_REGS_RAX, result);
	else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
	}
}
static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
	return 1;
}
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
{
	struct eventfd_ctx *eventfd;

	if (unlikely(!fast)) {
		int ret;
		gpa_t gpa = param;

		if ((gpa & (__alignof__(param) - 1)) ||
		    offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number". However, we
	 * have no use for it, and in all known use cases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (param & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (param & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* conn_to_evt is protected by vcpu->kvm->srcu */
	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * hypercall generates #UD from non-zero CPL and in real mode,
	 * per the Hyper-V spec
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	/* Hypercall continuation is not supported yet */
	if (rep_cnt || rep_idx) {
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		goto set_result;
	}

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		res = kvm_hvcall_signal_event(vcpu, fast, ingpa);
		if (res != HV_STATUS_INVALID_PORT_ID)
			break;
		/* maybe userspace knows this conn_id: fall through */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (!vcpu_to_synic(vcpu)->active) {
			res = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

set_result:
	ret = res | (((u64)rep_done & 0xfff) << 32);
	kvm_hv_hypercall_set_result(vcpu, ret);
	return 1;
}
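
/*
 * Editorial note: the decoding above follows the hypercall input value
 * layout from the TLFS: bits 0-15 hold the call code, bit 16 the "fast"
 * flag, bits 32-43 the rep count and bits 48-59 the rep start index.
 * As a sketch, param = 0x1005d decodes to code 0x5d (HVCALL_SIGNAL_EVENT)
 * with the fast bit set, i.e. the parameter arrives in a register rather
 * than in guest memory.
 */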
void kvm_hv_init_vm(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hyperv.hv_lock);
	idr_init(&kvm->arch.hyperv.conn_to_evt);
}
void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&kvm->arch.hyperv.conn_to_evt);
}
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}
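
/*
 * Illustrative userspace sketch (editorial, not part of this file): a
 * VMM wires a guest-visible connection id to an eventfd roughly like:
 *
 *	struct kvm_hyperv_eventfd hvevfd = {
 *		.conn_id = 1,			// connection id the guest uses
 *		.fd = eventfd(0, EFD_CLOEXEC),	// signaled on HvSignalEvent
 *		.flags = 0,
 *	};
 *	ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);
 *
 * after which a guest HVCALL_SIGNAL_EVENT for conn_id 1 signals the
 * eventfd in the kernel, without a userspace exit.
 */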