/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT	40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK	(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

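/*
 * Per-vGPU bookkeeping: a hash table of write-protected guest pages
 * (kvmgt_pgfn) and an rbtree (gvt_dma) caching gfn-to-pfn translations
 * for guest pages pinned through vfio_pin_pages().
 */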
struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
};

struct gvt_dma {
	struct rb_node node;
	gfn_t gfn;
	kvm_pfn_t pfn;
};

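/*
 * A vgpu handle is really a pointer to a kvmgt_guest_info; a value that
 * fits entirely in the low byte cannot be a valid kernel pointer, so it
 * is treated as "no guest attached".
 */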
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.cache.rb_node;
	struct gvt_dma *ret = NULL;

	while (node) {
		struct gvt_dma *itr = rb_entry(node, struct gvt_dma, node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else {
			ret = itr;
			break;
		}
	}

	return ret;
}

static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct gvt_dma *entry;
	kvm_pfn_t pfn;

	mutex_lock(&vgpu->vdev.cache_lock);

	entry = __gvt_cache_find(vgpu, gfn);
	pfn = (entry == NULL) ? 0 : entry->pfn;

	mutex_unlock(&vgpu->vdev.cache_lock);
	return pfn;
}

static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return;

	new->gfn = gfn;
	new->pfn = pfn;

	mutex_lock(&vgpu->vdev.cache_lock);
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, node);

		if (gfn == itr->gfn)
			goto out;
		else if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &vgpu->vdev.cache);
	mutex_unlock(&vgpu->vdev.cache_lock);
	return;

out:
	mutex_unlock(&vgpu->vdev.cache_lock);
	kfree(new);
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	rb_erase(&entry->node, &vgpu->vdev.cache);
	kfree(entry);
}

static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	struct gvt_dma *this;
	unsigned long g1;
	int rc;

	mutex_lock(&vgpu->vdev.cache_lock);
	this = __gvt_cache_find(vgpu, gfn);
	if (!this) {
		mutex_unlock(&vgpu->vdev.cache_lock);
		return;
	}

	g1 = gfn;
	rc = vfio_unpin_pages(dev, &g1, 1);
	WARN_ON(rc != 1);
	__gvt_cache_remove_entry(vgpu, this);
	mutex_unlock(&vgpu->vdev.cache_lock);
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.cache = RB_ROOT;
	mutex_init(&vgpu->vdev.cache_lock);
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;
	struct device *dev = mdev_dev(vgpu->vdev.mdev);
	unsigned long gfn;

	mutex_lock(&vgpu->vdev.cache_lock);
	while ((node = rb_first(&vgpu->vdev.cache))) {
		dma = rb_entry(node, struct gvt_dma, node);
		gfn = dma->gfn;

		vfio_unpin_pages(dev, &gfn, 1);
		__gvt_cache_remove_entry(vgpu, dma);
	}
	mutex_unlock(&vgpu->vdev.cache_lock);
}

static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
		const char *name)
{
	int i;
	struct intel_vgpu_type *t;
	const char *driver_name = dev_driver_string(
			&gvt->dev_priv->drm.pdev->dev);

	for (i = 0; i < gvt->num_types; i++) {
		t = &gvt->types[i];
		if (!strncmp(t->name, name + strlen(driver_name) + 1,
			sizeof(t->name)))
			return t;
	}

	return NULL;
}

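/*
 * Per-type attributes exported through the mdev framework; these show up
 * in sysfs under the parent device's mdev_supported_types/<type>/
 * directory and let userspace discover what each vGPU type provides.
 */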
static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct intel_vgpu_type *type;
	unsigned int num = 0;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		num = 0;
	else
		num = type->avail_instance;

	return sprintf(buf, "%u\n", num);
}

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static ssize_t description_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	struct intel_vgpu_type *type;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		return 0;

	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
			"fence: %d\n",
			BYTES_TO_MB(type->low_gm_size),
			BYTES_TO_MB(type->high_gm_size),
			type->fence);
}

static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *type_attrs[] = {
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,
};

static struct attribute_group *intel_vgpu_type_groups[] = {
	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};

static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i, j;
	struct intel_vgpu_type *type;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		type = &gvt->types[i];

		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
		if (WARN_ON(!group))
			goto unwind;

		group->name = type->name;
		group->attrs = type_attrs;
		intel_vgpu_type_groups[i] = group;
	}

	return true;

unwind:
	for (j = 0; j < i; j++) {
		group = intel_vgpu_type_groups[j];
		kfree(group);
	}

	return false;
}

static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		group = intel_vgpu_type_groups[i];
		kfree(group);
	}
}

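/*
 * Write-protection bookkeeping: guest page-table pages that GVT-g asks
 * KVM to track are also recorded in the local ptable hash so that
 * lookups and teardown do not have to round-trip through KVM.
 */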
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

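/*
 * mdev_parent_ops callbacks. "create" runs when userspace writes a UUID
 * to the type's create node in sysfs; it only allocates the vGPU. KVM is
 * not involved until "open", once the VFIO group is wired to a VM.
 */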
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		unsigned long gfn, end_gfn;

		gfn = unmap->iova >> PAGE_SHIFT;
		end_gfn = gfn + unmap->size / PAGE_SIZE;

		while (gfn < end_gfn)
			gvt_cache_remove(vgpu, gfn++);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

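/*
 * Open/release path. Opening the mdev registers the two VFIO notifiers
 * above (IOMMU DMA-unmap and group KVM-set) and then initializes guest
 * state; any failure unwinds the registrations in reverse order.
 */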
static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_err("vfio_register_notifier for group failed: %d\n", ret);
		goto undo_iommu;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	/* release may race between the fd release path and the work item */
	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					vdev.release_work);

	__intel_vgpu_release(vgpu);
}

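/*
 * Decode the guest-visible BAR0 base from the vGPU's virtual config
 * space: the low dword masked down to the address bits, plus the high
 * dword when the BAR advertises a 64-bit memory type.
 */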
static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
{
	u32 start_lo, start_hi;
	u32 mem_type;
	int pos = PCI_BASE_ADDRESS_0;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ pos + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS) {
		gvt_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case VFIO_PCI_BAR1_REGION_INDEX:
		if (is_write) {
			uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);

			ret = intel_gvt_ops->emulate_mmio_write(vgpu,
						bar0_start + pos, buf, count);
		} else {
			uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);

			ret = intel_gvt_ops->emulate_mmio_read(vgpu,
						bar0_start + pos, buf, count);
		}
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
	default:
		gvt_err("unsupported region: %u\n", index);
	}

	return ret == 0 ? count : ret;
}

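/*
 * read()/write() split the user buffer into naturally aligned 4/2/1
 * byte chunks and bounce each one through intel_vgpu_rw(), so the
 * emulation paths above always see simple, aligned accesses.
 */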
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff = 0;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		uint32_t flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	}

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, uint32_t flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

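/*
 * The ioctl entry point mirrors vfio-pci: GET_INFO, GET_REGION_INFO,
 * GET_IRQ_INFO, SET_IRQS and RESET. Region info for BAR2 (the graphics
 * memory aperture) additionally reports a sparse-mmap capability that
 * covers only the slice of the aperture assigned to this vGPU.
 */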
static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
			     unsigned long arg)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long minsz;

	gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;
		info.flags |= VFIO_DEVICE_FLAGS_RESET;
		info.num_regions = VFIO_PCI_NUM_REGIONS;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;
		struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
		int i, ret;
		struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
		size_t size;
		int nr_areas = 1;
		int cap_type_id;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = vgpu->cfg_space.bar[info.index].size;
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR1_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;
			break;
		case VFIO_PCI_BAR2_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = VFIO_REGION_INFO_FLAG_CAPS |
					VFIO_REGION_INFO_FLAG_MMAP |
					VFIO_REGION_INFO_FLAG_READ |
					VFIO_REGION_INFO_FLAG_WRITE;
			info.size = gvt_aperture_sz(vgpu->gvt);

			size = sizeof(*sparse) +
					(nr_areas * sizeof(*sparse->areas));
			sparse = kzalloc(size, GFP_KERNEL);
			if (!sparse)
				return -ENOMEM;

			sparse->nr_areas = nr_areas;
			cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
			sparse->areas[0].offset =
					PAGE_ALIGN(vgpu_aperture_offset(vgpu));
			sparse->areas[0].size = vgpu_aperture_sz(vgpu);
			break;

		case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0;
			info.flags = 0;

			gvt_dbg_core("get region info bar:%d\n", info.index);
			break;

		case VFIO_PCI_ROM_REGION_INDEX:
		case VFIO_PCI_VGA_REGION_INDEX:
			gvt_dbg_core("get region info index:%d\n", info.index);
			break;
		default:
			{
				struct vfio_region_info_cap_type cap_type;

				if (info.index >= VFIO_PCI_NUM_REGIONS +
						vgpu->vdev.num_regions)
					return -EINVAL;

				i = info.index - VFIO_PCI_NUM_REGIONS;

				info.offset =
					VFIO_PCI_INDEX_TO_OFFSET(info.index);
				info.size = vgpu->vdev.region[i].size;
				info.flags = vgpu->vdev.region[i].flags;

				cap_type.type = vgpu->vdev.region[i].type;
				cap_type.subtype = vgpu->vdev.region[i].subtype;

				ret = vfio_info_add_capability(&caps,
						VFIO_REGION_INFO_CAP_TYPE,
						&cap_type);
				if (ret)
					return ret;
			}
		}

		if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
			switch (cap_type_id) {
			case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
				ret = vfio_info_add_capability(&caps,
					VFIO_REGION_INFO_CAP_SPARSE_MMAP,
					sparse);
				kfree(sparse);
				if (ret)
					return ret;
				break;
			default:
				return -EINVAL;
			}
		}

		if (caps.size) {
			if (info.argsz < sizeof(info) + caps.size) {
				info.argsz = sizeof(info) + caps.size;
				info.cap_offset = 0;
			} else {
				vfio_info_cap_shift(&caps, sizeof(info));
				if (copy_to_user((void __user *)arg +
						  sizeof(info), caps.buf,
						  caps.size)) {
					kfree(caps.buf);
					return -EFAULT;
				}
				info.cap_offset = sizeof(info);
			}

			kfree(caps.buf);
		}

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX:
		case VFIO_PCI_MSI_IRQ_INDEX:
			break;
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = intel_vgpu_get_irq_count(vgpu, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			int max = intel_vgpu_get_irq_count(vgpu, hdr.index);

			ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
						VFIO_PCI_NUM_IRQS, &data_size);
			if (ret) {
				gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
				return -EINVAL;
			}
			if (data_size) {
				data = memdup_user((void __user *)(arg + minsz),
						   data_size);
				if (IS_ERR(data))
					return PTR_ERR(data);
			}
		}

		ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
					hdr.start, hdr.count, data);
		kfree(data);

		return ret;
	} else if (cmd == VFIO_DEVICE_RESET) {
		intel_gvt_ops->vgpu_reset(vgpu);
		return 0;
	}

	return 0;
}

static const struct mdev_parent_ops intel_vgpu_ops = {
	.supported_type_groups	= intel_vgpu_type_groups,
	.create			= intel_vgpu_create,
	.remove			= intel_vgpu_remove,

	.open			= intel_vgpu_open,
	.release		= intel_vgpu_release,

	.read			= intel_vgpu_read,
	.write			= intel_vgpu_write,
	.mmap			= intel_vgpu_mmap,
	.ioctl			= intel_vgpu_ioctl,
};

static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
	if (!intel_gvt_init_vgpu_type_groups(gvt))
		return -EFAULT;

	intel_gvt_ops = ops;

	return mdev_register_device(dev, &intel_vgpu_ops);
}

static void kvmgt_host_exit(struct device *dev, void *gvt)
{
	intel_gvt_cleanup_vgpu_type_groups(gvt);
	mdev_unregister_device(dev);
}

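/*
 * Write protection via KVM's page-track framework: GVT-g asks KVM to
 * intercept guest writes to shadowed page-table pages. The srcu read
 * lock protects the memslot lookup; mmu_lock serializes against the
 * page tracker.
 */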
static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_add(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	struct kvm_memory_slot *slot;
	int idx;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (!slot) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -EINVAL;
	}

	spin_lock(&kvm->mmu_lock);

	if (!kvmgt_gfn_is_write_protected(info, gfn))
		goto out;

	kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
	kvmgt_protect_table_del(info, gfn);

out:
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);
	return 0;
}

static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		const u8 *val, int len,
		struct kvm_page_track_notifier_node *node)
{
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
					(void *)val, len);
}

static void kvmgt_page_track_flush_slot(struct kvm *kvm,
		struct kvm_memory_slot *slot,
		struct kvm_page_track_notifier_node *node)
{
	int i;
	gfn_t gfn;
	struct kvmgt_guest_info *info = container_of(node,
					struct kvmgt_guest_info, track_node);

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < slot->npages; i++) {
		gfn = slot->base_gfn + i;
		if (kvmgt_gfn_is_write_protected(info, gfn)) {
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
						KVM_PAGE_TRACK_WRITE);
			kvmgt_protect_table_del(info, gfn);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

static bool kvmgt_check_guest(void)
{
	unsigned int eax, ebx, ecx, edx;
	char s[12];
	unsigned int *i;

	eax = KVM_CPUID_SIGNATURE;
	ebx = ecx = edx = 0;

	asm volatile ("cpuid"
		      : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
		      :
		      : "cc", "memory");
	i = (unsigned int *)s;
	i[0] = ebx;
	i[1] = ecx;
	i[2] = edx;

	return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
}

/*
 * NOTE:
 * It's actually impossible to check if we are running in KVM host,
 * since the "KVM host" is simply native. So we only detect guest here.
 */
static int kvmgt_detect_host(void)
{
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped) {
		gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
		return -ENODEV;
	}
#endif
	return kvmgt_check_guest() ? -ENODEV : 0;
}

static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
	struct intel_vgpu *itr;
	struct kvmgt_guest_info *info;
	int id;
	bool ret = false;

	mutex_lock(&vgpu->gvt->lock);
	for_each_active_vgpu(vgpu->gvt, itr, id) {
		if (!handle_valid(itr->handle))
			continue;

		info = (struct kvmgt_guest_info *)itr->handle;
		if (kvm && kvm == info->kvm) {
			ret = true;
			goto out;
		}
	}
out:
	mutex_unlock(&vgpu->gvt->lock);
	return ret;
}

static int kvmgt_guest_init(struct mdev_device *mdev)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;
	struct kvm *kvm;

	vgpu = mdev_get_drvdata(mdev);
	if (handle_valid(vgpu->handle))
		return -EEXIST;

	kvm = vgpu->vdev.kvm;
	if (!kvm || kvm->mm != current->mm) {
		gvt_err("KVM is required to use Intel vGPU\n");
		return -ESRCH;
	}

	if (__kvmgt_vgpu_exist(vgpu, kvm))
		return -EEXIST;

	info = vzalloc(sizeof(struct kvmgt_guest_info));
	if (!info)
		return -ENOMEM;

	vgpu->handle = (unsigned long)info;
	info->vgpu = vgpu;
	info->kvm = kvm;

	kvmgt_protect_table_init(info);
	gvt_cache_init(vgpu);

	info->track_node.track_write = kvmgt_page_track_write;
	info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
	kvm_page_track_register_notifier(kvm, &info->track_node);

	return 0;
}

static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
	if (!info) {
		gvt_err("kvmgt_guest_info invalid\n");
		return false;
	}

	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
	kvmgt_protect_table_destroy(info);
	gvt_cache_destroy(info->vgpu);
	vfree(info);

	return true;
}

static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
{
	/* nothing to do here */
	return 0;
}

static void kvmgt_detach_vgpu(unsigned long handle)
{
	/* nothing to do here */
}

static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
	struct kvmgt_guest_info *info;
	struct intel_vgpu *vgpu;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	vgpu = info->vgpu;

	if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
		return 0;

	return -EFAULT;
}

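/*
 * Translate a guest frame number to a host pfn. Hits are served from the
 * gvt_dma cache; on a miss the page is pinned through vfio_pin_pages()
 * (which also establishes the IOMMU mapping) and the translation stays
 * cached until VFIO reports the range unmapped.
 */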
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
	unsigned long pfn;
	struct kvmgt_guest_info *info;
	struct device *dev;
	int rc;

	if (!handle_valid(handle))
		return INTEL_GVT_INVALID_ADDR;

	info = (struct kvmgt_guest_info *)handle;
	pfn = gvt_cache_find(info->vgpu, gfn);
	if (pfn != 0)
		return pfn;

	pfn = INTEL_GVT_INVALID_ADDR;
	dev = mdev_dev(info->vgpu->vdev.mdev);
	rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
	if (rc != 1) {
		gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
		return INTEL_GVT_INVALID_ADDR;
	}

	gvt_cache_add(info->vgpu, gfn, pfn);
	return pfn;
}

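/*
 * GPA accessors used by the emulation core. When called from a kernel
 * thread (current->mm == NULL), temporarily adopt the VM's mm with
 * use_mm() so kvm_read_guest()/kvm_write_guest() can resolve HVAs.
 */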
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len, bool write)
{
	struct kvmgt_guest_info *info;
	struct kvm *kvm;
	int ret;
	bool kthread = current->mm == NULL;

	if (!handle_valid(handle))
		return -ESRCH;

	info = (struct kvmgt_guest_info *)handle;
	kvm = info->kvm;

	if (kthread)
		use_mm(kvm->mm);

	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);

	if (kthread)
		unuse_mm(kvm->mm);

	return ret;
}

static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, false);
}

static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
			void *buf, unsigned long len)
{
	return kvmgt_rw_gpa(handle, gpa, buf, len, true);
}

static unsigned long kvmgt_virt_to_pfn(void *addr)
{
	return PFN_DOWN(__pa(addr));
}

struct intel_gvt_mpt kvmgt_mpt = {
	.detect_host = kvmgt_detect_host,
	.host_init = kvmgt_host_init,
	.host_exit = kvmgt_host_exit,
	.attach_vgpu = kvmgt_attach_vgpu,
	.detach_vgpu = kvmgt_detach_vgpu,
	.inject_msi = kvmgt_inject_msi,
	.from_virt_to_mfn = kvmgt_virt_to_pfn,
	.set_wp_page = kvmgt_write_protect_add,
	.unset_wp_page = kvmgt_write_protect_remove,
	.read_gpa = kvmgt_read_gpa,
	.write_gpa = kvmgt_write_gpa,
	.gfn_to_mfn = kvmgt_gfn_to_pfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);

static int __init kvmgt_init(void)
{
	return 0;
}

static void __exit kvmgt_exit(void)
{
}

module_init(kvmgt_init);
module_exit(kvmgt_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Intel Corporation");