linux.git: drivers/vhost/vhost.c
1 /* Copyright (C) 2009 Red Hat, Inc.
2  * Copyright (C) 2006 Rusty Russell IBM Corporation
3  *
4  * Author: Michael S. Tsirkin <mst@redhat.com>
5  *
6  * Inspiration, some code, and most witty comments come from
7  * Documentation/virtual/lguest/lguest.c, by Rusty Russell
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.
10  *
11  * Generic code for virtio server in host kernel.
12  */
13
14 #include <linux/eventfd.h>
15 #include <linux/vhost.h>
16 #include <linux/uio.h>
17 #include <linux/mm.h>
18 #include <linux/mmu_context.h>
19 #include <linux/miscdevice.h>
20 #include <linux/mutex.h>
21 #include <linux/poll.h>
22 #include <linux/file.h>
23 #include <linux/highmem.h>
24 #include <linux/slab.h>
25 #include <linux/vmalloc.h>
26 #include <linux/kthread.h>
27 #include <linux/cgroup.h>
28 #include <linux/module.h>
29 #include <linux/sort.h>
30 #include <linux/sched/mm.h>
31 #include <linux/sched/signal.h>
32 #include <linux/interval_tree_generic.h>
33
34 #include "vhost.h"
35
36 static ushort max_mem_regions = 64;
37 module_param(max_mem_regions, ushort, 0444);
38 MODULE_PARM_DESC(max_mem_regions,
39         "Maximum number of memory regions in memory map. (default: 64)");
40 static int max_iotlb_entries = 2048;
41 module_param(max_iotlb_entries, int, 0444);
42 MODULE_PARM_DESC(max_iotlb_entries,
43         "Maximum number of iotlb entries. (default: 2048)");
44
45 enum {
46         VHOST_MEMORY_F_LOG = 0x1,
47 };
48
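/* With VIRTIO_RING_F_EVENT_IDX, the used_event field sits right after the
 * avail ring and the avail_event field right after the used ring; these
 * macros compute the userspace addresses of those two fields.
 */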
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
51
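/* Generate the interval tree helpers (insert, remove, iter_first, ...) used
 * to look up umem nodes by the [start, last] address range they cover.
 */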
52 INTERVAL_TREE_DEFINE(struct vhost_umem_node,
53                      rb, __u64, __subtree_last,
54                      START, LAST, static inline, vhost_umem_interval_tree);
55
56 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
57 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
58 {
59         vq->user_be = !virtio_legacy_is_little_endian();
60 }
61
62 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
63 {
64         vq->user_be = true;
65 }
66
67 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
68 {
69         vq->user_be = false;
70 }
71
72 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
73 {
74         struct vhost_vring_state s;
75
76         if (vq->private_data)
77                 return -EBUSY;
78
79         if (copy_from_user(&s, argp, sizeof(s)))
80                 return -EFAULT;
81
82         if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
83             s.num != VHOST_VRING_BIG_ENDIAN)
84                 return -EINVAL;
85
86         if (s.num == VHOST_VRING_BIG_ENDIAN)
87                 vhost_enable_cross_endian_big(vq);
88         else
89                 vhost_enable_cross_endian_little(vq);
90
91         return 0;
92 }
93
94 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
95                                    int __user *argp)
96 {
97         struct vhost_vring_state s = {
98                 .index = idx,
99                 .num = vq->user_be
100         };
101
102         if (copy_to_user(argp, &s, sizeof(s)))
103                 return -EFAULT;
104
105         return 0;
106 }
107
108 static void vhost_init_is_le(struct vhost_virtqueue *vq)
109 {
110         /* Note for legacy virtio: user_be is initialized at reset time
111          * according to the host endianness. If userspace does not set an
112          * explicit endianness, the default behavior is native endian, as
113          * expected by legacy virtio.
114          */
115         vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
116 }
117 #else
118 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
119 {
120 }
121
122 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
123 {
124         return -ENOIOCTLCMD;
125 }
126
127 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
128                                    int __user *argp)
129 {
130         return -ENOIOCTLCMD;
131 }
132
133 static void vhost_init_is_le(struct vhost_virtqueue *vq)
134 {
135         vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
136                 || virtio_legacy_is_little_endian();
137 }
138 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
139
140 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
141 {
142         vhost_init_is_le(vq);
143 }
144
145 struct vhost_flush_struct {
146         struct vhost_work work;
147         struct completion wait_event;
148 };
149
150 static void vhost_flush_work(struct vhost_work *work)
151 {
152         struct vhost_flush_struct *s;
153
154         s = container_of(work, struct vhost_flush_struct, work);
155         complete(&s->wait_event);
156 }
157
158 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
159                             poll_table *pt)
160 {
161         struct vhost_poll *poll;
162
163         poll = container_of(pt, struct vhost_poll, table);
164         poll->wqh = wqh;
165         add_wait_queue(wqh, &poll->wait);
166 }
167
168 static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
169                              void *key)
170 {
171         struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
172
173         if (!(key_to_poll(key) & poll->mask))
174                 return 0;
175
176         vhost_poll_queue(poll);
177         return 0;
178 }
179
180 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
181 {
182         clear_bit(VHOST_WORK_QUEUED, &work->flags);
183         work->fn = fn;
184 }
185 EXPORT_SYMBOL_GPL(vhost_work_init);
186
187 /* Init poll structure */
188 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
189                      __poll_t mask, struct vhost_dev *dev)
190 {
191         init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
192         init_poll_funcptr(&poll->table, vhost_poll_func);
193         poll->mask = mask;
194         poll->dev = dev;
195         poll->wqh = NULL;
196
197         vhost_work_init(&poll->work, fn);
198 }
199 EXPORT_SYMBOL_GPL(vhost_poll_init);
200
201 /* Start polling a file. We add ourselves to the file's wait queue. The caller
202  * must keep a reference to the file until after vhost_poll_stop is called. */
203 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
204 {
205         __poll_t mask;
206         int ret = 0;
207
208         if (poll->wqh)
209                 return 0;
210
211         mask = vfs_poll(file, &poll->table);
212         if (mask)
213                 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
214         if (mask & EPOLLERR) {
215                 vhost_poll_stop(poll);
216                 ret = -EINVAL;
217         }
218
219         return ret;
220 }
221 EXPORT_SYMBOL_GPL(vhost_poll_start);
222
223 /* Stop polling a file. After this function returns, it becomes safe to drop the
224  * file reference. You must also flush afterwards. */
225 void vhost_poll_stop(struct vhost_poll *poll)
226 {
227         if (poll->wqh) {
228                 remove_wait_queue(poll->wqh, &poll->wait);
229                 poll->wqh = NULL;
230         }
231 }
232 EXPORT_SYMBOL_GPL(vhost_poll_stop);
233
234 void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
235 {
236         struct vhost_flush_struct flush;
237
238         if (dev->worker) {
239                 init_completion(&flush.wait_event);
240                 vhost_work_init(&flush.work, vhost_flush_work);
241
242                 vhost_work_queue(dev, &flush.work);
243                 wait_for_completion(&flush.wait_event);
244         }
245 }
246 EXPORT_SYMBOL_GPL(vhost_work_flush);
247
248 /* Flush any work that has been scheduled. When calling this, don't hold any
249  * locks that are also used by the callback. */
250 void vhost_poll_flush(struct vhost_poll *poll)
251 {
252         vhost_work_flush(poll->dev, &poll->work);
253 }
254 EXPORT_SYMBOL_GPL(vhost_poll_flush);
255
256 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
257 {
258         if (!dev->worker)
259                 return;
260
261         if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
262                 /* We can only add the work to the list after we're
263                  * sure it was not in the list.
264                  * test_and_set_bit() implies a memory barrier.
265                  */
266                 llist_add(&work->node, &dev->work_list);
267                 wake_up_process(dev->worker);
268         }
269 }
270 EXPORT_SYMBOL_GPL(vhost_work_queue);
271
272 /* A lockless hint for busy polling code to exit the loop */
273 bool vhost_has_work(struct vhost_dev *dev)
274 {
275         return !llist_empty(&dev->work_list);
276 }
277 EXPORT_SYMBOL_GPL(vhost_has_work);
278
279 void vhost_poll_queue(struct vhost_poll *poll)
280 {
281         vhost_work_queue(poll->dev, &poll->work);
282 }
283 EXPORT_SYMBOL_GPL(vhost_poll_queue);
284
285 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
286 {
287         int j;
288
289         for (j = 0; j < VHOST_NUM_ADDRS; j++)
290                 vq->meta_iotlb[j] = NULL;
291 }
292
293 static void vhost_vq_meta_reset(struct vhost_dev *d)
294 {
295         int i;
296
297         for (i = 0; i < d->nvqs; ++i)
298                 __vhost_vq_meta_reset(d->vqs[i]);
299 }
300
301 static void vhost_vq_reset(struct vhost_dev *dev,
302                            struct vhost_virtqueue *vq)
303 {
304         vq->num = 1;
305         vq->desc = NULL;
306         vq->avail = NULL;
307         vq->used = NULL;
308         vq->last_avail_idx = 0;
309         vq->avail_idx = 0;
310         vq->last_used_idx = 0;
311         vq->signalled_used = 0;
312         vq->signalled_used_valid = false;
313         vq->used_flags = 0;
314         vq->log_used = false;
315         vq->log_addr = -1ull;
316         vq->private_data = NULL;
317         vq->acked_features = 0;
318         vq->acked_backend_features = 0;
319         vq->log_base = NULL;
320         vq->error_ctx = NULL;
321         vq->kick = NULL;
322         vq->call_ctx = NULL;
323         vq->log_ctx = NULL;
324         vhost_reset_is_le(vq);
325         vhost_disable_cross_endian(vq);
326         vq->busyloop_timeout = 0;
327         vq->umem = NULL;
328         vq->iotlb = NULL;
329         __vhost_vq_meta_reset(vq);
330 }
331
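/* Per-device worker thread: runs with the owner's mm and a userspace address
 * limit so queued work items can access guest memory, and drains
 * dev->work_list until the thread is stopped.
 */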
332 static int vhost_worker(void *data)
333 {
334         struct vhost_dev *dev = data;
335         struct vhost_work *work, *work_next;
336         struct llist_node *node;
337         mm_segment_t oldfs = get_fs();
338
339         set_fs(USER_DS);
340         use_mm(dev->mm);
341
342         for (;;) {
343                 /* mb paired w/ kthread_stop */
344                 set_current_state(TASK_INTERRUPTIBLE);
345
346                 if (kthread_should_stop()) {
347                         __set_current_state(TASK_RUNNING);
348                         break;
349                 }
350
351                 node = llist_del_all(&dev->work_list);
352                 if (!node)
353                         schedule();
354
355                 node = llist_reverse_order(node);
356                 /* make sure flag is seen after deletion */
357                 smp_wmb();
358                 llist_for_each_entry_safe(work, work_next, node, node) {
359                         clear_bit(VHOST_WORK_QUEUED, &work->flags);
360                         __set_current_state(TASK_RUNNING);
361                         work->fn(work);
362                         if (need_resched())
363                                 schedule();
364                 }
365         }
366         unuse_mm(dev->mm);
367         set_fs(oldfs);
368         return 0;
369 }
370
371 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
372 {
373         kfree(vq->indirect);
374         vq->indirect = NULL;
375         kfree(vq->log);
376         vq->log = NULL;
377         kfree(vq->heads);
378         vq->heads = NULL;
379 }
380
381 /* Helper to allocate iovec buffers for all vqs. */
382 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
383 {
384         struct vhost_virtqueue *vq;
385         int i;
386
387         for (i = 0; i < dev->nvqs; ++i) {
388                 vq = dev->vqs[i];
389                 vq->indirect = kmalloc_array(UIO_MAXIOV,
390                                              sizeof(*vq->indirect),
391                                              GFP_KERNEL);
392                 vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
393                                         GFP_KERNEL);
394                 vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
395                                           GFP_KERNEL);
396                 if (!vq->indirect || !vq->log || !vq->heads)
397                         goto err_nomem;
398         }
399         return 0;
400
401 err_nomem:
402         for (; i >= 0; --i)
403                 vhost_vq_free_iovecs(dev->vqs[i]);
404         return -ENOMEM;
405 }
406
407 static void vhost_dev_free_iovecs(struct vhost_dev *dev)
408 {
409         int i;
410
411         for (i = 0; i < dev->nvqs; ++i)
412                 vhost_vq_free_iovecs(dev->vqs[i]);
413 }
414
415 void vhost_dev_init(struct vhost_dev *dev,
416                     struct vhost_virtqueue **vqs, int nvqs)
417 {
418         struct vhost_virtqueue *vq;
419         int i;
420
421         dev->vqs = vqs;
422         dev->nvqs = nvqs;
423         mutex_init(&dev->mutex);
424         dev->log_ctx = NULL;
425         dev->umem = NULL;
426         dev->iotlb = NULL;
427         dev->mm = NULL;
428         dev->worker = NULL;
429         init_llist_head(&dev->work_list);
430         init_waitqueue_head(&dev->wait);
431         INIT_LIST_HEAD(&dev->read_list);
432         INIT_LIST_HEAD(&dev->pending_list);
433         spin_lock_init(&dev->iotlb_lock);
434
435
436         for (i = 0; i < dev->nvqs; ++i) {
437                 vq = dev->vqs[i];
438                 vq->log = NULL;
439                 vq->indirect = NULL;
440                 vq->heads = NULL;
441                 vq->dev = dev;
442                 mutex_init(&vq->mutex);
443                 vhost_vq_reset(dev, vq);
444                 if (vq->handle_kick)
445                         vhost_poll_init(&vq->poll, vq->handle_kick,
446                                         EPOLLIN, dev);
447         }
448 }
449 EXPORT_SYMBOL_GPL(vhost_dev_init);
450
451 /* Caller should have device mutex */
452 long vhost_dev_check_owner(struct vhost_dev *dev)
453 {
454         /* Are you the owner? If not, I don't think you mean to do that */
455         return dev->mm == current->mm ? 0 : -EPERM;
456 }
457 EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
458
459 struct vhost_attach_cgroups_struct {
460         struct vhost_work work;
461         struct task_struct *owner;
462         int ret;
463 };
464
465 static void vhost_attach_cgroups_work(struct vhost_work *work)
466 {
467         struct vhost_attach_cgroups_struct *s;
468
469         s = container_of(work, struct vhost_attach_cgroups_struct, work);
470         s->ret = cgroup_attach_task_all(s->owner, current);
471 }
472
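/* Move the worker thread into the owner's cgroups by queueing a work item
 * that runs cgroup_attach_task_all() from the worker's own context.
 */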
473 static int vhost_attach_cgroups(struct vhost_dev *dev)
474 {
475         struct vhost_attach_cgroups_struct attach;
476
477         attach.owner = current;
478         vhost_work_init(&attach.work, vhost_attach_cgroups_work);
479         vhost_work_queue(dev, &attach.work);
480         vhost_work_flush(dev, &attach.work);
481         return attach.ret;
482 }
483
484 /* Caller should have device mutex */
485 bool vhost_dev_has_owner(struct vhost_dev *dev)
486 {
487         return dev->mm;
488 }
489 EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
490
491 /* Caller should have device mutex */
492 long vhost_dev_set_owner(struct vhost_dev *dev)
493 {
494         struct task_struct *worker;
495         int err;
496
497         /* Is there an owner already? */
498         if (vhost_dev_has_owner(dev)) {
499                 err = -EBUSY;
500                 goto err_mm;
501         }
502
503         /* No owner, become one */
504         dev->mm = get_task_mm(current);
505         worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
506         if (IS_ERR(worker)) {
507                 err = PTR_ERR(worker);
508                 goto err_worker;
509         }
510
511         dev->worker = worker;
512         wake_up_process(worker);        /* avoid contributing to loadavg */
513
514         err = vhost_attach_cgroups(dev);
515         if (err)
516                 goto err_cgroup;
517
518         err = vhost_dev_alloc_iovecs(dev);
519         if (err)
520                 goto err_cgroup;
521
522         return 0;
523 err_cgroup:
524         kthread_stop(worker);
525         dev->worker = NULL;
526 err_worker:
527         if (dev->mm)
528                 mmput(dev->mm);
529         dev->mm = NULL;
530 err_mm:
531         return err;
532 }
533 EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
534
535 struct vhost_umem *vhost_dev_reset_owner_prepare(void)
536 {
537         return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
538 }
539 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
540
541 /* Caller should have device mutex */
542 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
543 {
544         int i;
545
546         vhost_dev_cleanup(dev);
547
548         /* Restore memory to default empty mapping. */
549         INIT_LIST_HEAD(&umem->umem_list);
550         dev->umem = umem;
551         /* We don't need VQ locks below since vhost_dev_cleanup makes sure
552          * VQs aren't running.
553          */
554         for (i = 0; i < dev->nvqs; ++i)
555                 dev->vqs[i]->umem = umem;
556 }
557 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
558
559 void vhost_dev_stop(struct vhost_dev *dev)
560 {
561         int i;
562
563         for (i = 0; i < dev->nvqs; ++i) {
564                 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
565                         vhost_poll_stop(&dev->vqs[i]->poll);
566                         vhost_poll_flush(&dev->vqs[i]->poll);
567                 }
568         }
569 }
570 EXPORT_SYMBOL_GPL(vhost_dev_stop);
571
572 static void vhost_umem_free(struct vhost_umem *umem,
573                             struct vhost_umem_node *node)
574 {
575         vhost_umem_interval_tree_remove(node, &umem->umem_tree);
576         list_del(&node->link);
577         kfree(node);
578         umem->numem--;
579 }
580
581 static void vhost_umem_clean(struct vhost_umem *umem)
582 {
583         struct vhost_umem_node *node, *tmp;
584
585         if (!umem)
586                 return;
587
588         list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
589                 vhost_umem_free(umem, node);
590
591         kvfree(umem);
592 }
593
594 static void vhost_clear_msg(struct vhost_dev *dev)
595 {
596         struct vhost_msg_node *node, *n;
597
598         spin_lock(&dev->iotlb_lock);
599
600         list_for_each_entry_safe(node, n, &dev->read_list, node) {
601                 list_del(&node->node);
602                 kfree(node);
603         }
604
605         list_for_each_entry_safe(node, n, &dev->pending_list, node) {
606                 list_del(&node->node);
607                 kfree(node);
608         }
609
610         spin_unlock(&dev->iotlb_lock);
611 }
612
613 void vhost_dev_cleanup(struct vhost_dev *dev)
614 {
615         int i;
616
617         for (i = 0; i < dev->nvqs; ++i) {
618                 if (dev->vqs[i]->error_ctx)
619                         eventfd_ctx_put(dev->vqs[i]->error_ctx);
620                 if (dev->vqs[i]->kick)
621                         fput(dev->vqs[i]->kick);
622                 if (dev->vqs[i]->call_ctx)
623                         eventfd_ctx_put(dev->vqs[i]->call_ctx);
624                 vhost_vq_reset(dev, dev->vqs[i]);
625         }
626         vhost_dev_free_iovecs(dev);
627         if (dev->log_ctx)
628                 eventfd_ctx_put(dev->log_ctx);
629         dev->log_ctx = NULL;
630         /* No one will access memory at this point */
631         vhost_umem_clean(dev->umem);
632         dev->umem = NULL;
633         vhost_umem_clean(dev->iotlb);
634         dev->iotlb = NULL;
635         vhost_clear_msg(dev);
636         wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
637         WARN_ON(!llist_empty(&dev->work_list));
638         if (dev->worker) {
639                 kthread_stop(dev->worker);
640                 dev->worker = NULL;
641         }
642         if (dev->mm)
643                 mmput(dev->mm);
644         dev->mm = NULL;
645 }
646 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
647
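/* The dirty log is a userspace bitmap with one bit per VHOST_PAGE_SIZE page,
 * so the log byte covering 'addr' is at offset addr / VHOST_PAGE_SIZE / 8.
 */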
648 static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
649 {
650         u64 a = addr / VHOST_PAGE_SIZE / 8;
651
652         /* Make sure 64 bit math will not overflow. */
653         if (a > ULONG_MAX - (unsigned long)log_base ||
654             a + (unsigned long)log_base > ULONG_MAX)
655                 return false;
656
657         return access_ok(VERIFY_WRITE, log_base + a,
658                          (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
659 }
660
661 static bool vhost_overflow(u64 uaddr, u64 size)
662 {
663         /* Make sure 64 bit math will not overflow. */
664         return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
665 }
666
667 /* Caller should have vq mutex and device mutex. */
668 static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
669                                 int log_all)
670 {
671         struct vhost_umem_node *node;
672
673         if (!umem)
674                 return false;
675
676         list_for_each_entry(node, &umem->umem_list, link) {
677                 unsigned long a = node->userspace_addr;
678
679                 if (vhost_overflow(node->userspace_addr, node->size))
680                         return false;
681
682
683                 if (!access_ok(VERIFY_WRITE, (void __user *)a,
684                                     node->size))
685                         return false;
686                 else if (log_all && !log_access_ok(log_base,
687                                                    node->start,
688                                                    node->size))
689                         return false;
690         }
691         return true;
692 }
693
694 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
695                                                u64 addr, unsigned int size,
696                                                int type)
697 {
698         const struct vhost_umem_node *node = vq->meta_iotlb[type];
699
700         if (!node)
701                 return NULL;
702
703         return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
704 }
705
706 /* Can we switch to this memory table? */
707 /* Caller should have device mutex but not vq mutex */
708 static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
709                              int log_all)
710 {
711         int i;
712
713         for (i = 0; i < d->nvqs; ++i) {
714                 bool ok;
715                 bool log;
716
717                 mutex_lock(&d->vqs[i]->mutex);
718                 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
719                 /* If ring is inactive, will check when it's enabled. */
720                 if (d->vqs[i]->private_data)
721                         ok = vq_memory_access_ok(d->vqs[i]->log_base,
722                                                  umem, log);
723                 else
724                         ok = true;
725                 mutex_unlock(&d->vqs[i]->mutex);
726                 if (!ok)
727                         return false;
728         }
729         return true;
730 }
731
732 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
733                           struct iovec iov[], int iov_size, int access);
734
735 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
736                               const void *from, unsigned size)
737 {
738         int ret;
739
740         if (!vq->iotlb)
741                 return __copy_to_user(to, from, size);
742         else {
743                 /* This function should be called after iotlb
744                  * prefetch, which means we're sure that all vq
745                  * memory can be accessed through the iotlb, so
746                  * -EAGAIN should not happen in this case.
747                  */
748                 struct iov_iter t;
749                 void __user *uaddr = vhost_vq_meta_fetch(vq,
750                                      (u64)(uintptr_t)to, size,
751                                      VHOST_ADDR_USED);
752
753                 if (uaddr)
754                         return __copy_to_user(uaddr, from, size);
755
756                 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
757                                      ARRAY_SIZE(vq->iotlb_iov),
758                                      VHOST_ACCESS_WO);
759                 if (ret < 0)
760                         goto out;
761                 iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
762                 ret = copy_to_iter(from, size, &t);
763                 if (ret == size)
764                         ret = 0;
765         }
766 out:
767         return ret;
768 }
769
770 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
771                                 void __user *from, unsigned size)
772 {
773         int ret;
774
775         if (!vq->iotlb)
776                 return __copy_from_user(to, from, size);
777         else {
778                 /* This function should be called after iotlb
779                  * prefetch, which means we're sure that the vq
780                  * memory can be accessed through the iotlb, so
781                  * -EAGAIN should not happen in this case.
782                  */
783                 void __user *uaddr = vhost_vq_meta_fetch(vq,
784                                      (u64)(uintptr_t)from, size,
785                                      VHOST_ADDR_DESC);
786                 struct iov_iter f;
787
788                 if (uaddr)
789                         return __copy_from_user(to, uaddr, size);
790
791                 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
792                                      ARRAY_SIZE(vq->iotlb_iov),
793                                      VHOST_ACCESS_RO);
794                 if (ret < 0) {
795                         vq_err(vq, "IOTLB translation failure: uaddr "
796                                "%p size 0x%llx\n", from,
797                                (unsigned long long) size);
798                         goto out;
799                 }
800                 iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
801                 ret = copy_from_iter(to, size, &f);
802                 if (ret == size)
803                         ret = 0;
804         }
805
806 out:
807         return ret;
808 }
809
810 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
811                                           void __user *addr, unsigned int size,
812                                           int type)
813 {
814         int ret;
815
816         ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
817                              ARRAY_SIZE(vq->iotlb_iov),
818                              VHOST_ACCESS_RO);
819         if (ret < 0) {
820                 vq_err(vq, "IOTLB translation failure: uaddr "
821                         "%p size 0x%llx\n", addr,
822                         (unsigned long long) size);
823                 return NULL;
824         }
825
826         if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
827                 vq_err(vq, "Non atomic userspace memory access: uaddr "
828                         "%p size 0x%llx\n", addr,
829                         (unsigned long long) size);
830                 return NULL;
831         }
832
833         return vq->iotlb_iov[0].iov_base;
834 }
835
836 /* This function should be called after iotlb
837  * prefetch, which means we're sure that the vq
838  * memory can be accessed through the iotlb, so
839  * -EAGAIN should not happen in this case.
840  */
841 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
842                                             void *addr, unsigned int size,
843                                             int type)
844 {
845         void __user *uaddr = vhost_vq_meta_fetch(vq,
846                              (u64)(uintptr_t)addr, size, type);
847         if (uaddr)
848                 return uaddr;
849
850         return __vhost_get_user_slow(vq, addr, size, type);
851 }
852
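/* Userspace accessors: with no IOTLB these are plain __put_user/__get_user;
 * with an IOTLB enabled the pointer is first translated via the cached meta
 * IOTLB (or a slow-path translation) before the access is made.
 */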
853 #define vhost_put_user(vq, x, ptr)              \
854 ({ \
855         int ret = -EFAULT; \
856         if (!vq->iotlb) { \
857                 ret = __put_user(x, ptr); \
858         } else { \
859                 __typeof__(ptr) to = \
860                         (__typeof__(ptr)) __vhost_get_user(vq, ptr,     \
861                                           sizeof(*ptr), VHOST_ADDR_USED); \
862                 if (to != NULL) \
863                         ret = __put_user(x, to); \
864                 else \
865                         ret = -EFAULT;  \
866         } \
867         ret; \
868 })
869
870 #define vhost_get_user(vq, x, ptr, type)                \
871 ({ \
872         int ret; \
873         if (!vq->iotlb) { \
874                 ret = __get_user(x, ptr); \
875         } else { \
876                 __typeof__(ptr) from = \
877                         (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
878                                                            sizeof(*ptr), \
879                                                            type); \
880                 if (from != NULL) \
881                         ret = __get_user(x, from); \
882                 else \
883                         ret = -EFAULT; \
884         } \
885         ret; \
886 })
887
888 #define vhost_get_avail(vq, x, ptr) \
889         vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
890
891 #define vhost_get_used(vq, x, ptr) \
892         vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
893
894 static void vhost_dev_lock_vqs(struct vhost_dev *d)
895 {
896         int i = 0;
897         for (i = 0; i < d->nvqs; ++i)
898                 mutex_lock_nested(&d->vqs[i]->mutex, i);
899 }
900
901 static void vhost_dev_unlock_vqs(struct vhost_dev *d)
902 {
903         int i = 0;
904         for (i = 0; i < d->nvqs; ++i)
905                 mutex_unlock(&d->vqs[i]->mutex);
906 }
907
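/* Add a new [start, end] -> userspace_addr mapping. If the table already
 * holds max_iotlb_entries entries, the oldest one is evicted first (FIFO).
 */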
908 static int vhost_new_umem_range(struct vhost_umem *umem,
909                                 u64 start, u64 size, u64 end,
910                                 u64 userspace_addr, int perm)
911 {
912         struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
913
914         if (!node)
915                 return -ENOMEM;
916
917         if (umem->numem == max_iotlb_entries) {
918                 tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
919                 vhost_umem_free(umem, tmp);
920         }
921
922         node->start = start;
923         node->size = size;
924         node->last = end;
925         node->userspace_addr = userspace_addr;
926         node->perm = perm;
927         INIT_LIST_HEAD(&node->link);
928         list_add_tail(&node->link, &umem->umem_list);
929         vhost_umem_interval_tree_insert(node, &umem->umem_tree);
930         umem->numem++;
931
932         return 0;
933 }
934
935 static void vhost_del_umem_range(struct vhost_umem *umem,
936                                  u64 start, u64 end)
937 {
938         struct vhost_umem_node *node;
939
940         while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
941                                                            start, end)))
942                 vhost_umem_free(umem, node);
943 }
944
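/* Wake up any virtqueue whose pending IOTLB miss is covered by the newly
 * updated [iova, iova + size) range.
 */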
945 static void vhost_iotlb_notify_vq(struct vhost_dev *d,
946                                   struct vhost_iotlb_msg *msg)
947 {
948         struct vhost_msg_node *node, *n;
949
950         spin_lock(&d->iotlb_lock);
951
952         list_for_each_entry_safe(node, n, &d->pending_list, node) {
953                 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
954                 if (msg->iova <= vq_msg->iova &&
955                     msg->iova + msg->size - 1 > vq_msg->iova &&
956                     vq_msg->type == VHOST_IOTLB_MISS) {
957                         vhost_poll_queue(&node->vq->poll);
958                         list_del(&node->node);
959                         kfree(node);
960                 }
961         }
962
963         spin_unlock(&d->iotlb_lock);
964 }
965
966 static bool umem_access_ok(u64 uaddr, u64 size, int access)
967 {
968         unsigned long a = uaddr;
969
970         /* Make sure 64 bit math will not overflow. */
971         if (vhost_overflow(uaddr, size))
972                 return false;
973
974         if ((access & VHOST_ACCESS_RO) &&
975             !access_ok(VERIFY_READ, (void __user *)a, size))
976                 return false;
977         if ((access & VHOST_ACCESS_WO) &&
978             !access_ok(VERIFY_WRITE, (void __user *)a, size))
979                 return false;
980         return true;
981 }
982
983 static int vhost_process_iotlb_msg(struct vhost_dev *dev,
984                                    struct vhost_iotlb_msg *msg)
985 {
986         int ret = 0;
987
988         mutex_lock(&dev->mutex);
989         vhost_dev_lock_vqs(dev);
990         switch (msg->type) {
991         case VHOST_IOTLB_UPDATE:
992                 if (!dev->iotlb) {
993                         ret = -EFAULT;
994                         break;
995                 }
996                 if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
997                         ret = -EFAULT;
998                         break;
999                 }
1000                 vhost_vq_meta_reset(dev);
1001                 if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
1002                                          msg->iova + msg->size - 1,
1003                                          msg->uaddr, msg->perm)) {
1004                         ret = -ENOMEM;
1005                         break;
1006                 }
1007                 vhost_iotlb_notify_vq(dev, msg);
1008                 break;
1009         case VHOST_IOTLB_INVALIDATE:
1010                 if (!dev->iotlb) {
1011                         ret = -EFAULT;
1012                         break;
1013                 }
1014                 vhost_vq_meta_reset(dev);
1015                 vhost_del_umem_range(dev->iotlb, msg->iova,
1016                                      msg->iova + msg->size - 1);
1017                 break;
1018         default:
1019                 ret = -EINVAL;
1020                 break;
1021         }
1022
1023         vhost_dev_unlock_vqs(dev);
1024         mutex_unlock(&dev->mutex);
1025
1026         return ret;
1027 }
1028 ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1029                              struct iov_iter *from)
1030 {
1031         struct vhost_iotlb_msg msg;
1032         size_t offset;
1033         int type, ret;
1034
1035         ret = copy_from_iter(&type, sizeof(type), from);
1036         if (ret != sizeof(type))
1037                 goto done;
1038
1039         switch (type) {
1040         case VHOST_IOTLB_MSG:
1041                 /* There may be a hole after the type field for V1
1042                  * messages, so skip it here.
1043                  */
1044                 offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
1045                 break;
1046         case VHOST_IOTLB_MSG_V2:
1047                 offset = sizeof(__u32);
1048                 break;
1049         default:
1050                 ret = -EINVAL;
1051                 goto done;
1052         }
1053
1054         iov_iter_advance(from, offset);
1055         ret = copy_from_iter(&msg, sizeof(msg), from);
1056         if (ret != sizeof(msg))
1057                 goto done;
1058         if (vhost_process_iotlb_msg(dev, &msg)) {
1059                 ret = -EFAULT;
1060                 goto done;
1061         }
1062
1063         ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
1064               sizeof(struct vhost_msg_v2);
1065 done:
1066         return ret;
1067 }
1068 EXPORT_SYMBOL(vhost_chr_write_iter);
1069
1070 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1071                             poll_table *wait)
1072 {
1073         __poll_t mask = 0;
1074
1075         poll_wait(file, &dev->wait, wait);
1076
1077         if (!list_empty(&dev->read_list))
1078                 mask |= EPOLLIN | EPOLLRDNORM;
1079
1080         return mask;
1081 }
1082 EXPORT_SYMBOL(vhost_chr_poll);
1083
1084 ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
1085                             int noblock)
1086 {
1087         DEFINE_WAIT(wait);
1088         struct vhost_msg_node *node;
1089         ssize_t ret = 0;
1090         unsigned size = sizeof(struct vhost_msg);
1091
1092         if (iov_iter_count(to) < size)
1093                 return 0;
1094
1095         while (1) {
1096                 if (!noblock)
1097                         prepare_to_wait(&dev->wait, &wait,
1098                                         TASK_INTERRUPTIBLE);
1099
1100                 node = vhost_dequeue_msg(dev, &dev->read_list);
1101                 if (node)
1102                         break;
1103                 if (noblock) {
1104                         ret = -EAGAIN;
1105                         break;
1106                 }
1107                 if (signal_pending(current)) {
1108                         ret = -ERESTARTSYS;
1109                         break;
1110                 }
1111                 if (!dev->iotlb) {
1112                         ret = -EBADFD;
1113                         break;
1114                 }
1115
1116                 schedule();
1117         }
1118
1119         if (!noblock)
1120                 finish_wait(&dev->wait, &wait);
1121
1122         if (node) {
1123                 struct vhost_iotlb_msg *msg;
1124                 void *start = &node->msg;
1125
1126                 switch (node->msg.type) {
1127                 case VHOST_IOTLB_MSG:
1128                         size = sizeof(node->msg);
1129                         msg = &node->msg.iotlb;
1130                         break;
1131                 case VHOST_IOTLB_MSG_V2:
1132                         size = sizeof(node->msg_v2);
1133                         msg = &node->msg_v2.iotlb;
1134                         break;
1135                 default:
1136                         BUG();
1137                         break;
1138                 }
1139
1140                 ret = copy_to_iter(start, size, to);
1141                 if (ret != size || msg->type != VHOST_IOTLB_MISS) {
1142                         kfree(node);
1143                         return ret;
1144                 }
1145                 vhost_enqueue_msg(dev, &dev->pending_list, node);
1146         }
1147
1148         return ret;
1149 }
1150 EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1151
1152 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
1153 {
1154         struct vhost_dev *dev = vq->dev;
1155         struct vhost_msg_node *node;
1156         struct vhost_iotlb_msg *msg;
1157         bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);
1158
1159         node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
1160         if (!node)
1161                 return -ENOMEM;
1162
1163         if (v2) {
1164                 node->msg_v2.type = VHOST_IOTLB_MSG_V2;
1165                 msg = &node->msg_v2.iotlb;
1166         } else {
1167                 msg = &node->msg.iotlb;
1168         }
1169
1170         msg->type = VHOST_IOTLB_MISS;
1171         msg->iova = iova;
1172         msg->perm = access;
1173
1174         vhost_enqueue_msg(dev, &dev->read_list, node);
1175
1176         return 0;
1177 }
1178
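/* The extra two bytes ('s') account for the used_event/avail_event fields
 * that follow the rings when VIRTIO_RING_F_EVENT_IDX is negotiated.
 */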
1179 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1180                          struct vring_desc __user *desc,
1181                          struct vring_avail __user *avail,
1182                          struct vring_used __user *used)
1183
1184 {
1185         size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1186
1187         return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
1188                access_ok(VERIFY_READ, avail,
1189                          sizeof *avail + num * sizeof *avail->ring + s) &&
1190                access_ok(VERIFY_WRITE, used,
1191                         sizeof *used + num * sizeof *used->ring + s);
1192 }
1193
1194 static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1195                                  const struct vhost_umem_node *node,
1196                                  int type)
1197 {
1198         int access = (type == VHOST_ADDR_USED) ?
1199                      VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1200
1201         if (likely(node->perm & access))
1202                 vq->meta_iotlb[type] = node;
1203 }
1204
1205 static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1206                             int access, u64 addr, u64 len, int type)
1207 {
1208         const struct vhost_umem_node *node;
1209         struct vhost_umem *umem = vq->iotlb;
1210         u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
1211
1212         if (vhost_vq_meta_fetch(vq, addr, len, type))
1213                 return true;
1214
1215         while (len > s) {
1216                 node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1217                                                            addr,
1218                                                            last);
1219                 if (node == NULL || node->start > addr) {
1220                         vhost_iotlb_miss(vq, addr, access);
1221                         return false;
1222                 } else if (!(node->perm & access)) {
1223                         /* Report the possible access violation by
1224                          * requesting another translation from userspace.
1225                          */
1226                         return false;
1227                 }
1228
1229                 size = node->size - addr + node->start;
1230
1231                 if (orig_addr == addr && size >= len)
1232                         vhost_vq_meta_update(vq, node, type);
1233
1234                 s += size;
1235                 addr += size;
1236         }
1237
1238         return true;
1239 }
1240
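/* Pre-translate the descriptor, avail and used rings so later accesses can
 * hit the meta IOTLB. Returns true when all three are accessible (or when no
 * IOTLB is in use); on a miss a translation request is queued for userspace.
 */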
1241 int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
1242 {
1243         size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1244         unsigned int num = vq->num;
1245
1246         if (!vq->iotlb)
1247                 return 1;
1248
1249         return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
1250                                num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
1251                iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
1252                                sizeof *vq->avail +
1253                                num * sizeof(*vq->avail->ring) + s,
1254                                VHOST_ADDR_AVAIL) &&
1255                iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
1256                                sizeof *vq->used +
1257                                num * sizeof(*vq->used->ring) + s,
1258                                VHOST_ADDR_USED);
1259 }
1260 EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);
1261
1262 /* Can we log writes? */
1263 /* Caller should have device mutex but not vq mutex */
1264 bool vhost_log_access_ok(struct vhost_dev *dev)
1265 {
1266         return memory_access_ok(dev, dev->umem, 1);
1267 }
1268 EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1269
1270 /* Verify access for write logging. */
1271 /* Caller should have vq mutex and device mutex */
1272 static bool vq_log_access_ok(struct vhost_virtqueue *vq,
1273                              void __user *log_base)
1274 {
1275         size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
1276
1277         return vq_memory_access_ok(log_base, vq->umem,
1278                                    vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
1279                 (!vq->log_used || log_access_ok(log_base, vq->log_addr,
1280                                         sizeof *vq->used +
1281                                         vq->num * sizeof *vq->used->ring + s));
1282 }
1283
1284 /* Can we start vq? */
1285 /* Caller should have vq mutex and device mutex */
1286 bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
1287 {
1288         if (!vq_log_access_ok(vq, vq->log_base))
1289                 return false;
1290
1291         /* Access validation occurs at prefetch time with IOTLB */
1292         if (vq->iotlb)
1293                 return true;
1294
1295         return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1296 }
1297 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1298
1299 static struct vhost_umem *vhost_umem_alloc(void)
1300 {
1301         struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);
1302
1303         if (!umem)
1304                 return NULL;
1305
1306         umem->umem_tree = RB_ROOT_CACHED;
1307         umem->numem = 0;
1308         INIT_LIST_HEAD(&umem->umem_list);
1309
1310         return umem;
1311 }
1312
1313 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
1314 {
1315         struct vhost_memory mem, *newmem;
1316         struct vhost_memory_region *region;
1317         struct vhost_umem *newumem, *oldumem;
1318         unsigned long size = offsetof(struct vhost_memory, regions);
1319         int i;
1320
1321         if (copy_from_user(&mem, m, size))
1322                 return -EFAULT;
1323         if (mem.padding)
1324                 return -EOPNOTSUPP;
1325         if (mem.nregions > max_mem_regions)
1326                 return -E2BIG;
1327         newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
1328                         GFP_KERNEL);
1329         if (!newmem)
1330                 return -ENOMEM;
1331
1332         memcpy(newmem, &mem, size);
1333         if (copy_from_user(newmem->regions, m->regions,
1334                            mem.nregions * sizeof *m->regions)) {
1335                 kvfree(newmem);
1336                 return -EFAULT;
1337         }
1338
1339         newumem = vhost_umem_alloc();
1340         if (!newumem) {
1341                 kvfree(newmem);
1342                 return -ENOMEM;
1343         }
1344
1345         for (region = newmem->regions;
1346              region < newmem->regions + mem.nregions;
1347              region++) {
1348                 if (vhost_new_umem_range(newumem,
1349                                          region->guest_phys_addr,
1350                                          region->memory_size,
1351                                          region->guest_phys_addr +
1352                                          region->memory_size - 1,
1353                                          region->userspace_addr,
1354                                          VHOST_ACCESS_RW))
1355                         goto err;
1356         }
1357
1358         if (!memory_access_ok(d, newumem, 0))
1359                 goto err;
1360
1361         oldumem = d->umem;
1362         d->umem = newumem;
1363
1364         /* All memory accesses are done under some VQ mutex. */
1365         for (i = 0; i < d->nvqs; ++i) {
1366                 mutex_lock(&d->vqs[i]->mutex);
1367                 d->vqs[i]->umem = newumem;
1368                 mutex_unlock(&d->vqs[i]->mutex);
1369         }
1370
1371         kvfree(newmem);
1372         vhost_umem_clean(oldumem);
1373         return 0;
1374
1375 err:
1376         vhost_umem_clean(newumem);
1377         kvfree(newmem);
1378         return -EFAULT;
1379 }
1380
1381 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1382 {
1383         struct file *eventfp, *filep = NULL;
1384         bool pollstart = false, pollstop = false;
1385         struct eventfd_ctx *ctx = NULL;
1386         u32 __user *idxp = argp;
1387         struct vhost_virtqueue *vq;
1388         struct vhost_vring_state s;
1389         struct vhost_vring_file f;
1390         struct vhost_vring_addr a;
1391         u32 idx;
1392         long r;
1393
1394         r = get_user(idx, idxp);
1395         if (r < 0)
1396                 return r;
1397         if (idx >= d->nvqs)
1398                 return -ENOBUFS;
1399
1400         vq = d->vqs[idx];
1401
1402         mutex_lock(&vq->mutex);
1403
1404         switch (ioctl) {
1405         case VHOST_SET_VRING_NUM:
1406                 /* Resizing ring with an active backend?
1407                  * You don't want to do that. */
1408                 if (vq->private_data) {
1409                         r = -EBUSY;
1410                         break;
1411                 }
1412                 if (copy_from_user(&s, argp, sizeof s)) {
1413                         r = -EFAULT;
1414                         break;
1415                 }
1416                 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
1417                         r = -EINVAL;
1418                         break;
1419                 }
1420                 vq->num = s.num;
1421                 break;
1422         case VHOST_SET_VRING_BASE:
1423                 /* Moving base with an active backend?
1424                  * You don't want to do that. */
1425                 if (vq->private_data) {
1426                         r = -EBUSY;
1427                         break;
1428                 }
1429                 if (copy_from_user(&s, argp, sizeof s)) {
1430                         r = -EFAULT;
1431                         break;
1432                 }
1433                 if (s.num > 0xffff) {
1434                         r = -EINVAL;
1435                         break;
1436                 }
1437                 vq->last_avail_idx = s.num;
1438                 /* Forget the cached index value. */
1439                 vq->avail_idx = vq->last_avail_idx;
1440                 break;
1441         case VHOST_GET_VRING_BASE:
1442                 s.index = idx;
1443                 s.num = vq->last_avail_idx;
1444                 if (copy_to_user(argp, &s, sizeof s))
1445                         r = -EFAULT;
1446                 break;
1447         case VHOST_SET_VRING_ADDR:
1448                 if (copy_from_user(&a, argp, sizeof a)) {
1449                         r = -EFAULT;
1450                         break;
1451                 }
1452                 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
1453                         r = -EOPNOTSUPP;
1454                         break;
1455                 }
1456                 /* For 32bit, verify that the top 32bits of the user
1457                    data are set to zero. */
1458                 if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
1459                     (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
1460                     (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
1461                         r = -EFAULT;
1462                         break;
1463                 }
1464
1465                 /* Make sure it's safe to cast pointers to vring types. */
1466                 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
1467                 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
1468                 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
1469                     (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
1470                     (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
1471                         r = -EINVAL;
1472                         break;
1473                 }
1474
1475                 /* We only verify access here if a backend is configured;
1476                    if it is not, we skip the check since the size might not
1477                    have been set up yet, and verify once it is configured. */
1478                 if (vq->private_data) {
1479                         if (!vq_access_ok(vq, vq->num,
1480                                 (void __user *)(unsigned long)a.desc_user_addr,
1481                                 (void __user *)(unsigned long)a.avail_user_addr,
1482                                 (void __user *)(unsigned long)a.used_user_addr)) {
1483                                 r = -EINVAL;
1484                                 break;
1485                         }
1486
1487                         /* Also validate log access for used ring if enabled. */
1488                         if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
1489                             !log_access_ok(vq->log_base, a.log_guest_addr,
1490                                            sizeof *vq->used +
1491                                            vq->num * sizeof *vq->used->ring)) {
1492                                 r = -EINVAL;
1493                                 break;
1494                         }
1495                 }
1496
1497                 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
1498                 vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
1499                 vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
1500                 vq->log_addr = a.log_guest_addr;
1501                 vq->used = (void __user *)(unsigned long)a.used_user_addr;
1502                 break;
1503         case VHOST_SET_VRING_KICK:
1504                 if (copy_from_user(&f, argp, sizeof f)) {
1505                         r = -EFAULT;
1506                         break;
1507                 }
1508                 eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
1509                 if (IS_ERR(eventfp)) {
1510                         r = PTR_ERR(eventfp);
1511                         break;
1512                 }
1513                 if (eventfp != vq->kick) {
1514                         pollstop = (filep = vq->kick) != NULL;
1515                         pollstart = (vq->kick = eventfp) != NULL;
1516                 } else
1517                         filep = eventfp;
1518                 break;
1519         case VHOST_SET_VRING_CALL:
1520                 if (copy_from_user(&f, argp, sizeof f)) {
1521                         r = -EFAULT;
1522                         break;
1523                 }
1524                 ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
1525                 if (IS_ERR(ctx)) {
1526                         r = PTR_ERR(ctx);
1527                         break;
1528                 }
1529                 swap(ctx, vq->call_ctx);
1530                 break;
1531         case VHOST_SET_VRING_ERR:
1532                 if (copy_from_user(&f, argp, sizeof f)) {
1533                         r = -EFAULT;
1534                         break;
1535                 }
1536                 ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
1537                 if (IS_ERR(ctx)) {
1538                         r = PTR_ERR(ctx);
1539                         break;
1540                 }
1541                 swap(ctx, vq->error_ctx);
1542                 break;
1543         case VHOST_SET_VRING_ENDIAN:
1544                 r = vhost_set_vring_endian(vq, argp);
1545                 break;
1546         case VHOST_GET_VRING_ENDIAN:
1547                 r = vhost_get_vring_endian(vq, idx, argp);
1548                 break;
1549         case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
1550                 if (copy_from_user(&s, argp, sizeof(s))) {
1551                         r = -EFAULT;
1552                         break;
1553                 }
1554                 vq->busyloop_timeout = s.num;
1555                 break;
1556         case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
1557                 s.index = idx;
1558                 s.num = vq->busyloop_timeout;
1559                 if (copy_to_user(argp, &s, sizeof(s)))
1560                         r = -EFAULT;
1561                 break;
1562         default:
1563                 r = -ENOIOCTLCMD;
1564         }
1565
1566         if (pollstop && vq->handle_kick)
1567                 vhost_poll_stop(&vq->poll);
1568
1569         if (!IS_ERR_OR_NULL(ctx))
1570                 eventfd_ctx_put(ctx);
1571         if (filep)
1572                 fput(filep);
1573
1574         if (pollstart && vq->handle_kick)
1575                 r = vhost_poll_start(&vq->poll, vq->kick);
1576
1577         mutex_unlock(&vq->mutex);
1578
1579         if (pollstop && vq->handle_kick)
1580                 vhost_poll_flush(&vq->poll);
1581         return r;
1582 }
1583 EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
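
/* Illustrative sketch only (not kernel code): a userspace VMM normally wires
 * up a virtqueue through the ioctls handled above, handing over two eventfds.
 * "vhost_fd" and "vq_index" are hypothetical names used for the example.
 *
 *	struct vhost_vring_file kick = { .index = vq_index,
 *					 .fd = eventfd(0, EFD_CLOEXEC) };
 *	struct vhost_vring_file call = { .index = vq_index,
 *					 .fd = eventfd(0, EFD_CLOEXEC) };
 *
 *	ioctl(vhost_fd, VHOST_SET_VRING_KICK, &kick);
 *	ioctl(vhost_fd, VHOST_SET_VRING_CALL, &call);
 *
 * The kick eventfd is polled via vhost_poll_start() above; the call eventfd
 * is stored in vq->call_ctx and signalled from vhost_signal() below.
 */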
1584
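/* Install a fresh, empty IOTLB on the device and discard the old one.  Each
 * virtqueue's cached metadata mappings are reset under vq->mutex so that
 * subsequent accesses fault back into the new IOTLB. */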
1585 int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1586 {
1587         struct vhost_umem *niotlb, *oiotlb;
1588         int i;
1589
1590         niotlb = vhost_umem_alloc();
1591         if (!niotlb)
1592                 return -ENOMEM;
1593
1594         oiotlb = d->iotlb;
1595         d->iotlb = niotlb;
1596
1597         for (i = 0; i < d->nvqs; ++i) {
1598                 struct vhost_virtqueue *vq = d->vqs[i];
1599
1600                 mutex_lock(&vq->mutex);
1601                 vq->iotlb = niotlb;
1602                 __vhost_vq_meta_reset(vq);
1603                 mutex_unlock(&vq->mutex);
1604         }
1605
1606         vhost_umem_clean(oiotlb);
1607
1608         return 0;
1609 }
1610 EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
1611
1612 /* Caller must have device mutex */
1613 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
1614 {
1615         struct eventfd_ctx *ctx;
1616         u64 p;
1617         long r;
1618         int i, fd;
1619
1620         /* If you are not the owner, you can become one */
1621         if (ioctl == VHOST_SET_OWNER) {
1622                 r = vhost_dev_set_owner(d);
1623                 goto done;
1624         }
1625
1626         /* You must be the owner to do anything else */
1627         r = vhost_dev_check_owner(d);
1628         if (r)
1629                 goto done;
1630
1631         switch (ioctl) {
1632         case VHOST_SET_MEM_TABLE:
1633                 r = vhost_set_memory(d, argp);
1634                 break;
1635         case VHOST_SET_LOG_BASE:
1636                 if (copy_from_user(&p, argp, sizeof p)) {
1637                         r = -EFAULT;
1638                         break;
1639                 }
1640                 if ((u64)(unsigned long)p != p) {
1641                         r = -EFAULT;
1642                         break;
1643                 }
1644                 for (i = 0; i < d->nvqs; ++i) {
1645                         struct vhost_virtqueue *vq;
1646                         void __user *base = (void __user *)(unsigned long)p;
1647                         vq = d->vqs[i];
1648                         mutex_lock(&vq->mutex);
1649                         /* If the ring is inactive, we will check when it is enabled. */
1650                         if (vq->private_data && !vq_log_access_ok(vq, base))
1651                                 r = -EFAULT;
1652                         else
1653                                 vq->log_base = base;
1654                         mutex_unlock(&vq->mutex);
1655                 }
1656                 break;
1657         case VHOST_SET_LOG_FD:
1658                 r = get_user(fd, (int __user *)argp);
1659                 if (r < 0)
1660                         break;
1661                 ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
1662                 if (IS_ERR(ctx)) {
1663                         r = PTR_ERR(ctx);
1664                         break;
1665                 }
1666                 swap(ctx, d->log_ctx);
1667                 for (i = 0; i < d->nvqs; ++i) {
1668                         mutex_lock(&d->vqs[i]->mutex);
1669                         d->vqs[i]->log_ctx = d->log_ctx;
1670                         mutex_unlock(&d->vqs[i]->mutex);
1671                 }
1672                 if (ctx)
1673                         eventfd_ctx_put(ctx);
1674                 break;
1675         default:
1676                 r = -ENOIOCTLCMD;
1677                 break;
1678         }
1679 done:
1680         return r;
1681 }
1682 EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
1683
1684 /* TODO: This is really inefficient.  We need something like get_user()
1685  * (instruction directly accesses the data, with an exception table entry
1686  * returning -EFAULT). See Documentation/x86/exception-tables.txt.
1687  */
1688 static int set_bit_to_user(int nr, void __user *addr)
1689 {
1690         unsigned long log = (unsigned long)addr;
1691         struct page *page;
1692         void *base;
1693         int bit = nr + (log % PAGE_SIZE) * 8;
1694         int r;
1695
1696         r = get_user_pages_fast(log, 1, 1, &page);
1697         if (r < 0)
1698                 return r;
1699         BUG_ON(r != 1);
1700         base = kmap_atomic(page);
1701         set_bit(bit, base);
1702         kunmap_atomic(base);
1703         set_page_dirty_lock(page);
1704         put_page(page);
1705         return 0;
1706 }
1707
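/* Mark the dirty-log bitmap for a guest write of write_length bytes at
 * write_address.  The log is a userspace bitmap with one bit per
 * VHOST_PAGE_SIZE page of guest memory, so for example (assuming a 4096-byte
 * VHOST_PAGE_SIZE) an 8-byte write at guest address 0x2ffc spans pages 2 and
 * 3 and sets bits 2 and 3 of the bitmap. */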
1708 static int log_write(void __user *log_base,
1709                      u64 write_address, u64 write_length)
1710 {
1711         u64 write_page = write_address / VHOST_PAGE_SIZE;
1712         int r;
1713
1714         if (!write_length)
1715                 return 0;
1716         write_length += write_address % VHOST_PAGE_SIZE;
1717         for (;;) {
1718                 u64 base = (u64)(unsigned long)log_base;
1719                 u64 log = base + write_page / 8;
1720                 int bit = write_page % 8;
1721                 if ((u64)(unsigned long)log != log)
1722                         return -EFAULT;
1723                 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
1724                 if (r < 0)
1725                         return r;
1726                 if (write_length <= VHOST_PAGE_SIZE)
1727                         break;
1728                 write_length -= VHOST_PAGE_SIZE;
1729                 write_page += 1;
1730         }
1731         return r;
1732 }
1733
1734 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1735                     unsigned int log_num, u64 len)
1736 {
1737         int i, r;
1738
1739         /* Make sure data written is seen before log. */
1740         smp_wmb();
1741         for (i = 0; i < log_num; ++i) {
1742                 u64 l = min(log[i].len, len);
1743                 r = log_write(vq->log_base, log[i].addr, l);
1744                 if (r < 0)
1745                         return r;
1746                 len -= l;
1747                 if (!len) {
1748                         if (vq->log_ctx)
1749                                 eventfd_signal(vq->log_ctx, 1);
1750                         return 0;
1751                 }
1752         }
1753         /* Length written exceeds what we have stored. This is a bug. */
1754         BUG();
1755         return 0;
1756 }
1757 EXPORT_SYMBOL_GPL(vhost_log_write);
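
/* Illustrative only: a backend that supports dirty logging for migration
 * typically records the writable ranges that vhost_get_vq_desc() reports in
 * its log/log_num arguments and, once the buffer has been filled, flushes
 * them with something like:
 *
 *	if (unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)))
 *		vhost_log_write(vq, vq->log, log_num, len);
 *
 * where "len" is the number of bytes actually written into the buffer; see
 * drivers/vhost/net.c for a real user.
 */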
1758
1759 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1760 {
1761         void __user *used;
1762         if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
1763                            &vq->used->flags) < 0)
1764                 return -EFAULT;
1765         if (unlikely(vq->log_used)) {
1766                 /* Make sure the flag is seen before log. */
1767                 smp_wmb();
1768                 /* Log used flag write. */
1769                 used = &vq->used->flags;
1770                 log_write(vq->log_base, vq->log_addr +
1771                           (used - (void __user *)vq->used),
1772                           sizeof vq->used->flags);
1773                 if (vq->log_ctx)
1774                         eventfd_signal(vq->log_ctx, 1);
1775         }
1776         return 0;
1777 }
1778
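/* Publish the avail event index so the guest knows how far we have read the
 * avail ring.  Note the avail_event argument is currently unused: the value
 * written is always taken from vq->avail_idx. */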
1779 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1780 {
1781         if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
1782                            vhost_avail_event(vq)))
1783                 return -EFAULT;
1784         if (unlikely(vq->log_used)) {
1785                 void __user *used;
1786                 /* Make sure the event is seen before log. */
1787                 smp_wmb();
1788                 /* Log avail event write */
1789                 used = vhost_avail_event(vq);
1790                 log_write(vq->log_base, vq->log_addr +
1791                           (used - (void __user *)vq->used),
1792                           sizeof *vhost_avail_event(vq));
1793                 if (vq->log_ctx)
1794                         eventfd_signal(vq->log_ctx, 1);
1795         }
1796         return 0;
1797 }
1798
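/* Called by backends when a virtqueue is started: settle the ring endianness,
 * publish the initial used flags and read back the current used index from
 * the guest-visible ring.  On failure the endianness choice is rolled back. */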
1799 int vhost_vq_init_access(struct vhost_virtqueue *vq)
1800 {
1801         __virtio16 last_used_idx;
1802         int r;
1803         bool is_le = vq->is_le;
1804
1805         if (!vq->private_data)
1806                 return 0;
1807
1808         vhost_init_is_le(vq);
1809
1810         r = vhost_update_used_flags(vq);
1811         if (r)
1812                 goto err;
1813         vq->signalled_used_valid = false;
1814         if (!vq->iotlb &&
1815             !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
1816                 r = -EFAULT;
1817                 goto err;
1818         }
1819         r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
1820         if (r) {
1821                 vq_err(vq, "Can't access used idx at %p\n",
1822                        &vq->used->idx);
1823                 goto err;
1824         }
1825         vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
1826         return 0;
1827
1828 err:
1829         vq->is_le = is_le;
1830         return r;
1831 }
1832 EXPORT_SYMBOL_GPL(vhost_vq_init_access);
1833
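/* Translate a guest physical (or IOVA, when an IOTLB is in use) range into
 * host-userspace iovecs.  Returns the number of iovec entries used, -ENOBUFS
 * if iov_size is too small, -EPERM on an access-permission mismatch, -EFAULT
 * for a range missing from the memory table, or -EAGAIN after queueing an
 * IOTLB miss request for userspace to service. */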
1834 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1835                           struct iovec iov[], int iov_size, int access)
1836 {
1837         const struct vhost_umem_node *node;
1838         struct vhost_dev *dev = vq->dev;
1839         struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
1840         struct iovec *_iov;
1841         u64 s = 0;
1842         int ret = 0;
1843
1844         while ((u64)len > s) {
1845                 u64 size;
1846                 if (unlikely(ret >= iov_size)) {
1847                         ret = -ENOBUFS;
1848                         break;
1849                 }
1850
1851                 node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
1852                                                         addr, addr + len - 1);
1853                 if (node == NULL || node->start > addr) {
1854                         if (umem != dev->iotlb) {
1855                                 ret = -EFAULT;
1856                                 break;
1857                         }
1858                         ret = -EAGAIN;
1859                         break;
1860                 } else if (!(node->perm & access)) {
1861                         ret = -EPERM;
1862                         break;
1863                 }
1864
1865                 _iov = iov + ret;
1866                 size = node->size - addr + node->start;
1867                 _iov->iov_len = min((u64)len - s, size);
1868                 _iov->iov_base = (void __user *)(unsigned long)
1869                         (node->userspace_addr + addr - node->start);
1870                 s += size;
1871                 addr += size;
1872                 ++ret;
1873         }
1874
1875         if (ret == -EAGAIN)
1876                 vhost_iotlb_miss(vq, addr, access);
1877         return ret;
1878 }
1879
1880 /* Each buffer in the virtqueues is actually a chain of descriptors.  This
1881  * function returns the next descriptor in the chain,
1882  * or -1U if we're at the end. */
1883 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
1884 {
1885         unsigned int next;
1886
1887         /* If this descriptor says it doesn't chain, we're done. */
1888         if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
1889                 return -1U;
1890
1891         /* Check they're not leading us off the end of the descriptor table. */
1892         next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
1893         return next;
1894 }
1895
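/* Expand an indirect descriptor: map the descriptor table it points at into
 * the vq->indirect iovecs, then read and translate each entry, appending the
 * results to iov exactly as direct descriptors would be.  Called from
 * vhost_get_vq_desc() below. */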
1896 static int get_indirect(struct vhost_virtqueue *vq,
1897                         struct iovec iov[], unsigned int iov_size,
1898                         unsigned int *out_num, unsigned int *in_num,
1899                         struct vhost_log *log, unsigned int *log_num,
1900                         struct vring_desc *indirect)
1901 {
1902         struct vring_desc desc;
1903         unsigned int i = 0, count, found = 0;
1904         u32 len = vhost32_to_cpu(vq, indirect->len);
1905         struct iov_iter from;
1906         int ret, access;
1907
1908         /* Sanity check */
1909         if (unlikely(len % sizeof desc)) {
1910                 vq_err(vq, "Invalid length in indirect descriptor: "
1911                        "len 0x%llx not multiple of 0x%zx\n",
1912                        (unsigned long long)len,
1913                        sizeof desc);
1914                 return -EINVAL;
1915         }
1916
1917         ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
1918                              UIO_MAXIOV, VHOST_ACCESS_RO);
1919         if (unlikely(ret < 0)) {
1920                 if (ret != -EAGAIN)
1921                         vq_err(vq, "Translation failure %d in indirect.\n", ret);
1922                 return ret;
1923         }
1924         iov_iter_init(&from, READ, vq->indirect, ret, len);
1925
1926         /* We will use the result as an address to read from, so most
1927          * architectures only need a compiler barrier here. */
1928         read_barrier_depends();
1929
1930         count = len / sizeof desc;
1931         /* Buffers are chained via a 16 bit next field, so
1932          * we can have at most 2^16 of these. */
1933         if (unlikely(count > USHRT_MAX + 1)) {
1934                 vq_err(vq, "Indirect buffer length too big: %d\n",
1935                        indirect->len);
1936                 return -E2BIG;
1937         }
1938
1939         do {
1940                 unsigned iov_count = *in_num + *out_num;
1941                 if (unlikely(++found > count)) {
1942                         vq_err(vq, "Loop detected: last one at %u "
1943                                "indirect size %u\n",
1944                                i, count);
1945                         return -EINVAL;
1946                 }
1947                 if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
1948                         vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1949                                i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1950                         return -EINVAL;
1951                 }
1952                 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
1953                         vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1954                                i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
1955                         return -EINVAL;
1956                 }
1957
1958                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
1959                         access = VHOST_ACCESS_WO;
1960                 else
1961                         access = VHOST_ACCESS_RO;
1962
1963                 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
1964                                      vhost32_to_cpu(vq, desc.len), iov + iov_count,
1965                                      iov_size - iov_count, access);
1966                 if (unlikely(ret < 0)) {
1967                         if (ret != -EAGAIN)
1968                                 vq_err(vq, "Translation failure %d indirect idx %d\n",
1969                                         ret, i);
1970                         return ret;
1971                 }
1972                 /* If this is an input descriptor, increment that count. */
1973                 if (access == VHOST_ACCESS_WO) {
1974                         *in_num += ret;
1975                         if (unlikely(log)) {
1976                                 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
1977                                 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
1978                                 ++*log_num;
1979                         }
1980                 } else {
1981                         /* If it's an output descriptor, they're all supposed
1982                          * to come before any input descriptors. */
1983                         if (unlikely(*in_num)) {
1984                                 vq_err(vq, "Indirect descriptor "
1985                                        "has out after in: idx %d\n", i);
1986                                 return -EINVAL;
1987                         }
1988                         *out_num += ret;
1989                 }
1990         } while ((i = next_desc(vq, &desc)) != -1);
1991         return 0;
1992 }
1993
1994 /* This looks in the virtqueue for the first available buffer, and converts
1995  * it to an iovec for convenient access.  Since descriptors consist of some
1996  * number of output then some number of input descriptors, it's actually two
1997  * iovecs, but we pack them into one and note how many of each there were.
1998  *
1999  * This function returns the descriptor number found, or vq->num (which is
2000  * never a valid descriptor number) if none was found.  A negative code is
2001  * returned on error. */
2002 int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2003                       struct iovec iov[], unsigned int iov_size,
2004                       unsigned int *out_num, unsigned int *in_num,
2005                       struct vhost_log *log, unsigned int *log_num)
2006 {
2007         struct vring_desc desc;
2008         unsigned int i, head, found = 0;
2009         u16 last_avail_idx;
2010         __virtio16 avail_idx;
2011         __virtio16 ring_head;
2012         int ret, access;
2013
2014         /* Check it isn't doing very strange things with descriptor numbers. */
2015         last_avail_idx = vq->last_avail_idx;
2016
2017         if (vq->avail_idx == vq->last_avail_idx) {
2018                 if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
2019                         vq_err(vq, "Failed to access avail idx at %p\n",
2020                                 &vq->avail->idx);
2021                         return -EFAULT;
2022                 }
2023                 vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2024
2025                 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
2026                         vq_err(vq, "Guest moved avail index from %u to %u",
2027                                 last_avail_idx, vq->avail_idx);
2028                         return -EFAULT;
2029                 }
2030
2031                 /* If there's nothing new since last we looked, return
2032                  * invalid.
2033                  */
2034                 if (vq->avail_idx == last_avail_idx)
2035                         return vq->num;
2036
2037                 /* Only get avail ring entries after they have been
2038                  * exposed by guest.
2039                  */
2040                 smp_rmb();
2041         }
2042
2043         /* Grab the next descriptor number they're advertising, and increment
2044          * the index we've seen. */
2045         if (unlikely(vhost_get_avail(vq, ring_head,
2046                      &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
2047                 vq_err(vq, "Failed to read head: idx %d address %p\n",
2048                        last_avail_idx,
2049                        &vq->avail->ring[last_avail_idx % vq->num]);
2050                 return -EFAULT;
2051         }
2052
2053         head = vhost16_to_cpu(vq, ring_head);
2054
2055         /* If their number is silly, that's an error. */
2056         if (unlikely(head >= vq->num)) {
2057                 vq_err(vq, "Guest says index %u > %u is available",
2058                        head, vq->num);
2059                 return -EINVAL;
2060         }
2061
2062         /* When we start there are neither input nor output descriptors. */
2063         *out_num = *in_num = 0;
2064         if (unlikely(log))
2065                 *log_num = 0;
2066
2067         i = head;
2068         do {
2069                 unsigned iov_count = *in_num + *out_num;
2070                 if (unlikely(i >= vq->num)) {
2071                         vq_err(vq, "Desc index is %u > %u, head = %u",
2072                                i, vq->num, head);
2073                         return -EINVAL;
2074                 }
2075                 if (unlikely(++found > vq->num)) {
2076                         vq_err(vq, "Loop detected: last one at %u "
2077                                "vq size %u head %u\n",
2078                                i, vq->num, head);
2079                         return -EINVAL;
2080                 }
2081                 ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
2082                                            sizeof desc);
2083                 if (unlikely(ret)) {
2084                         vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
2085                                i, vq->desc + i);
2086                         return -EFAULT;
2087                 }
2088                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
2089                         ret = get_indirect(vq, iov, iov_size,
2090                                            out_num, in_num,
2091                                            log, log_num, &desc);
2092                         if (unlikely(ret < 0)) {
2093                                 if (ret != -EAGAIN)
2094                                         vq_err(vq, "Failure detected "
2095                                                 "in indirect descriptor at idx %d\n", i);
2096                                 return ret;
2097                         }
2098                         continue;
2099                 }
2100
2101                 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2102                         access = VHOST_ACCESS_WO;
2103                 else
2104                         access = VHOST_ACCESS_RO;
2105                 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2106                                      vhost32_to_cpu(vq, desc.len), iov + iov_count,
2107                                      iov_size - iov_count, access);
2108                 if (unlikely(ret < 0)) {
2109                         if (ret != -EAGAIN)
2110                                 vq_err(vq, "Translation failure %d descriptor idx %d\n",
2111                                         ret, i);
2112                         return ret;
2113                 }
2114                 if (access == VHOST_ACCESS_WO) {
2115                         /* If this is an input descriptor,
2116                          * increment that count. */
2117                         *in_num += ret;
2118                         if (unlikely(log)) {
2119                                 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2120                                 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2121                                 ++*log_num;
2122                         }
2123                 } else {
2124                         /* If it's an output descriptor, they're all supposed
2125                          * to come before any input descriptors. */
2126                         if (unlikely(*in_num)) {
2127                                 vq_err(vq, "Descriptor has out after in: "
2128                                        "idx %d\n", i);
2129                                 return -EINVAL;
2130                         }
2131                         *out_num += ret;
2132                 }
2133         } while ((i = next_desc(vq, &desc)) != -1);
2134
2135         /* On success, increment avail index. */
2136         vq->last_avail_idx++;
2137
2138         /* Assume notifications from guest are disabled at this point,
2139          * if they aren't we would need to update avail_event index. */
2140         BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
2141         return head;
2142 }
2143 EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
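
/* Illustrative only: the usual shape of a backend kick handler built on the
 * helpers above and below.  my_consume() is a hypothetical callback, and
 * "out", "in" and "len" are assumed to be declared by the caller; see
 * drivers/vhost/net.c and drivers/vhost/scsi.c for the real users.
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					     &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		len = my_consume(vq->iov, out, in);
 *		vhost_add_used_and_signal(dev, vq, head, len);
 *	}
 */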
2144
2145 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
2146 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
2147 {
2148         vq->last_avail_idx -= n;
2149 }
2150 EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2151
2152 /* After we've used one of their buffers, we tell them about it.  We'll then
2153  * want to notify the guest, using eventfd. */
2154 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2155 {
2156         struct vring_used_elem heads = {
2157                 cpu_to_vhost32(vq, head),
2158                 cpu_to_vhost32(vq, len)
2159         };
2160
2161         return vhost_add_used_n(vq, &heads, 1);
2162 }
2163 EXPORT_SYMBOL_GPL(vhost_add_used);
2164
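/* Write "count" used elements into the used ring starting at the slot for the
 * current last_used_idx, log the writes when dirty logging is enabled and
 * advance last_used_idx.  The caller handles ring wrap-around by splitting the
 * copy, so a single call never crosses the end of the ring. */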
2165 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2166                             struct vring_used_elem *heads,
2167                             unsigned count)
2168 {
2169         struct vring_used_elem __user *used;
2170         u16 old, new;
2171         int start;
2172
2173         start = vq->last_used_idx & (vq->num - 1);
2174         used = vq->used->ring + start;
2175         if (count == 1) {
2176                 if (vhost_put_user(vq, heads[0].id, &used->id)) {
2177                         vq_err(vq, "Failed to write used id");
2178                         return -EFAULT;
2179                 }
2180                 if (vhost_put_user(vq, heads[0].len, &used->len)) {
2181                         vq_err(vq, "Failed to write used len");
2182                         return -EFAULT;
2183                 }
2184         } else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) {
2185                 vq_err(vq, "Failed to write used");
2186                 return -EFAULT;
2187         }
2188         if (unlikely(vq->log_used)) {
2189                 /* Make sure data is seen before log. */
2190                 smp_wmb();
2191                 /* Log used ring entry write. */
2192                 log_write(vq->log_base,
2193                           vq->log_addr +
2194                            ((void __user *)used - (void __user *)vq->used),
2195                           count * sizeof *used);
2196         }
2197         old = vq->last_used_idx;
2198         new = (vq->last_used_idx += count);
2199         /* If the driver never bothers to signal in a very long while,
2200          * used index might wrap around. If that happens, invalidate
2201          * signalled_used index we stored. TODO: make sure driver
2202          * signals at least once in 2^16 and remove this. */
2203         if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
2204                 vq->signalled_used_valid = false;
2205         return 0;
2206 }
2207
2208 /* After we've used one of their buffers, we tell them about it.  We'll then
2209  * want to notify the guest, using eventfd. */
2210 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2211                      unsigned count)
2212 {
2213         int start, n, r;
2214
2215         start = vq->last_used_idx & (vq->num - 1);
2216         n = vq->num - start;
2217         if (n < count) {
2218                 r = __vhost_add_used_n(vq, heads, n);
2219                 if (r < 0)
2220                         return r;
2221                 heads += n;
2222                 count -= n;
2223         }
2224         r = __vhost_add_used_n(vq, heads, count);
2225
2226         /* Make sure buffer is written before we update index. */
2227         smp_wmb();
2228         if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
2229                            &vq->used->idx)) {
2230                 vq_err(vq, "Failed to increment used idx");
2231                 return -EFAULT;
2232         }
2233         if (unlikely(vq->log_used)) {
2234                 /* Log used index update. */
2235                 log_write(vq->log_base,
2236                           vq->log_addr + offsetof(struct vring_used, idx),
2237                           sizeof vq->used->idx);
2238                 if (vq->log_ctx)
2239                         eventfd_signal(vq->log_ctx, 1);
2240         }
2241         return r;
2242 }
2243 EXPORT_SYMBOL_GPL(vhost_add_used_n);
2244
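/* Decide whether the guest needs an interrupt for the buffers we have just
 * used.  Without VIRTIO_RING_F_EVENT_IDX this is a plain check of the avail
 * flags; with it we compare the guest's used_event index against the range of
 * used entries added since the last signal (vring_need_event()). */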
2245 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2246 {
2247         __u16 old, new;
2248         __virtio16 event;
2249         bool v;
2250         /* Flush out used index updates. This is paired
2251          * with the barrier that the Guest executes when enabling
2252          * interrupts. */
2253         smp_mb();
2254
2255         if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2256             unlikely(vq->avail_idx == vq->last_avail_idx))
2257                 return true;
2258
2259         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2260                 __virtio16 flags;
2261                 if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
2262                         vq_err(vq, "Failed to get flags");
2263                         return true;
2264                 }
2265                 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
2266         }
2267         old = vq->signalled_used;
2268         v = vq->signalled_used_valid;
2269         new = vq->signalled_used = vq->last_used_idx;
2270         vq->signalled_used_valid = true;
2271
2272         if (unlikely(!v))
2273                 return true;
2274
2275         if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
2276                 vq_err(vq, "Failed to get used event idx");
2277                 return true;
2278         }
2279         return vring_need_event(vhost16_to_cpu(vq, event), new, old);
2280 }
2281
2282 /* This actually signals the guest, using eventfd. */
2283 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2284 {
2285         /* Signal the Guest to tell them we used something up. */
2286         if (vq->call_ctx && vhost_notify(dev, vq))
2287                 eventfd_signal(vq->call_ctx, 1);
2288 }
2289 EXPORT_SYMBOL_GPL(vhost_signal);
2290
2291 /* And here's the combo meal deal.  Supersize me! */
2292 void vhost_add_used_and_signal(struct vhost_dev *dev,
2293                                struct vhost_virtqueue *vq,
2294                                unsigned int head, int len)
2295 {
2296         vhost_add_used(vq, head, len);
2297         vhost_signal(dev, vq);
2298 }
2299 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
2300
2301 /* multi-buffer version of vhost_add_used_and_signal */
2302 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
2303                                  struct vhost_virtqueue *vq,
2304                                  struct vring_used_elem *heads, unsigned count)
2305 {
2306         vhost_add_used_n(vq, heads, count);
2307         vhost_signal(dev, vq);
2308 }
2309 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
2310
2311 /* Return true if we're sure that the available ring is empty. */
2312 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2313 {
2314         __virtio16 avail_idx;
2315         int r;
2316
2317         if (vq->avail_idx != vq->last_avail_idx)
2318                 return false;
2319
2320         r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
2321         if (unlikely(r))
2322                 return false;
2323         vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
2324
2325         return vq->avail_idx == vq->last_avail_idx;
2326 }
2327 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
2328
2329 /* OK, now we need to know about added descriptors. */
2330 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2331 {
2332         __virtio16 avail_idx;
2333         int r;
2334
2335         if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
2336                 return false;
2337         vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
2338         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2339                 r = vhost_update_used_flags(vq);
2340                 if (r) {
2341                         vq_err(vq, "Failed to enable notification at %p: %d\n",
2342                                &vq->used->flags, r);
2343                         return false;
2344                 }
2345         } else {
2346                 r = vhost_update_avail_event(vq, vq->avail_idx);
2347                 if (r) {
2348                         vq_err(vq, "Failed to update avail event index at %p: %d\n",
2349                                vhost_avail_event(vq), r);
2350                         return false;
2351                 }
2352         }
2353         /* They could have slipped one in as we were doing that: make
2354          * sure it's written, then check again. */
2355         smp_mb();
2356         r = vhost_get_avail(vq, avail_idx, &vq->avail->idx);
2357         if (r) {
2358                 vq_err(vq, "Failed to check avail idx at %p: %d\n",
2359                        &vq->avail->idx, r);
2360                 return false;
2361         }
2362
2363         return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
2364 }
2365 EXPORT_SYMBOL_GPL(vhost_enable_notify);
2366
2367 /* We don't need to be notified again. */
2368 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
2369 {
2370         int r;
2371
2372         if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
2373                 return;
2374         vq->used_flags |= VRING_USED_F_NO_NOTIFY;
2375         if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
2376                 r = vhost_update_used_flags(vq);
2377                 if (r)
2378                         vq_err(vq, "Failed to disable notification at %p: %d\n",
2379                                &vq->used->flags, r);
2380         }
2381 }
2382 EXPORT_SYMBOL_GPL(vhost_disable_notify);
2383
2384 /* Create a new message. */
2385 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
2386 {
2387         struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
2388         if (!node)
2389                 return NULL;
2390
2391         /* Make sure all padding within the structure is initialized. */
2392         memset(&node->msg, 0, sizeof node->msg);
2393         node->vq = vq;
2394         node->msg.type = type;
2395         return node;
2396 }
2397 EXPORT_SYMBOL_GPL(vhost_new_msg);
2398
2399 void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
2400                        struct vhost_msg_node *node)
2401 {
2402         spin_lock(&dev->iotlb_lock);
2403         list_add_tail(&node->node, head);
2404         spin_unlock(&dev->iotlb_lock);
2405
2406         wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
2407 }
2408 EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
2409
2410 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
2411                                          struct list_head *head)
2412 {
2413         struct vhost_msg_node *node = NULL;
2414
2415         spin_lock(&dev->iotlb_lock);
2416         if (!list_empty(head)) {
2417                 node = list_first_entry(head, struct vhost_msg_node,
2418                                         node);
2419                 list_del(&node->node);
2420         }
2421         spin_unlock(&dev->iotlb_lock);
2422
2423         return node;
2424 }
2425 EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
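
/* Illustrative only: vhost_new_msg(), vhost_enqueue_msg() and
 * vhost_dequeue_msg() form the message channel used for IOTLB misses and
 * updates.  A miss queued on dev->read_list wakes readers of the vhost
 * chardev; the read side then does, roughly:
 *
 *	node = vhost_dequeue_msg(dev, &dev->read_list);
 *	if (node)
 *		vhost_enqueue_msg(dev, &dev->pending_list, node);
 *
 * after copying node->msg to userspace, so the request stays pending until
 * userspace replies with an IOTLB update (see vhost_chr_read_iter() and
 * vhost_chr_write_iter() earlier in this file). */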
2426
2427
2428 static int __init vhost_init(void)
2429 {
2430         return 0;
2431 }
2432
2433 static void __exit vhost_exit(void)
2434 {
2435 }
2436
2437 module_init(vhost_init);
2438 module_exit(vhost_exit);
2439
2440 MODULE_VERSION("0.0.1");
2441 MODULE_LICENSE("GPL v2");
2442 MODULE_AUTHOR("Michael S. Tsirkin");
2443 MODULE_DESCRIPTION("Host kernel accelerator for virtio");