1 /* Copyright (C) 2009 Red Hat, Inc.
2  * Author: Michael S. Tsirkin <mst@redhat.com>
3  *
4  * This work is licensed under the terms of the GNU GPL, version 2.
5  *
6  * virtio-net server in host kernel.
7  */
8
9 #include <linux/compat.h>
10 #include <linux/eventfd.h>
11 #include <linux/vhost.h>
12 #include <linux/virtio_net.h>
13 #include <linux/miscdevice.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/mutex.h>
17 #include <linux/workqueue.h>
18 #include <linux/file.h>
19 #include <linux/slab.h>
20 #include <linux/sched/clock.h>
21 #include <linux/sched/signal.h>
22 #include <linux/vmalloc.h>
23
24 #include <linux/net.h>
25 #include <linux/if_packet.h>
26 #include <linux/if_arp.h>
27 #include <linux/if_tun.h>
28 #include <linux/if_macvlan.h>
29 #include <linux/if_tap.h>
30 #include <linux/if_vlan.h>
31 #include <linux/skb_array.h>
32 #include <linux/skbuff.h>
33
34 #include <net/sock.h>
35 #include <net/xdp.h>
36
37 #include "vhost.h"
38
39 static int experimental_zcopytx = 1;
40 module_param(experimental_zcopytx, int, 0444);
41 MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
42                                        " 1 - Enable; 0 - Disable");
43
44 /* Max number of bytes transferred before requeueing the job.
45  * Using this limit prevents one virtqueue from starving others. */
46 #define VHOST_NET_WEIGHT 0x80000
47
48 /* Max number of packets transferred before requeueing the job.
49  * Using this limit prevents one virtqueue from starving others with small
50  * pkts.
51  */
52 #define VHOST_NET_PKT_WEIGHT 256
53
54 /* MAX number of TX used buffers for outstanding zerocopy */
55 #define VHOST_MAX_PEND 128
56 #define VHOST_GOODCOPY_LEN 256
57
58 /*
59  * For transmit, used buffer len is unused; we override it to track buffer
60  * status internally; used for zerocopy tx only.
61  */
62 /* Lower device DMA failed */
63 #define VHOST_DMA_FAILED_LEN    ((__force __virtio32)3)
64 /* Lower device DMA done */
65 #define VHOST_DMA_DONE_LEN      ((__force __virtio32)2)
66 /* Lower device DMA in progress */
67 #define VHOST_DMA_IN_PROGRESS   ((__force __virtio32)1)
68 /* Buffer unused */
69 #define VHOST_DMA_CLEAR_LEN     ((__force __virtio32)0)
70
71 #define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
72
73 enum {
74         VHOST_NET_FEATURES = VHOST_FEATURES |
75                          (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
76                          (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
77                          (1ULL << VIRTIO_F_IOMMU_PLATFORM)
78 };
79
80 enum {
81         VHOST_NET_VQ_RX = 0,
82         VHOST_NET_VQ_TX = 1,
83         VHOST_NET_VQ_MAX = 2,
84 };
85
86 struct vhost_net_ubuf_ref {
87         /* refcount follows semantics similar to kref:
88          *  0: object is released
89          *  1: no outstanding ubufs
90          * >1: outstanding ubufs
91          */
92         atomic_t refcount;
93         wait_queue_head_t wait;
94         struct vhost_virtqueue *vq;
95 };
96
97 #define VHOST_NET_BATCH 64
98 struct vhost_net_buf {
99         void **queue;
100         int tail;
101         int head;
102 };
103
104 struct vhost_net_virtqueue {
105         struct vhost_virtqueue vq;
106         size_t vhost_hlen;
107         size_t sock_hlen;
108         /* vhost zerocopy support fields below: */
109         /* last used idx for outstanding DMA zerocopy buffers */
110         int upend_idx;
111         /* For TX, first used idx for DMA done zerocopy buffers
112          * For RX, number of batched heads
113          */
114         int done_idx;
115         /* an array of userspace buffers info */
116         struct ubuf_info *ubuf_info;
117         /* Reference counting for outstanding ubufs.
118          * Protected by vq mutex. Writers must also take device mutex. */
119         struct vhost_net_ubuf_ref *ubufs;
120         struct ptr_ring *rx_ring;
121         struct vhost_net_buf rxq;
122 };
123
124 struct vhost_net {
125         struct vhost_dev dev;
126         struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
127         struct vhost_poll poll[VHOST_NET_VQ_MAX];
128         /* Number of TX recently submitted.
129          * Protected by tx vq lock. */
130         unsigned tx_packets;
131         /* Number of times zerocopy TX recently failed.
132          * Protected by tx vq lock. */
133         unsigned tx_zcopy_err;
134         /* Flush in progress. Protected by tx vq lock. */
135         bool tx_flush;
136 };
137
138 static unsigned vhost_net_zcopy_mask __read_mostly;
139
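/* Helpers for the per-vq RX batch cache (struct vhost_net_buf): pointers are
 * consumed from the tap/tun ptr_ring in batches of up to VHOST_NET_BATCH and
 * then handed out one at a time via the head/tail indices.
 */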
140 static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
141 {
142         if (rxq->tail != rxq->head)
143                 return rxq->queue[rxq->head];
144         else
145                 return NULL;
146 }
147
148 static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
149 {
150         return rxq->tail - rxq->head;
151 }
152
153 static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
154 {
155         return rxq->tail == rxq->head;
156 }
157
158 static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
159 {
160         void *ret = vhost_net_buf_get_ptr(rxq);
161         ++rxq->head;
162         return ret;
163 }
164
165 static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
166 {
167         struct vhost_net_buf *rxq = &nvq->rxq;
168
169         rxq->head = 0;
170         rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
171                                               VHOST_NET_BATCH);
172         return rxq->tail;
173 }
174
175 static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
176 {
177         struct vhost_net_buf *rxq = &nvq->rxq;
178
179         if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
180                 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
181                                    vhost_net_buf_get_size(rxq),
182                                    tun_ptr_free);
183                 rxq->head = rxq->tail = 0;
184         }
185 }
186
187 static int vhost_net_buf_peek_len(void *ptr)
188 {
189         if (tun_is_xdp_frame(ptr)) {
190                 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
191
192                 return xdpf->len;
193         }
194
195         return __skb_array_len_with_tag(ptr);
196 }
197
198 static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
199 {
200         struct vhost_net_buf *rxq = &nvq->rxq;
201
202         if (!vhost_net_buf_is_empty(rxq))
203                 goto out;
204
205         if (!vhost_net_buf_produce(nvq))
206                 return 0;
207
208 out:
209         return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
210 }
211
212 static void vhost_net_buf_init(struct vhost_net_buf *rxq)
213 {
214         rxq->head = rxq->tail = 0;
215 }
216
217 static void vhost_net_enable_zcopy(int vq)
218 {
219         vhost_net_zcopy_mask |= 0x1 << vq;
220 }
221
222 static struct vhost_net_ubuf_ref *
223 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
224 {
225         struct vhost_net_ubuf_ref *ubufs;
226         /* No zero copy backend? Nothing to count. */
227         if (!zcopy)
228                 return NULL;
229         ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
230         if (!ubufs)
231                 return ERR_PTR(-ENOMEM);
232         atomic_set(&ubufs->refcount, 1);
233         init_waitqueue_head(&ubufs->wait);
234         ubufs->vq = vq;
235         return ubufs;
236 }
237
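/* Drop one reference on the ubuf tracker; wake up any waiter (see
 * vhost_net_ubuf_put_and_wait()) once the count reaches zero.
 */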
238 static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
239 {
240         int r = atomic_sub_return(1, &ubufs->refcount);
241         if (unlikely(!r))
242                 wake_up(&ubufs->wait);
243         return r;
244 }
245
246 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
247 {
248         vhost_net_ubuf_put(ubufs);
249         wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
250 }
251
252 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
253 {
254         vhost_net_ubuf_put_and_wait(ubufs);
255         kfree(ubufs);
256 }
257
258 static void vhost_net_clear_ubuf_info(struct vhost_net *n)
259 {
260         int i;
261
262         for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
263                 kfree(n->vqs[i].ubuf_info);
264                 n->vqs[i].ubuf_info = NULL;
265         }
266 }
267
268 static int vhost_net_set_ubuf_info(struct vhost_net *n)
269 {
270         bool zcopy;
271         int i;
272
273         for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
274                 zcopy = vhost_net_zcopy_mask & (0x1 << i);
275                 if (!zcopy)
276                         continue;
277                 n->vqs[i].ubuf_info =
278                         kmalloc_array(UIO_MAXIOV,
279                                       sizeof(*n->vqs[i].ubuf_info),
280                                       GFP_KERNEL);
281                 if (!n->vqs[i].ubuf_info)
282                         goto err;
283         }
284         return 0;
285
286 err:
287         vhost_net_clear_ubuf_info(n);
288         return -ENOMEM;
289 }
290
291 static void vhost_net_vq_reset(struct vhost_net *n)
292 {
293         int i;
294
295         vhost_net_clear_ubuf_info(n);
296
297         for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
298                 n->vqs[i].done_idx = 0;
299                 n->vqs[i].upend_idx = 0;
300                 n->vqs[i].ubufs = NULL;
301                 n->vqs[i].vhost_hlen = 0;
302                 n->vqs[i].sock_hlen = 0;
303                 vhost_net_buf_init(&n->vqs[i].rxq);
304         }
305
306 }
307
308 static void vhost_net_tx_packet(struct vhost_net *net)
309 {
310         ++net->tx_packets;
311         if (net->tx_packets < 1024)
312                 return;
313         net->tx_packets = 0;
314         net->tx_zcopy_err = 0;
315 }
316
317 static void vhost_net_tx_err(struct vhost_net *net)
318 {
319         ++net->tx_zcopy_err;
320 }
321
322 static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
323 {
324         /* TX flush waits for outstanding DMAs to be done.
325          * Don't start new DMAs.
326          */
327         return !net->tx_flush &&
328                 net->tx_packets / 64 >= net->tx_zcopy_err;
329 }
330
331 static bool vhost_sock_zcopy(struct socket *sock)
332 {
333         return unlikely(experimental_zcopytx) &&
334                 sock_flag(sock->sk, SOCK_ZEROCOPY);
335 }
336
337 /* The lower device driver may complete DMAs out of order.  upend_idx tracks
338  * the end of the outstanding used idx range, done_idx tracks its head.  Once
339  * the lower device has completed DMAs contiguously from done_idx, we signal
340  * the used idx to the guest.
341  */
342 static void vhost_zerocopy_signal_used(struct vhost_net *net,
343                                        struct vhost_virtqueue *vq)
344 {
345         struct vhost_net_virtqueue *nvq =
346                 container_of(vq, struct vhost_net_virtqueue, vq);
347         int i, add;
348         int j = 0;
349
350         for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
351                 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
352                         vhost_net_tx_err(net);
353                 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
354                         vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
355                         ++j;
356                 } else
357                         break;
358         }
359         while (j) {
360                 add = min(UIO_MAXIOV - nvq->done_idx, j);
361                 vhost_add_used_and_signal_n(vq->dev, vq,
362                                             &vq->heads[nvq->done_idx], add);
363                 nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
364                 j -= add;
365         }
366 }
367
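/* Zerocopy completion callback, invoked by the lower device when it is done
 * with the userspace buffers of a transmitted skb.  Records success or failure
 * in the used heads array and drops the reference taken at submission time.
 */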
368 static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
369 {
370         struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
371         struct vhost_virtqueue *vq = ubufs->vq;
372         int cnt;
373
374         rcu_read_lock_bh();
375
376         /* Set len to mark this descriptor's buffers as having completed DMA */
377         vq->heads[ubuf->desc].len = success ?
378                 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
379         cnt = vhost_net_ubuf_put(ubufs);
380
381         /*
382          * Trigger polling thread if guest stopped submitting new buffers:
383          * in this case, the refcount after decrement will eventually reach 1.
384          * We also trigger polling periodically after every 16 packets
385          * (the value 16 here is more or less arbitrary; it's tuned to trigger
386          * less than 10% of the time).
387          */
388         if (cnt <= 1 || !(cnt % 16))
389                 vhost_poll_queue(&vq->poll);
390
391         rcu_read_unlock_bh();
392 }
393
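/* Coarse clock for busy polling: local_clock() returns nanoseconds, so
 * shifting right by 10 yields roughly microseconds, the same unit used in the
 * vq->busyloop_timeout comparisons below.
 */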
394 static inline unsigned long busy_clock(void)
395 {
396         return local_clock() >> 10;
397 }
398
399 static bool vhost_can_busy_poll(unsigned long endtime)
400 {
401         return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
402                       !signal_pending(current));
403 }
404
405 static void vhost_net_disable_vq(struct vhost_net *n,
406                                  struct vhost_virtqueue *vq)
407 {
408         struct vhost_net_virtqueue *nvq =
409                 container_of(vq, struct vhost_net_virtqueue, vq);
410         struct vhost_poll *poll = n->poll + (nvq - n->vqs);
411         if (!vq->private_data)
412                 return;
413         vhost_poll_stop(poll);
414 }
415
416 static int vhost_net_enable_vq(struct vhost_net *n,
417                                 struct vhost_virtqueue *vq)
418 {
419         struct vhost_net_virtqueue *nvq =
420                 container_of(vq, struct vhost_net_virtqueue, vq);
421         struct vhost_poll *poll = n->poll + (nvq - n->vqs);
422         struct socket *sock;
423
424         sock = vq->private_data;
425         if (!sock)
426                 return 0;
427
428         return vhost_poll_start(poll, sock->file);
429 }
430
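/* Flush the heads batched in vq->heads[0..done_idx) to the used ring, signal
 * the guest, and reset done_idx.
 */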
431 static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
432 {
433         struct vhost_virtqueue *vq = &nvq->vq;
434         struct vhost_dev *dev = vq->dev;
435
436         if (!nvq->done_idx)
437                 return;
438
439         vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
440         nvq->done_idx = 0;
441 }
442
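/* Fetch the next available TX descriptor.  If the ring looks empty and a
 * busyloop timeout is configured, busy-poll for new avail buffers (bailing out
 * if other vhost work shows up) and then retry once.
 */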
443 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
444                                     struct vhost_net_virtqueue *nvq,
445                                     unsigned int *out_num, unsigned int *in_num,
446                                     bool *busyloop_intr)
447 {
448         struct vhost_virtqueue *vq = &nvq->vq;
449         unsigned long uninitialized_var(endtime);
450         int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
451                                   out_num, in_num, NULL, NULL);
452
453         if (r == vq->num && vq->busyloop_timeout) {
454                 if (!vhost_sock_zcopy(vq->private_data))
455                         vhost_net_signal_used(nvq);
456                 preempt_disable();
457                 endtime = busy_clock() + vq->busyloop_timeout;
458                 while (vhost_can_busy_poll(endtime)) {
459                         if (vhost_has_work(vq->dev)) {
460                                 *busyloop_intr = true;
461                                 break;
462                         }
463                         if (!vhost_vq_avail_empty(vq->dev, vq))
464                                 break;
465                         cpu_relax();
466                 }
467                 preempt_enable();
468                 r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
469                                       out_num, in_num, NULL, NULL);
470         }
471
472         return r;
473 }
474
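/* True when the number of outstanding zerocopy buffers exceeds the cap (the
 * smaller of VHOST_MAX_PEND and a quarter of the ring size); callers use this
 * to fall back to copying and to stop requesting MSG_MORE batching.
 */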
475 static bool vhost_exceeds_maxpend(struct vhost_net *net)
476 {
477         struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
478         struct vhost_virtqueue *vq = &nvq->vq;
479
480         return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
481                min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
482 }
483
484 static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
485                             size_t hdr_size, int out)
486 {
487         /* Skip header. TODO: support TSO. */
488         size_t len = iov_length(vq->iov, out);
489
490         iov_iter_init(iter, WRITE, vq->iov, out, len);
491         iov_iter_advance(iter, hdr_size);
492
493         return iov_iter_count(iter);
494 }
495
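/* True once enough bytes or packets have been handled in one invocation that
 * the handler should requeue itself instead of starving other virtqueues.
 */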
496 static bool vhost_exceeds_weight(int pkts, int total_len)
497 {
498         return total_len >= VHOST_NET_WEIGHT ||
499                pkts >= VHOST_NET_PKT_WEIGHT;
500 }
501
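/* Get one TX descriptor chain, check that it contains no writable ("in")
 * descriptors, and set up msg_iter to point past the vnet header.  Returns the
 * head index, vq->num when the ring is empty, or a negative errno.
 */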
502 static int get_tx_bufs(struct vhost_net *net,
503                        struct vhost_net_virtqueue *nvq,
504                        struct msghdr *msg,
505                        unsigned int *out, unsigned int *in,
506                        size_t *len, bool *busyloop_intr)
507 {
508         struct vhost_virtqueue *vq = &nvq->vq;
509         int ret;
510
511         ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, busyloop_intr);
512
513         if (ret < 0 || ret == vq->num)
514                 return ret;
515
516         if (*in) {
517                 vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n",
518                         *out, *in);
519                 return -EFAULT;
520         }
521
522         /* Sanity check */
523         *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
524         if (*len == 0) {
525                 vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
526                         *len, nvq->vhost_hlen);
527                 return -EFAULT;
528         }
529
530         return ret;
531 }
532
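/* Decide whether to set MSG_MORE: keep batching while more avail buffers are
 * queued and the byte weight has not yet been reached.
 */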
533 static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
534 {
535         return total_len < VHOST_NET_WEIGHT &&
536                !vhost_vq_avail_empty(vq->dev, vq);
537 }
538
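/* TX path used when zerocopy is not in play: copy each packet into the backend
 * socket with sendmsg(), batching used-ring updates until VHOST_NET_BATCH
 * heads have accumulated or the loop exits.
 */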
539 static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
540 {
541         struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
542         struct vhost_virtqueue *vq = &nvq->vq;
543         unsigned out, in;
544         int head;
545         struct msghdr msg = {
546                 .msg_name = NULL,
547                 .msg_namelen = 0,
548                 .msg_control = NULL,
549                 .msg_controllen = 0,
550                 .msg_flags = MSG_DONTWAIT,
551         };
552         size_t len, total_len = 0;
553         int err;
554         int sent_pkts = 0;
555
556         for (;;) {
557                 bool busyloop_intr = false;
558
559                 head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
560                                    &busyloop_intr);
561                 /* On error, stop handling until the next kick. */
562                 if (unlikely(head < 0))
563                         break;
564                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
565                 if (head == vq->num) {
566                         if (unlikely(busyloop_intr)) {
567                                 vhost_poll_queue(&vq->poll);
568                         } else if (unlikely(vhost_enable_notify(&net->dev,
569                                                                 vq))) {
570                                 vhost_disable_notify(&net->dev, vq);
571                                 continue;
572                         }
573                         break;
574                 }
575
576                 vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
577                 vq->heads[nvq->done_idx].len = 0;
578
579                 total_len += len;
580                 if (tx_can_batch(vq, total_len))
581                         msg.msg_flags |= MSG_MORE;
582                 else
583                         msg.msg_flags &= ~MSG_MORE;
584
585                 /* TODO: Check specific error and bomb out unless ENOBUFS? */
586                 err = sock->ops->sendmsg(sock, &msg, len);
587                 if (unlikely(err < 0)) {
588                         vhost_discard_vq_desc(vq, 1);
589                         vhost_net_enable_vq(net, vq);
590                         break;
591                 }
592                 if (err != len)
593                         pr_debug("Truncated TX packet: len %d != %zd\n",
594                                  err, len);
595                 if (++nvq->done_idx >= VHOST_NET_BATCH)
596                         vhost_net_signal_used(nvq);
597                 if (vhost_exceeds_weight(++sent_pkts, total_len)) {
598                         vhost_poll_queue(&vq->poll);
599                         break;
600                 }
601         }
602
603         vhost_net_signal_used(nvq);
604 }
605
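/* TX path for sockets that support zerocopy: packets of at least
 * VHOST_GOODCOPY_LEN are submitted with a ubuf_info passed through msg_control
 * so the lower device can report completion via vhost_zerocopy_callback();
 * small packets or an overloaded pipeline fall back to copying.  Used entries
 * are signalled as DMAs complete in order.
 */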
606 static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
607 {
608         struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
609         struct vhost_virtqueue *vq = &nvq->vq;
610         unsigned out, in;
611         int head;
612         struct msghdr msg = {
613                 .msg_name = NULL,
614                 .msg_namelen = 0,
615                 .msg_control = NULL,
616                 .msg_controllen = 0,
617                 .msg_flags = MSG_DONTWAIT,
618         };
619         size_t len, total_len = 0;
620         int err;
621         struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
622         bool zcopy_used;
623         int sent_pkts = 0;
624
625         for (;;) {
626                 bool busyloop_intr;
627
628                 /* Release DMAs done buffers first */
629                 vhost_zerocopy_signal_used(net, vq);
630
631                 busyloop_intr = false;
632                 head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
633                                    &busyloop_intr);
634                 /* On error, stop handling until the next kick. */
635                 if (unlikely(head < 0))
636                         break;
637                 /* Nothing new?  Wait for eventfd to tell us they refilled. */
638                 if (head == vq->num) {
639                         if (unlikely(busyloop_intr)) {
640                                 vhost_poll_queue(&vq->poll);
641                         } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
642                                 vhost_disable_notify(&net->dev, vq);
643                                 continue;
644                         }
645                         break;
646                 }
647
648                 zcopy_used = len >= VHOST_GOODCOPY_LEN
649                              && !vhost_exceeds_maxpend(net)
650                              && vhost_net_tx_select_zcopy(net);
651
652                 /* use msg_control to pass vhost zerocopy ubuf info to skb */
653                 if (zcopy_used) {
654                         struct ubuf_info *ubuf;
655                         ubuf = nvq->ubuf_info + nvq->upend_idx;
656
657                         vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
658                         vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
659                         ubuf->callback = vhost_zerocopy_callback;
660                         ubuf->ctx = nvq->ubufs;
661                         ubuf->desc = nvq->upend_idx;
662                         refcount_set(&ubuf->refcnt, 1);
663                         msg.msg_control = ubuf;
664                         msg.msg_controllen = sizeof(ubuf);
665                         ubufs = nvq->ubufs;
666                         atomic_inc(&ubufs->refcount);
667                         nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
668                 } else {
669                         msg.msg_control = NULL;
670                         ubufs = NULL;
671                 }
672                 total_len += len;
673                 if (tx_can_batch(vq, total_len) &&
674                     likely(!vhost_exceeds_maxpend(net))) {
675                         msg.msg_flags |= MSG_MORE;
676                 } else {
677                         msg.msg_flags &= ~MSG_MORE;
678                 }
679
680                 /* TODO: Check specific error and bomb out unless ENOBUFS? */
681                 err = sock->ops->sendmsg(sock, &msg, len);
682                 if (unlikely(err < 0)) {
683                         if (zcopy_used) {
684                                 vhost_net_ubuf_put(ubufs);
685                                 nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
686                                         % UIO_MAXIOV;
687                         }
688                         vhost_discard_vq_desc(vq, 1);
689                         vhost_net_enable_vq(net, vq);
690                         break;
691                 }
692                 if (err != len)
693                         pr_debug("Truncated TX packet: "
694                                  " len %d != %zd\n", err, len);
695                 if (!zcopy_used)
696                         vhost_add_used_and_signal(&net->dev, vq, head, 0);
697                 else
698                         vhost_zerocopy_signal_used(net, vq);
699                 vhost_net_tx_packet(net);
700                 if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
701                         vhost_poll_queue(&vq->poll);
702                         break;
703                 }
704         }
705 }
706
707 /* Expects to be always run from workqueue - which acts as
708  * read-side critical section for our kind of RCU. */
709 static void handle_tx(struct vhost_net *net)
710 {
711         struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
712         struct vhost_virtqueue *vq = &nvq->vq;
713         struct socket *sock;
714
715         mutex_lock(&vq->mutex);
716         sock = vq->private_data;
717         if (!sock)
718                 goto out;
719
720         if (!vq_iotlb_prefetch(vq))
721                 goto out;
722
723         vhost_disable_notify(&net->dev, vq);
724         vhost_net_disable_vq(net, vq);
725
726         if (vhost_sock_zcopy(sock))
727                 handle_tx_zerocopy(net, sock);
728         else
729                 handle_tx_copy(net, sock);
730
731 out:
732         mutex_unlock(&vq->mutex);
733 }
734
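/* Length of the next pending RX packet: taken from the cached ptr_ring batch
 * when a tap ring is attached, otherwise by peeking the socket receive queue
 * (accounting for a VLAN tag if present).
 */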
735 static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
736 {
737         struct sk_buff *head;
738         int len = 0;
739         unsigned long flags;
740
741         if (rvq->rx_ring)
742                 return vhost_net_buf_peek(rvq);
743
744         spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
745         head = skb_peek(&sk->sk_receive_queue);
746         if (likely(head)) {
747                 len = head->len;
748                 if (skb_vlan_tag_present(head))
749                         len += VLAN_HLEN;
750         }
751
752         spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
753         return len;
754 }
755
756 static int sk_has_rx_data(struct sock *sk)
757 {
758         struct socket *sock = sk->sk_socket;
759
760         if (sock->ops->peek_len)
761                 return sock->ops->peek_len(sock);
762
763         return !skb_queue_empty(&sk->sk_receive_queue);
764 }
765
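/* Like peek_head_len(), but when nothing is pending and a busyloop timeout is
 * configured, busy-poll both the RX socket and the TX virtqueue, kicking the
 * TX handler if new TX buffers show up, then peek again.
 */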
766 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
767                                       bool *busyloop_intr)
768 {
769         struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
770         struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
771         struct vhost_virtqueue *rvq = &rnvq->vq;
772         struct vhost_virtqueue *tvq = &tnvq->vq;
773         unsigned long uninitialized_var(endtime);
774         int len = peek_head_len(rnvq, sk);
775
776         if (!len && tvq->busyloop_timeout) {
777                 /* Flush batched heads first */
778                 vhost_net_signal_used(rnvq);
779                 /* Both tx vq and rx socket were polled here */
780                 mutex_lock_nested(&tvq->mutex, 1);
781                 vhost_disable_notify(&net->dev, tvq);
782
783                 preempt_disable();
784                 endtime = busy_clock() + tvq->busyloop_timeout;
785
786                 while (vhost_can_busy_poll(endtime)) {
787                         if (vhost_has_work(&net->dev)) {
788                                 *busyloop_intr = true;
789                                 break;
790                         }
791                         if ((sk_has_rx_data(sk) &&
792                              !vhost_vq_avail_empty(&net->dev, rvq)) ||
793                             !vhost_vq_avail_empty(&net->dev, tvq))
794                                 break;
795                         cpu_relax();
796                 }
797
798                 preempt_enable();
799
800                 if (!vhost_vq_avail_empty(&net->dev, tvq)) {
801                         vhost_poll_queue(&tvq->poll);
802                 } else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
803                         vhost_disable_notify(&net->dev, tvq);
804                         vhost_poll_queue(&tvq->poll);
805                 }
806
807                 mutex_unlock(&tvq->mutex);
808
809                 len = peek_head_len(rnvq, sk);
810         }
811
812         return len;
813 }
814
815 /* This is a multi-buffer version of vhost_get_vq_desc(), which works if the
816  *      vq has read descriptors only.
817  * @vq          - the relevant virtqueue
818  * @datalen     - data length we'll be reading
819  * @iovcount    - returned count of io vectors we fill
820  * @log         - vhost log
821  * @log_num     - log offset
822  * @quota       - headcount quota, 1 for big buffer
823  *      returns number of buffer heads allocated, negative on error
824  */
825 static int get_rx_bufs(struct vhost_virtqueue *vq,
826                        struct vring_used_elem *heads,
827                        int datalen,
828                        unsigned *iovcount,
829                        struct vhost_log *log,
830                        unsigned *log_num,
831                        unsigned int quota)
832 {
833         unsigned int out, in;
834         int seg = 0;
835         int headcount = 0;
836         unsigned d;
837         int r, nlogs = 0;
838         /* len is always initialized before use since we are always called with
839          * datalen > 0.
840          */
841         u32 uninitialized_var(len);
842
843         while (datalen > 0 && headcount < quota) {
844                 if (unlikely(seg >= UIO_MAXIOV)) {
845                         r = -ENOBUFS;
846                         goto err;
847                 }
848                 r = vhost_get_vq_desc(vq, vq->iov + seg,
849                                       ARRAY_SIZE(vq->iov) - seg, &out,
850                                       &in, log, log_num);
851                 if (unlikely(r < 0))
852                         goto err;
853
854                 d = r;
855                 if (d == vq->num) {
856                         r = 0;
857                         goto err;
858                 }
859                 if (unlikely(out || in <= 0)) {
860                         vq_err(vq, "unexpected descriptor format for RX: "
861                                 "out %d, in %d\n", out, in);
862                         r = -EINVAL;
863                         goto err;
864                 }
865                 if (unlikely(log)) {
866                         nlogs += *log_num;
867                         log += *log_num;
868                 }
869                 heads[headcount].id = cpu_to_vhost32(vq, d);
870                 len = iov_length(vq->iov + seg, in);
871                 heads[headcount].len = cpu_to_vhost32(vq, len);
872                 datalen -= len;
873                 ++headcount;
874                 seg += in;
875         }
876         heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
877         *iovcount = seg;
878         if (unlikely(log))
879                 *log_num = nlogs;
880
881         /* Detect overrun */
882         if (unlikely(datalen > 0)) {
883                 r = UIO_MAXIOV + 1;
884                 goto err;
885         }
886         return headcount;
887 err:
888         vhost_discard_vq_desc(vq, headcount);
889         return r;
890 }
891
892 /* Expects to be always run from workqueue - which acts as
893  * read-side critical section for our kind of RCU. */
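/* RX path: pull packets from the backend socket with recvmsg() into
 * guest-provided buffers, merging several buffers per packet when
 * VIRTIO_NET_F_MRG_RXBUF is negotiated and supplying the vnet header when
 * VHOST_NET_F_VIRTIO_NET_HDR is set.
 */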
894 static void handle_rx(struct vhost_net *net)
895 {
896         struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
897         struct vhost_virtqueue *vq = &nvq->vq;
898         unsigned uninitialized_var(in), log;
899         struct vhost_log *vq_log;
900         struct msghdr msg = {
901                 .msg_name = NULL,
902                 .msg_namelen = 0,
903                 .msg_control = NULL, /* FIXME: get and handle RX aux data. */
904                 .msg_controllen = 0,
905                 .msg_flags = MSG_DONTWAIT,
906         };
907         struct virtio_net_hdr hdr = {
908                 .flags = 0,
909                 .gso_type = VIRTIO_NET_HDR_GSO_NONE
910         };
911         size_t total_len = 0;
912         int err, mergeable;
913         s16 headcount;
914         size_t vhost_hlen, sock_hlen;
915         size_t vhost_len, sock_len;
916         bool busyloop_intr = false;
917         struct socket *sock;
918         struct iov_iter fixup;
919         __virtio16 num_buffers;
920         int recv_pkts = 0;
921
922         mutex_lock_nested(&vq->mutex, 0);
923         sock = vq->private_data;
924         if (!sock)
925                 goto out;
926
927         if (!vq_iotlb_prefetch(vq))
928                 goto out;
929
930         vhost_disable_notify(&net->dev, vq);
931         vhost_net_disable_vq(net, vq);
932
933         vhost_hlen = nvq->vhost_hlen;
934         sock_hlen = nvq->sock_hlen;
935
936         vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
937                 vq->log : NULL;
938         mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
939
940         while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
941                                                       &busyloop_intr))) {
942                 sock_len += sock_hlen;
943                 vhost_len = sock_len + vhost_hlen;
944                 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
945                                         vhost_len, &in, vq_log, &log,
946                                         likely(mergeable) ? UIO_MAXIOV : 1);
947                 /* On error, stop handling until the next kick. */
948                 if (unlikely(headcount < 0))
949                         goto out;
950                 /* OK, now we need to know about added descriptors. */
951                 if (!headcount) {
952                         if (unlikely(busyloop_intr)) {
953                                 vhost_poll_queue(&vq->poll);
954                         } else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
955                                 /* They have slipped one in as we were
956                                  * doing that: check again. */
957                                 vhost_disable_notify(&net->dev, vq);
958                                 continue;
959                         }
960                         /* Nothing new?  Wait for eventfd to tell us
961                          * they refilled. */
962                         goto out;
963                 }
964                 busyloop_intr = false;
965                 if (nvq->rx_ring)
966                         msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
967                 /* On overrun, truncate and discard */
968                 if (unlikely(headcount > UIO_MAXIOV)) {
969                         iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
970                         err = sock->ops->recvmsg(sock, &msg,
971                                                  1, MSG_DONTWAIT | MSG_TRUNC);
972                         pr_debug("Discarded rx packet: len %zd\n", sock_len);
973                         continue;
974                 }
975                 /* We don't need to be notified again. */
976                 iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
977                 fixup = msg.msg_iter;
978                 if (unlikely((vhost_hlen))) {
979                         /* We will supply the header ourselves
980                          * TODO: support TSO.
981                          */
982                         iov_iter_advance(&msg.msg_iter, vhost_hlen);
983                 }
984                 err = sock->ops->recvmsg(sock, &msg,
985                                          sock_len, MSG_DONTWAIT | MSG_TRUNC);
986                 /* Userspace might have consumed the packet meanwhile:
987                  * it's not supposed to do this usually, but might be hard
988                  * to prevent. Discard data we got (if any) and keep going. */
989                 if (unlikely(err != sock_len)) {
990                         pr_debug("Discarded rx packet: "
991                                  " len %d, expected %zd\n", err, sock_len);
992                         vhost_discard_vq_desc(vq, headcount);
993                         continue;
994                 }
995                 /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
996                 if (unlikely(vhost_hlen)) {
997                         if (copy_to_iter(&hdr, sizeof(hdr),
998                                          &fixup) != sizeof(hdr)) {
999                                 vq_err(vq, "Unable to write vnet_hdr "
1000                                        "at addr %p\n", vq->iov->iov_base);
1001                                 goto out;
1002                         }
1003                 } else {
1004                         /* Header came from socket; we'll need to patch
1005                          * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
1006                          */
1007                         iov_iter_advance(&fixup, sizeof(hdr));
1008                 }
1009                 /* TODO: Should check and handle checksum. */
1010
1011                 num_buffers = cpu_to_vhost16(vq, headcount);
1012                 if (likely(mergeable) &&
1013                     copy_to_iter(&num_buffers, sizeof num_buffers,
1014                                  &fixup) != sizeof num_buffers) {
1015                         vq_err(vq, "Failed num_buffers write");
1016                         vhost_discard_vq_desc(vq, headcount);
1017                         goto out;
1018                 }
1019                 nvq->done_idx += headcount;
1020                 if (nvq->done_idx > VHOST_NET_BATCH)
1021                         vhost_net_signal_used(nvq);
1022                 if (unlikely(vq_log))
1023                         vhost_log_write(vq, vq_log, log, vhost_len);
1024                 total_len += vhost_len;
1025                 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
1026                         vhost_poll_queue(&vq->poll);
1027                         goto out;
1028                 }
1029         }
1030         if (unlikely(busyloop_intr))
1031                 vhost_poll_queue(&vq->poll);
1032         else
1033                 vhost_net_enable_vq(net, vq);
1034 out:
1035         vhost_net_signal_used(nvq);
1036         mutex_unlock(&vq->mutex);
1037 }
1038
1039 static void handle_tx_kick(struct vhost_work *work)
1040 {
1041         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1042                                                   poll.work);
1043         struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
1044
1045         handle_tx(net);
1046 }
1047
1048 static void handle_rx_kick(struct vhost_work *work)
1049 {
1050         struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
1051                                                   poll.work);
1052         struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
1053
1054         handle_rx(net);
1055 }
1056
1057 static void handle_tx_net(struct vhost_work *work)
1058 {
1059         struct vhost_net *net = container_of(work, struct vhost_net,
1060                                              poll[VHOST_NET_VQ_TX].work);
1061         handle_tx(net);
1062 }
1063
1064 static void handle_rx_net(struct vhost_work *work)
1065 {
1066         struct vhost_net *net = container_of(work, struct vhost_net,
1067                                              poll[VHOST_NET_VQ_RX].work);
1068         handle_rx(net);
1069 }
1070
1071 static int vhost_net_open(struct inode *inode, struct file *f)
1072 {
1073         struct vhost_net *n;
1074         struct vhost_dev *dev;
1075         struct vhost_virtqueue **vqs;
1076         void **queue;
1077         int i;
1078
1079         n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1080         if (!n)
1081                 return -ENOMEM;
1082         vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
1083         if (!vqs) {
1084                 kvfree(n);
1085                 return -ENOMEM;
1086         }
1087
1088         queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
1089                               GFP_KERNEL);
1090         if (!queue) {
1091                 kfree(vqs);
1092                 kvfree(n);
1093                 return -ENOMEM;
1094         }
1095         n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;
1096
1097         dev = &n->dev;
1098         vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
1099         vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
1100         n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
1101         n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
1102         for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
1103                 n->vqs[i].ubufs = NULL;
1104                 n->vqs[i].ubuf_info = NULL;
1105                 n->vqs[i].upend_idx = 0;
1106                 n->vqs[i].done_idx = 0;
1107                 n->vqs[i].vhost_hlen = 0;
1108                 n->vqs[i].sock_hlen = 0;
1109                 n->vqs[i].rx_ring = NULL;
1110                 vhost_net_buf_init(&n->vqs[i].rxq);
1111         }
1112         vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
1113
1114         vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
1115         vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
1116
1117         f->private_data = n;
1118
1119         return 0;
1120 }
1121
1122 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
1123                                         struct vhost_virtqueue *vq)
1124 {
1125         struct socket *sock;
1126         struct vhost_net_virtqueue *nvq =
1127                 container_of(vq, struct vhost_net_virtqueue, vq);
1128
1129         mutex_lock(&vq->mutex);
1130         sock = vq->private_data;
1131         vhost_net_disable_vq(n, vq);
1132         vq->private_data = NULL;
1133         vhost_net_buf_unproduce(nvq);
1134         nvq->rx_ring = NULL;
1135         mutex_unlock(&vq->mutex);
1136         return sock;
1137 }
1138
1139 static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
1140                            struct socket **rx_sock)
1141 {
1142         *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
1143         *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
1144 }
1145
1146 static void vhost_net_flush_vq(struct vhost_net *n, int index)
1147 {
1148         vhost_poll_flush(n->poll + index);
1149         vhost_poll_flush(&n->vqs[index].vq.poll);
1150 }
1151
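/* Flush queued work for both virtqueues; for TX this also waits for all
 * outstanding zerocopy DMAs to complete before allowing new ones.
 */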
1152 static void vhost_net_flush(struct vhost_net *n)
1153 {
1154         vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
1155         vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
1156         if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
1157                 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1158                 n->tx_flush = true;
1159                 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1160                 /* Wait for all lower device DMAs done. */
1161                 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
1162                 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1163                 n->tx_flush = false;
1164                 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
1165                 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
1166         }
1167 }
1168
1169 static int vhost_net_release(struct inode *inode, struct file *f)
1170 {
1171         struct vhost_net *n = f->private_data;
1172         struct socket *tx_sock;
1173         struct socket *rx_sock;
1174
1175         vhost_net_stop(n, &tx_sock, &rx_sock);
1176         vhost_net_flush(n);
1177         vhost_dev_stop(&n->dev);
1178         vhost_dev_cleanup(&n->dev);
1179         vhost_net_vq_reset(n);
1180         if (tx_sock)
1181                 sockfd_put(tx_sock);
1182         if (rx_sock)
1183                 sockfd_put(rx_sock);
1184         /* Make sure no callbacks are outstanding */
1185         synchronize_rcu_bh();
1186         /* We do an extra flush before freeing memory,
1187          * since jobs can re-queue themselves. */
1188         vhost_net_flush(n);
1189         kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
1190         kfree(n->dev.vqs);
1191         kvfree(n);
1192         return 0;
1193 }
1194
1195 static struct socket *get_raw_socket(int fd)
1196 {
1197         struct {
1198                 struct sockaddr_ll sa;
1199                 char  buf[MAX_ADDR_LEN];
1200         } uaddr;
1201         int r;
1202         struct socket *sock = sockfd_lookup(fd, &r);
1203
1204         if (!sock)
1205                 return ERR_PTR(-ENOTSOCK);
1206
1207         /* Parameter checking */
1208         if (sock->sk->sk_type != SOCK_RAW) {
1209                 r = -ESOCKTNOSUPPORT;
1210                 goto err;
1211         }
1212
1213         r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
1214         if (r < 0)
1215                 goto err;
1216
1217         if (uaddr.sa.sll_family != AF_PACKET) {
1218                 r = -EPFNOSUPPORT;
1219                 goto err;
1220         }
1221         return sock;
1222 err:
1223         sockfd_put(sock);
1224         return ERR_PTR(r);
1225 }
1226
1227 static struct ptr_ring *get_tap_ptr_ring(int fd)
1228 {
1229         struct ptr_ring *ring;
1230         struct file *file = fget(fd);
1231
1232         if (!file)
1233                 return NULL;
1234         ring = tun_get_tx_ring(file);
1235         if (!IS_ERR(ring))
1236                 goto out;
1237         ring = tap_get_ptr_ring(file);
1238         if (!IS_ERR(ring))
1239                 goto out;
1240         ring = NULL;
1241 out:
1242         fput(file);
1243         return ring;
1244 }
1245
1246 static struct socket *get_tap_socket(int fd)
1247 {
1248         struct file *file = fget(fd);
1249         struct socket *sock;
1250
1251         if (!file)
1252                 return ERR_PTR(-EBADF);
1253         sock = tun_get_socket(file);
1254         if (!IS_ERR(sock))
1255                 return sock;
1256         sock = tap_get_socket(file);
1257         if (IS_ERR(sock))
1258                 fput(file);
1259         return sock;
1260 }
1261
1262 static struct socket *get_socket(int fd)
1263 {
1264         struct socket *sock;
1265
1266         /* special case to disable backend */
1267         if (fd == -1)
1268                 return NULL;
1269         sock = get_raw_socket(fd);
1270         if (!IS_ERR(sock))
1271                 return sock;
1272         sock = get_tap_socket(fd);
1273         if (!IS_ERR(sock))
1274                 return sock;
1275         return ERR_PTR(-ENOTSOCK);
1276 }
1277
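/* VHOST_NET_SET_BACKEND: attach a tun/tap or raw packet socket to virtqueue
 * 'index' (fd == -1 detaches the current backend).  Sets up zerocopy tracking
 * for sockets that support it and, for RX, caches the tap ptr_ring.
 */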
1278 static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
1279 {
1280         struct socket *sock, *oldsock;
1281         struct vhost_virtqueue *vq;
1282         struct vhost_net_virtqueue *nvq;
1283         struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
1284         int r;
1285
1286         mutex_lock(&n->dev.mutex);
1287         r = vhost_dev_check_owner(&n->dev);
1288         if (r)
1289                 goto err;
1290
1291         if (index >= VHOST_NET_VQ_MAX) {
1292                 r = -ENOBUFS;
1293                 goto err;
1294         }
1295         vq = &n->vqs[index].vq;
1296         nvq = &n->vqs[index];
1297         mutex_lock(&vq->mutex);
1298
1299         /* Verify that ring has been setup correctly. */
1300         if (!vhost_vq_access_ok(vq)) {
1301                 r = -EFAULT;
1302                 goto err_vq;
1303         }
1304         sock = get_socket(fd);
1305         if (IS_ERR(sock)) {
1306                 r = PTR_ERR(sock);
1307                 goto err_vq;
1308         }
1309
1310         /* start polling new socket */
1311         oldsock = vq->private_data;
1312         if (sock != oldsock) {
1313                 ubufs = vhost_net_ubuf_alloc(vq,
1314                                              sock && vhost_sock_zcopy(sock));
1315                 if (IS_ERR(ubufs)) {
1316                         r = PTR_ERR(ubufs);
1317                         goto err_ubufs;
1318                 }
1319
1320                 vhost_net_disable_vq(n, vq);
1321                 vq->private_data = sock;
1322                 vhost_net_buf_unproduce(nvq);
1323                 r = vhost_vq_init_access(vq);
1324                 if (r)
1325                         goto err_used;
1326                 r = vhost_net_enable_vq(n, vq);
1327                 if (r)
1328                         goto err_used;
1329                 if (index == VHOST_NET_VQ_RX)
1330                         nvq->rx_ring = get_tap_ptr_ring(fd);
1331
1332                 oldubufs = nvq->ubufs;
1333                 nvq->ubufs = ubufs;
1334
1335                 n->tx_packets = 0;
1336                 n->tx_zcopy_err = 0;
1337                 n->tx_flush = false;
1338         }
1339
1340         mutex_unlock(&vq->mutex);
1341
1342         if (oldubufs) {
1343                 vhost_net_ubuf_put_wait_and_free(oldubufs);
1344                 mutex_lock(&vq->mutex);
1345                 vhost_zerocopy_signal_used(n, vq);
1346                 mutex_unlock(&vq->mutex);
1347         }
1348
1349         if (oldsock) {
1350                 vhost_net_flush_vq(n, index);
1351                 sockfd_put(oldsock);
1352         }
1353
1354         mutex_unlock(&n->dev.mutex);
1355         return 0;
1356
1357 err_used:
1358         vq->private_data = oldsock;
1359         vhost_net_enable_vq(n, vq);
1360         if (ubufs)
1361                 vhost_net_ubuf_put_wait_and_free(ubufs);
1362 err_ubufs:
1363         if (sock)
1364                 sockfd_put(sock);
1365 err_vq:
1366         mutex_unlock(&vq->mutex);
1367 err:
1368         mutex_unlock(&n->dev.mutex);
1369         return r;
1370 }
1371
1372 static long vhost_net_reset_owner(struct vhost_net *n)
1373 {
1374         struct socket *tx_sock = NULL;
1375         struct socket *rx_sock = NULL;
1376         long err;
1377         struct vhost_umem *umem;
1378
1379         mutex_lock(&n->dev.mutex);
1380         err = vhost_dev_check_owner(&n->dev);
1381         if (err)
1382                 goto done;
1383         umem = vhost_dev_reset_owner_prepare();
1384         if (!umem) {
1385                 err = -ENOMEM;
1386                 goto done;
1387         }
1388         vhost_net_stop(n, &tx_sock, &rx_sock);
1389         vhost_net_flush(n);
1390         vhost_dev_stop(&n->dev);
1391         vhost_dev_reset_owner(&n->dev, umem);
1392         vhost_net_vq_reset(n);
1393 done:
1394         mutex_unlock(&n->dev.mutex);
1395         if (tx_sock)
1396                 sockfd_put(tx_sock);
1397         if (rx_sock)
1398                 sockfd_put(rx_sock);
1399         return err;
1400 }
1401
1402 static int vhost_net_set_features(struct vhost_net *n, u64 features)
1403 {
1404         size_t vhost_hlen, sock_hlen, hdr_len;
1405         int i;
1406
1407         hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
1408                                (1ULL << VIRTIO_F_VERSION_1))) ?
1409                         sizeof(struct virtio_net_hdr_mrg_rxbuf) :
1410                         sizeof(struct virtio_net_hdr);
1411         if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
1412                 /* vhost provides vnet_hdr */
1413                 vhost_hlen = hdr_len;
1414                 sock_hlen = 0;
1415         } else {
1416                 /* socket provides vnet_hdr */
1417                 vhost_hlen = 0;
1418                 sock_hlen = hdr_len;
1419         }
1420         mutex_lock(&n->dev.mutex);
1421         if ((features & (1 << VHOST_F_LOG_ALL)) &&
1422             !vhost_log_access_ok(&n->dev))
1423                 goto out_unlock;
1424
1425         if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
1426                 if (vhost_init_device_iotlb(&n->dev, true))
1427                         goto out_unlock;
1428         }
1429
1430         for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
1431                 mutex_lock(&n->vqs[i].vq.mutex);
1432                 n->vqs[i].vq.acked_features = features;
1433                 n->vqs[i].vhost_hlen = vhost_hlen;
1434                 n->vqs[i].sock_hlen = sock_hlen;
1435                 mutex_unlock(&n->vqs[i].vq.mutex);
1436         }
1437         mutex_unlock(&n->dev.mutex);
1438         return 0;
1439
1440 out_unlock:
1441         mutex_unlock(&n->dev.mutex);
1442         return -EFAULT;
1443 }
1444
1445 static long vhost_net_set_owner(struct vhost_net *n)
1446 {
1447         int r;
1448
1449         mutex_lock(&n->dev.mutex);
1450         if (vhost_dev_has_owner(&n->dev)) {
1451                 r = -EBUSY;
1452                 goto out;
1453         }
1454         r = vhost_net_set_ubuf_info(n);
1455         if (r)
1456                 goto out;
1457         r = vhost_dev_set_owner(&n->dev);
1458         if (r)
1459                 vhost_net_clear_ubuf_info(n);
1460         vhost_net_flush(n);
1461 out:
1462         mutex_unlock(&n->dev.mutex);
1463         return r;
1464 }
1465
1466 static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
1467                             unsigned long arg)
1468 {
1469         struct vhost_net *n = f->private_data;
1470         void __user *argp = (void __user *)arg;
1471         u64 __user *featurep = argp;
1472         struct vhost_vring_file backend;
1473         u64 features;
1474         int r;
1475
1476         switch (ioctl) {
1477         case VHOST_NET_SET_BACKEND:
1478                 if (copy_from_user(&backend, argp, sizeof backend))
1479                         return -EFAULT;
1480                 return vhost_net_set_backend(n, backend.index, backend.fd);
1481         case VHOST_GET_FEATURES:
1482                 features = VHOST_NET_FEATURES;
1483                 if (copy_to_user(featurep, &features, sizeof features))
1484                         return -EFAULT;
1485                 return 0;
1486         case VHOST_SET_FEATURES:
1487                 if (copy_from_user(&features, featurep, sizeof features))
1488                         return -EFAULT;
1489                 if (features & ~VHOST_NET_FEATURES)
1490                         return -EOPNOTSUPP;
1491                 return vhost_net_set_features(n, features);
1492         case VHOST_RESET_OWNER:
1493                 return vhost_net_reset_owner(n);
1494         case VHOST_SET_OWNER:
1495                 return vhost_net_set_owner(n);
1496         default:
1497                 mutex_lock(&n->dev.mutex);
1498                 r = vhost_dev_ioctl(&n->dev, ioctl, argp);
1499                 if (r == -ENOIOCTLCMD)
1500                         r = vhost_vring_ioctl(&n->dev, ioctl, argp);
1501                 else
1502                         vhost_net_flush(n);
1503                 mutex_unlock(&n->dev.mutex);
1504                 return r;
1505         }
1506 }
1507
1508 #ifdef CONFIG_COMPAT
1509 static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
1510                                    unsigned long arg)
1511 {
1512         return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1513 }
1514 #endif
1515
1516 static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
1517 {
1518         struct file *file = iocb->ki_filp;
1519         struct vhost_net *n = file->private_data;
1520         struct vhost_dev *dev = &n->dev;
1521         int noblock = file->f_flags & O_NONBLOCK;
1522
1523         return vhost_chr_read_iter(dev, to, noblock);
1524 }
1525
1526 static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
1527                                         struct iov_iter *from)
1528 {
1529         struct file *file = iocb->ki_filp;
1530         struct vhost_net *n = file->private_data;
1531         struct vhost_dev *dev = &n->dev;
1532
1533         return vhost_chr_write_iter(dev, from);
1534 }
1535
1536 static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
1537 {
1538         struct vhost_net *n = file->private_data;
1539         struct vhost_dev *dev = &n->dev;
1540
1541         return vhost_chr_poll(file, dev, wait);
1542 }
1543
1544 static const struct file_operations vhost_net_fops = {
1545         .owner          = THIS_MODULE,
1546         .release        = vhost_net_release,
1547         .read_iter      = vhost_net_chr_read_iter,
1548         .write_iter     = vhost_net_chr_write_iter,
1549         .poll           = vhost_net_chr_poll,
1550         .unlocked_ioctl = vhost_net_ioctl,
1551 #ifdef CONFIG_COMPAT
1552         .compat_ioctl   = vhost_net_compat_ioctl,
1553 #endif
1554         .open           = vhost_net_open,
1555         .llseek         = noop_llseek,
1556 };
1557
1558 static struct miscdevice vhost_net_misc = {
1559         .minor = VHOST_NET_MINOR,
1560         .name = "vhost-net",
1561         .fops = &vhost_net_fops,
1562 };
1563
1564 static int vhost_net_init(void)
1565 {
1566         if (experimental_zcopytx)
1567                 vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
1568         return misc_register(&vhost_net_misc);
1569 }
1570 module_init(vhost_net_init);
1571
1572 static void vhost_net_exit(void)
1573 {
1574         misc_deregister(&vhost_net_misc);
1575 }
1576 module_exit(vhost_net_exit);
1577
1578 MODULE_VERSION("0.0.1");
1579 MODULE_LICENSE("GPL v2");
1580 MODULE_AUTHOR("Michael S. Tsirkin");
1581 MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
1582 MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
1583 MODULE_ALIAS("devname:vhost-net");