/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them.  Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets; SOCK_DGRAM sockets will always remain in
 * that list.  The bound table is used solely for lookup of sockets when
 * packets are received, and that lookup is not necessary for SOCK_DGRAM
 * sockets since we create a datagram handle for each one.  Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets reduces the chance of collisions when
 * looking up SOCK_STREAM sockets and saves us from checking the socket type
 * in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we
 * do not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the VSOCK_SS_LISTEN state.  When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket.  These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection.  If it does, we process the packet for the
 * pending socket.  When that socket reaches the connected state, it is
 * removed from the listener socket's pending list and enqueued in the
 * listener socket's accept queue.  Callers of accept(2) will accept connected
 * sockets from the listener socket's accept queue.  If the socket cannot be
 * accepted for some reason then it is marked rejected.  Once the connection
 * is accepted, it is owned by the user process and the responsibility for
 * cleanup falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request.  Because of this, we must schedule a cleanup function to run in
 * the future, after some amount of time passes where a connection should have
 * been established.  This function ensures that the socket is off all lists
 * so it cannot be retrieved, then drops all references to the socket so it is
 * cleaned up (sock_put() -> sk_free() -> our sk_destruct implementation).
 * Note this function will also clean up rejected sockets, those that reach
 * the connected state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time (see the sketch following
 * this comment block).
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called.  Our
 * release implementation will perform some cleanup then drop the last
 * reference so our sk_destruct implementation is invoked.  Our sk_destruct
 * implementation will perform additional cleanup that's common for both
 * types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed.  Each entry in a list (such as the "global" bound and connected
 * tables and the listener socket's pending list and accept queue) ensures a
 * reference.  When we defer work until process context and pass a socket as
 * our argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 */
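/*
 * A minimal illustration (not part of the original file) of the listener /
 * pending lock ordering described above; the function is hypothetical, hence
 * the #if 0 guard.
 */
#if 0
static void example_nested_locking(struct sock *listener, struct sock *pending)
{
	/* Always take the listener's lock first... */
	lock_sock(listener);
	/* ...then the pending socket's lock, telling lockdep that taking a
	 * second lock of the same class is intentional here.
	 */
	lock_sock_nested(pending, SINGLE_DEPTH_NESTING);

	/* ... operate on both sockets ... */

	/* Release in the reverse order of acquisition. */
	release_sock(pending);
	release_sock(listener);
}
#endif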
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
/* Protocol family. */
static struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

static const struct vsock_transport *transport;
static DEFINE_MUTEX(vsock_register_mutex);
/**** UTILS ****/

/* Get the ID of the local context.  This is transport dependent. */

int vm_sockets_get_local_cid(void)
{
	return transport->get_local_cid();
}
EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);
/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the
 * hash table (vsock_unbound_sockets).  Bound sockets are added to the hash
 * table in the bucket that their local address hashes to
 * (vsock_bound_sockets(addr) represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash
 * function mods with VSOCK_HASH_SIZE to ensure this.
 */
#define VSOCK_HASH_SIZE		251
#define MAX_PORT_RETRIES	24

#define VSOCK_HASH(addr)	((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets	  (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst)				\
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst)			\
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk)			\
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
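/* For example (illustration only, not part of the original file): a socket
 * bound to port 1234 lands in bucket VSOCK_HASH = 1234 % 251 = 230, i.e. on
 * the list at vsock_bind_table[230], while every unbound socket sits on the
 * single extra list at vsock_bind_table[251].
 */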
static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
static DEFINE_SPINLOCK(vsock_table_lock);
/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}
static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}
static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
		if (addr->svm_port == vsk->local_addr.svm_port)
			return sk_vsock(vsk);

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static bool __vsock_in_bound_table(struct vsock_sock *vsk)
{
	return !list_empty(&vsk->bound_table);
}

static bool __vsock_in_connected_table(struct vsock_sock *vsk)
{
	return !list_empty(&vsk->connected_table);
}
static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

static bool vsock_in_bound_table(struct vsock_sock *vsk)
{
	bool ret;

	spin_lock_bh(&vsock_table_lock);
	ret = __vsock_in_bound_table(vsk);
	spin_unlock_bh(&vsock_table_lock);

	return ret;
}

static bool vsock_in_connected_table(struct vsock_sock *vsk)
{
	bool ret;

	spin_lock_bh(&vsock_table_lock);
	ret = __vsock_in_connected_table(vsk);
	spin_unlock_bh(&vsock_table_lock);

	return ret;
}
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table)
			fn(sk_vsock(vsk));
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);
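/*
 * A hedged sketch (not from the original file) of how a transport might use
 * vsock_for_each_connected_socket(), e.g. to error out every connected
 * socket when the underlying device disappears.  Both function names are
 * hypothetical.
 */
#if 0
static void example_error_socket(struct sock *sk)
{
	/* The callback runs with vsock_table_lock held, so keep it brief
	 * and non-sleeping.
	 */
	sk->sk_err = ENODEV;
	sk->sk_error_report(sk);
}

static void example_device_gone(void)
{
	vsock_for_each_connected_socket(example_error_socket);
}
#endif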
void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}
static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	return transport->shutdown(vsock_sk(sk), mode);
}
void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, dwork.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process.  We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	listener->sk_ack_backlog--;

	/* We need to remove ourself from the global connected sockets list so
	 * incoming packets can't find this socket, and to reduce the reference
	 * count.
	 */
	if (vsock_in_connected_table(vsk))
		vsock_remove_connected(vsk);

	sk->sk_state = SS_FREE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}
EXPORT_SYMBOL_GPL(vsock_pending_work);
/**** SOCKET OPERATIONS ****/

static int __vsock_bind_stream(struct vsock_sock *vsk,
			       struct sockaddr_vm *addr)
{
	static u32 port = LAST_RESERVED_PORT + 1;
	struct sockaddr_vm new_addr;

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove stream sockets from the unbound list and add them to the
	 * hash table for easy lookup by their address.  The unbound list is
	 * simply an extra entry at the end of the hash table, a trick used by
	 * AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}
static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	u32 cid;
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that
	 * like AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to the local CID.
	 */
	cid = transport->get_local_cid();
	if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_stream(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}
struct sock *__vsock_create(struct net *net,
			    struct socket *sock,
			    struct sock *parent,
			    gfp_t priority,
			    unsigned short type,
			    int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sk->sk_state = 0;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
	} else {
		vsk->trusted = capable(CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
	}

	if (transport->init(vsk, psk) < 0) {
		sk_free(sk);
		return NULL;
	}

	if (sock)
		vsock_insert_unbound(vsk);

	return sk;
}
EXPORT_SYMBOL_GPL(__vsock_create);
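/*
 * A hedged sketch (not from the original file) of how a transport's receive
 * path might use __vsock_create() to build the "pending" child socket for an
 * incoming connection request, inheriting trust, creds and the connect
 * timeout from the listener.  The function name is hypothetical.
 */
#if 0
static int example_handle_connection_request(struct sock *listener)
{
	struct sock *pending;

	/* No struct socket yet; the child inherits from the listener. */
	pending = __vsock_create(sock_net(listener), NULL, listener,
				 GFP_KERNEL, listener->sk_type, 0);
	if (!pending)
		return -ENOMEM;

	/* Track it on the listener until the handshake completes. */
	vsock_add_pending(listener, pending);
	listener->sk_ack_backlog++;
	return 0;
}
#endif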
static void __vsock_release(struct sock *sk)
{
	if (sk) {
		struct sk_buff *skb;
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		if (vsock_in_bound_table(vsk))
			vsock_remove_bound(vsk);

		if (vsock_in_connected_table(vsk))
			vsock_remove_connected(vsk);

		transport->release(vsk);

		lock_sock(sk);
		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		while ((skb = skb_dequeue(&sk->sk_receive_queue)))
			kfree_skb(skb);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}
static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	transport->destruct(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	return transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	return transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);
static int vsock_release(struct socket *sock)
{
	__vsock_release(sock->sk);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}
static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int *addr_len, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	if (!vm_addr) {
		err = -EINVAL;
		goto out;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	*addr_len = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}
static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do.  Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a STREAM socket and it is not connected then bail out
	 * immediately.  If it is a DGRAM socket then we must first kick the
	 * socket so that it wakes up from any sleeping calls, for example
	 * recv(), and then afterwards return the error.
	 */

	sk = sock->sk;
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sk->sk_type == SOCK_STREAM)
			return err;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		lock_sock(sk);
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);
		release_sock(sk);

		if (sk->sk_type == SOCK_STREAM) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

	return err;
}
static unsigned int vsock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk;
	unsigned int mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= POLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of POLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= POLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= POLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= POLLIN | POLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	} else if (sock->type == SOCK_STREAM) {
		lock_sock(sk);

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == VSOCK_SS_LISTEN
		    && !vsock_is_accept_queue_empty(sk))
			mask |= POLLIN | POLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int ret = transport->notify_poll_in(
					vsk, 1, &data_ready_now);
			if (ret < 0) {
				mask |= POLLERR;
			} else {
				if (data_ready_now)
					mask |= POLLIN | POLLRDNORM;
			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= POLLIN | POLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (sk->sk_state == SS_CONNECTED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= POLLERR;
				} else {
					if (space_avail_now)
						/* Remove POLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= POLLOUT | POLLWRNORM;
				}
			}
		}

		/* Simulate INET socket poll behaviors, which sets
		 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
		 * but local send is not shutdown.
		 */
		if (sk->sk_state == SS_UNCONNECTED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= POLLOUT | POLLWRNORM;
		}

		release_sock(sk);
	}

	return mask;
}
static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	/* If the provided message contains an address, use that.  Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
	release_sock(sk);
	return err;
}
static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr *addr, int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t len, int flags)
{
	return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
}
static const struct proto_ops vsock_dgram_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = vsock_dgram_sendmsg,
	.recvmsg = vsock_dgram_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
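/*
 * A hedged userspace sketch (not part of this file) of the AF_VSOCK datagram
 * API these ops implement; port 9999 is an arbitrary example value.
 */
#if 0
#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <string.h>

static int example_dgram_send(void)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_DGRAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = VMADDR_CID_HOST;	/* CID 2: the hypervisor host */
	addr.svm_port = 9999;

	/* One datagram to the host; vsock_dgram_sendmsg() handles this. */
	return sendto(fd, "ping", 4, 0, (struct sockaddr *)&addr,
		      sizeof(addr));
}
#endif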
static void vsock_connect_timeout(struct work_struct *work)
{
	struct sock *sk;
	struct vsock_sock *vsk;

	vsk = container_of(work, struct vsock_sock, dwork.work);
	sk = sk_vsock(vsk);

	lock_sock(sk);
	if (sk->sk_state == SS_CONNECTING &&
	    (sk->sk_shutdown != SHUTDOWN_MASK)) {
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ETIMEDOUT;
		sk->sk_error_report(sk);
	}
	release_sock(sk);

	sock_put(sk);
}
static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
				int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
	switch (sock->state) {
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
		err = -EINVAL;
		goto out;
	case SS_CONNECTING:
		/* This continues on so we can move sock into the SS_CONNECTED
		 * state once the connection has completed (at which point err
		 * will be set to zero also).  Otherwise, we will either wait
		 * for the connection or return -EALREADY should this be a
		 * non-blocking call.
		 */
		err = -EALREADY;
		break;
	default:
		if ((sk->sk_state == VSOCK_SS_LISTEN) ||
		    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
			err = -EINVAL;
			goto out;
		}

		/* The hypervisor and well-known contexts do not have socket
		 * endpoints.
		 */
		if (!transport->stream_allow(remote_addr->svm_cid,
					     remote_addr->svm_port)) {
			err = -ENETUNREACH;
			goto out;
		}

		/* Set the remote address that we are connecting to. */
		memcpy(&vsk->remote_addr, remote_addr,
		       sizeof(vsk->remote_addr));

		err = vsock_auto_bind(vsk);
		if (err)
			goto out;

		sk->sk_state = SS_CONNECTING;

		err = transport->connect(vsk);
		if (err < 0)
			goto out;

		/* Mark sock as connecting and set the error code to in
		 * progress in case this is a non-blocking connect.
		 */
		sock->state = SS_CONNECTING;
		err = -EINPROGRESS;
	}

	/* The receive path will handle all communication until we are able to
	 * enter the connected state.  Here we wait for the connection to be
	 * completed or a notification of an error.
	 */
	timeout = vsk->connect_timeout;
	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
		if (flags & O_NONBLOCK) {
			/* If we're not going to block, we schedule a timeout
			 * function to generate a timeout on the connection
			 * attempt, in case the peer doesn't respond in a
			 * timely manner. We hold on to the socket until the
			 * timeout fires.
			 */
			sock_hold(sk);
			INIT_DELAYED_WORK(&vsk->dwork,
					  vsock_connect_timeout);
			schedule_delayed_work(&vsk->dwork, timeout);

			/* Skip ahead to preserve error code set above. */
			goto out_wait;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			sk->sk_state = SS_UNCONNECTED;
			sock->state = SS_UNCONNECTED;
			goto out_wait;
		} else if (timeout == 0) {
			err = -ETIMEDOUT;
			sk->sk_state = SS_UNCONNECTED;
			sock->state = SS_UNCONNECTED;
			goto out_wait;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
		sk->sk_state = SS_UNCONNECTED;
		sock->state = SS_UNCONNECTED;
	} else {
		err = 0;
	}

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}
static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *listener;
	int err;
	struct sock *connected;
	struct vsock_sock *vconnected;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	listener = sock->sk;

	lock_sock(listener);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (listener->sk_state != VSOCK_SS_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/* Wait for children sockets to appear; these are the new sockets
	 * created upon connection establishment.
	 */
	timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
	prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);

	while ((connected = vsock_dequeue_accept(listener)) == NULL &&
	       listener->sk_err == 0) {
		release_sock(listener);
		timeout = schedule_timeout(timeout);
		finish_wait(sk_sleep(listener), &wait);
		lock_sock(listener);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			goto out;
		} else if (timeout == 0) {
			err = -EAGAIN;
			goto out;
		}

		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(listener), &wait);

	if (listener->sk_err)
		err = -listener->sk_err;

	if (connected) {
		listener->sk_ack_backlog--;

		lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
		vconnected = vsock_sk(connected);

		/* If the listener socket has received an error, then we should
		 * reject this socket and return.  Note that we simply mark the
		 * socket rejected, drop our reference, and let the cleanup
		 * function handle the cleanup; the fact that we found it in
		 * the listener's accept queue guarantees that the cleanup
		 * function hasn't run yet.
		 */
		if (err) {
			vconnected->rejected = true;
		} else {
			newsock->state = SS_CONNECTED;
			sock_graft(connected, newsock);
		}

		release_sock(connected);
		sock_put(connected);
	}

out:
	release_sock(listener);
	return err;
}
static int vsock_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;

	sk = sock->sk;

	lock_sock(sk);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr)) {
		err = -EINVAL;
		goto out;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = VSOCK_SS_LISTEN;

	err = 0;

out:
	release_sock(sk);
	return err;
}
static int vsock_stream_setsockopt(struct socket *sock,
				   int level,
				   int optname,
				   char __user *optval,
				   unsigned int optlen)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

#define COPY_IN(_v)                                       \
	do {						  \
		if (optlen < sizeof(_v)) {		  \
			err = -EINVAL;			  \
			goto exit;			  \
		}					  \
		if (copy_from_user(&_v, optval, sizeof(_v)) != 0) {	\
			err = -EFAULT;					\
			goto exit;					\
		}							\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		COPY_IN(val);
		transport->set_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		COPY_IN(val);
		transport->set_max_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		COPY_IN(val);
		transport->set_min_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct timeval tv;
		COPY_IN(tv);
		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
			vsk->connect_timeout = tv.tv_sec * HZ +
			    DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
			if (vsk->connect_timeout == 0)
				vsk->connect_timeout =
				    VSOCK_DEFAULT_CONNECT_TIMEOUT;

		} else {
			err = -ERANGE;
		}
		break;
	}

	default:
		err = -ENOPROTOOPT;
		break;
	}

#undef COPY_IN

exit:
	release_sock(sk);
	return err;
}
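/* Worked example (illustration only): with HZ == 250, a caller setting
 * SO_VM_SOCKETS_CONNECT_TIMEOUT to { .tv_sec = 2, .tv_usec = 500000 } gets
 * connect_timeout = 2 * 250 + DIV_ROUND_UP(500000, 1000000 / 250)
 *                 = 500 + DIV_ROUND_UP(500000, 4000) = 500 + 125
 *                 = 625 jiffies, i.e. 2.5 seconds.
 */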
static int vsock_stream_getsockopt(struct socket *sock,
				   int level, int optname,
				   char __user *optval,
				   int __user *optlen)
{
	int err;
	int len;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

	err = get_user(len, optlen);
	if (err != 0)
		return err;

#define COPY_OUT(_v)                            \
	do {					\
		if (len < sizeof(_v))		\
			return -EINVAL;		\
						\
		len = sizeof(_v);		\
		if (copy_to_user(optval, &_v, len) != 0)	\
			return -EFAULT;				\
								\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		val = transport->get_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		val = transport->get_max_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		val = transport->get_min_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct timeval tv;
		tv.tv_sec = vsk->connect_timeout / HZ;
		tv.tv_usec =
		    (vsk->connect_timeout -
		     tv.tv_sec * HZ) * (1000000 / HZ);
		COPY_OUT(tv);
		break;
	}
	default:
		return -ENOPROTOOPT;
	}

	err = put_user(len, optlen);
	if (err != 0)
		return -EFAULT;

#undef COPY_OUT

	return 0;
}
static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
				size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	/* Callers should not provide a destination with stream sockets. */
	if (msg->msg_namelen) {
		err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if both sides are not shutdown in the direction. */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state != SS_CONNECTED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	while (total_written < len) {
		ssize_t written;

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				goto out_err;
			}

			err = transport->notify_send_pre_block(vsk, &send_data);
			if (err < 0) {
				finish_wait(sk_sleep(sk), &wait);
				goto out_err;
			}

			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				finish_wait(sk_sleep(sk), &wait);
				goto out_err;
			} else if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				goto out_err;
			}

			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
		}
		finish_wait(sk_sleep(sk), &wait);

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_err;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_err;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_err;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size.  It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */

		written = transport->stream_enqueue(
				vsk, msg,
				len - total_written);
		if (written < 0) {
			err = -ENOMEM;
			goto out_err;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_err;
	}

out_err:
	if (total_written > 0)
		err = total_written;
out:
	release_sock(sk);
	return err;
}
static int
vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		     int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	int err;
	size_t target;
	ssize_t copied;
	long timeout;
	struct vsock_transport_recv_notify_data recv_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (sk->sk_state != SS_CONNECTED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown. Differentiate between that case and when a
		 * peer has not connected or a local shutdown occurred with the
		 * SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check peer_shutdown flag here since peer may actually shut
	 * down, but there can be data in the queue that a local socket can
	 * receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer.  This
	 * is not an error.  We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing.  Note that this
	 * makes it impossible to handle cases where target is greater than the
	 * queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;

	while (1) {
		s64 ready;

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		ready = vsock_stream_has_data(vsk);

		if (ready == 0) {
			if (sk->sk_err != 0 ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}

			err = transport->notify_recv_pre_block(
					vsk, target, &recv_data);
			if (err < 0) {
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				finish_wait(sk_sleep(sk), &wait);
				break;
			} else if (timeout == 0) {
				err = -EAGAIN;
				finish_wait(sk_sleep(sk), &wait);
				break;
			}
		} else {
			ssize_t read;

			finish_wait(sk_sleep(sk), &wait);

			if (ready < 0) {
				/* Invalid queue pair content. XXX This should
				 * be changed to a connection reset in a later
				 * change.
				 */

				err = -ENOMEM;
				goto out;
			}

			err = transport->notify_recv_pre_dequeue(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			read = transport->stream_dequeue(
					vsk, msg,
					len - copied, flags);
			if (read < 0) {
				err = -ENOMEM;
				break;
			}

			copied += read;

			err = transport->notify_recv_post_dequeue(
					vsk, target, read,
					!(flags & MSG_PEEK), &recv_data);
			if (err < 0)
				goto out;

			if (read >= target || flags & MSG_PEEK)
				break;

			target -= read;
		}
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0)
		err = copied;

out:
	release_sock(sk);
	return err;
}
static const struct proto_ops vsock_stream_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_stream_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_stream_setsockopt,
	.getsockopt = vsock_stream_getsockopt,
	.sendmsg = vsock_stream_sendmsg,
	.recvmsg = vsock_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
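/*
 * A hedged userspace sketch (not part of this file) of the AF_VSOCK stream
 * API these ops implement; CID 3 and port 1234 are arbitrary example values
 * for a guest reachable from the host.
 */
#if 0
#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <string.h>

static int example_stream_connect(void)
{
	struct sockaddr_vm addr;
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.svm_family = AF_VSOCK;
	addr.svm_cid = 3;	/* example guest CID */
	addr.svm_port = 1234;

	/* Drives vsock_stream_connect() above. */
	return connect(fd, (struct sockaddr *)&addr, sizeof(addr));
}
#endif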
static int vsock_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_VSOCK)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &vsock_dgram_ops;
		break;
	case SOCK_STREAM:
		sock->ops = &vsock_stream_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ? 0 : -ENOMEM;
}

static const struct net_proto_family vsock_family_ops = {
	.family = AF_VSOCK,
	.create = vsock_create,
	.owner = THIS_MODULE,
};
static long vsock_dev_do_ioctl(struct file *filp,
			       unsigned int cmd, void __user *ptr)
{
	u32 __user *p = ptr;
	int retval = 0;

	switch (cmd) {
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		if (put_user(transport->get_local_cid(), p) != 0)
			retval = -EFAULT;
		break;

	default:
		pr_err("Unknown ioctl %d\n", cmd);
		retval = -EINVAL;
	}

	return retval;
}

static long vsock_dev_ioctl(struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif
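/*
 * A hedged userspace sketch (not part of this file) of the local-CID ioctl
 * handled above; /dev/vsock is the misc device registered below.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vm_sockets.h>

static unsigned int example_get_local_cid(void)
{
	unsigned int cid = 0;
	int fd = open("/dev/vsock", O_RDONLY);

	/* Asks the registered transport for this machine's context ID. */
	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
	close(fd);
	return cid;
}
#endif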
static const struct file_operations vsock_device_ops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vsock_dev_compat_ioctl,
#endif
	.open		= nonseekable_open,
};

static struct miscdevice vsock_device = {
	.name		= "vsock",
	.fops		= &vsock_device_ops,
};
int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
{
	int err = mutex_lock_interruptible(&vsock_register_mutex);

	if (err)
		return err;

	if (transport) {
		err = -EBUSY;
		goto err_busy;
	}

	/* Transport must be the owner of the protocol so that it can't
	 * unload while there are open sockets.
	 */
	vsock_proto.owner = owner;
	transport = t;

	vsock_init_tables();

	vsock_device.minor = MISC_DYNAMIC_MINOR;
	err = misc_register(&vsock_device);
	if (err) {
		pr_err("Failed to register misc device\n");
		goto err_reset_transport;
	}

	err = proto_register(&vsock_proto, 1);	/* we want our slab */
	if (err) {
		pr_err("Cannot register vsock protocol\n");
		goto err_deregister_misc;
	}

	err = sock_register(&vsock_family_ops);
	if (err) {
		pr_err("could not register af_vsock (%d) address family: %d\n",
		       AF_VSOCK, err);
		goto err_unregister_proto;
	}

	mutex_unlock(&vsock_register_mutex);
	return 0;

err_unregister_proto:
	proto_unregister(&vsock_proto);
err_deregister_misc:
	misc_deregister(&vsock_device);
err_reset_transport:
	transport = NULL;
err_busy:
	mutex_unlock(&vsock_register_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__vsock_core_init);
void vsock_core_exit(void)
{
	mutex_lock(&vsock_register_mutex);

	misc_deregister(&vsock_device);
	sock_unregister(AF_VSOCK);
	proto_unregister(&vsock_proto);

	/* We do not want the assignment below re-ordered. */
	mb();
	transport = NULL;

	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_exit);
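/*
 * A hedged sketch (not part of this file) of how a transport module wires in
 * the core; "example_transport" is hypothetical, and vsock_core_init() is the
 * af_vsock.h convenience wrapper that passes THIS_MODULE as the owner to
 * __vsock_core_init().
 */
#if 0
static const struct vsock_transport example_transport = {
	/* .get_local_cid, .init, .connect, ... transport callbacks */
};

static int __init example_transport_init(void)
{
	return vsock_core_init(&example_transport);
}

static void __exit example_transport_exit(void)
{
	vsock_core_exit();
}

module_init(example_transport_init);
module_exit(example_transport_exit);
#endif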
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.1.0-k");
MODULE_LICENSE("GPL v2");