// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hyper-V transport for vsock
 *
 * Hyper-V Sockets supplies a byte-stream based communication mechanism
 * between the host and the VM. This driver implements the necessary
 * support in the VM by introducing the new vsock transport.
 *
 * Copyright (c) 2017, Microsoft Corporation.
 */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <net/sock.h>
#include <net/af_vsock.h>

/* The host side's design of the feature requires exactly six 4KB pages for
 * the recv/send rings respectively -- this is suboptimal in terms of memory
 * consumption, but unfortunately we have to live with it until the host
 * comes up with a better design in the future.
 */
#define PAGE_SIZE_4K		4096
#define RINGBUFFER_HVS_RCV_SIZE	(PAGE_SIZE_4K * 6)
#define RINGBUFFER_HVS_SND_SIZE	(PAGE_SIZE_4K * 6)

/* The MTU is 16KB per the host side's design */
#define HVS_MTU_SIZE		(1024 * 16)

/* How long to wait for graceful shutdown of a connection */
#define HVS_CLOSE_TIMEOUT	(8 * HZ)

struct vmpipe_proto_header {
	u32 pkt_type;
	u32 data_size;
};

/* For recv, we use the VMBus in-place packet iterator APIs to directly copy
 * data from the ringbuffer into the userspace buffer.
 */
struct hvs_recv_buf {
	/* The header before the payload data */
	struct vmpipe_proto_header hdr;

	/* The payload */
	u8 data[HVS_MTU_SIZE];
};

/* We can send up to HVS_MTU_SIZE bytes of payload to the host, but let's use
 * a small size, i.e. HVS_SEND_BUF_SIZE, to minimize the dynamically-allocated
 * buffer, because tests show there is no significant performance difference.
 *
 * Note: the buffer can be eliminated in the future when we add new VMBus
 * ringbuffer APIs that allow us to directly copy data from a userspace
 * buffer to the VMBus ringbuffer.
 */
#define HVS_SEND_BUF_SIZE (PAGE_SIZE_4K - sizeof(struct vmpipe_proto_header))
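/* With the 8-byte vmpipe_proto_header above, HVS_SEND_BUF_SIZE works out to
 * 4096 - 8 = 4088 bytes of payload per send buffer.
 */
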
struct hvs_send_buf {
	/* The header before the payload data */
	struct vmpipe_proto_header hdr;

	/* The payload */
	u8 data[HVS_SEND_BUF_SIZE];
};

#define HVS_HEADER_LEN	(sizeof(struct vmpacket_descriptor) + \
			 sizeof(struct vmpipe_proto_header))

/* See 'prev_indices' in hv_ringbuffer_read(), hv_ringbuffer_write(), and
 * __hv_pkt_iter_next().
 */
#define VMBUS_PKT_TRAILER_SIZE	(sizeof(u64))

#define HVS_PKT_LEN(payload_len)	(HVS_HEADER_LEN + \
					 ALIGN((payload_len), 8) + \
					 VMBUS_PKT_TRAILER_SIZE)
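/* For example, with the 16-byte vmpacket_descriptor and the 8-byte
 * vmpipe_proto_header, a zero-length (FIN) packet occupies
 * HVS_PKT_LEN(0) = 16 + 8 + 0 + 8 = 32 bytes of ring space, and a 1-byte
 * payload occupies HVS_PKT_LEN(1) = 16 + 8 + 8 + 8 = 40 bytes, because the
 * payload length is rounded up to a multiple of 8.
 */
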
union hvs_service_id {
	uuid_le	srv_id;

	struct {
		unsigned int svm_port;
		unsigned char b[sizeof(uuid_le) - sizeof(unsigned int)];
	};
};

/* Per-socket state (accessed via vsk->trans) */
struct hvsock {
	struct vsock_sock *vsk;

	uuid_le vm_srv_id;
	uuid_le host_srv_id;

	struct vmbus_channel *chan;
	struct vmpacket_descriptor *recv_desc;

	/* The length of the payload not delivered to userland yet */
	u32 recv_data_len;
	/* The offset of the payload */
	u32 recv_data_off;

	/* Have we sent the zero-length packet (FIN)? */
	bool fin_sent;
};

/* In the VM, we support Hyper-V Sockets with AF_VSOCK, and the endpoint is
 * <cid, port> (see struct sockaddr_vm). Note: cid is not really used here:
 * when we write apps to connect to the host, we can only use VMADDR_CID_ANY
 * or VMADDR_CID_HOST (both are equivalent) as the remote cid, and when we
 * write apps to bind() & listen() in the VM, we can only use VMADDR_CID_ANY
 * as the local cid.
 *
 * On the host, Hyper-V Sockets are supported by Winsock AF_HYPERV:
 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-
 * guide/make-integration-service, and the endpoint is <VmID, ServiceId> with
 * the below sockaddr:
 *
 * struct SOCKADDR_HV
 * {
 *    ADDRESS_FAMILY Family;
 *    USHORT Reserved;
 *    GUID VmId;
 *    GUID ServiceId;
 * };
 * Note: VmID is not used by the Linux VM, and actually it isn't transmitted
 * via VMBus, because here it's obvious the host and the VM can easily
 * identify each other. Though the VmID is useful on the host, especially in
 * the case of Windows containers, the Linux VM doesn't need it at all.
 *
 * To make use of the AF_VSOCK infrastructure in the Linux VM, we have to
 * limit the available GUID space of SOCKADDR_HV so that we can create a
 * mapping between an AF_VSOCK port and a SOCKADDR_HV Service GUID. The rule
 * for writing Hyper-V Sockets apps on the host and in the Linux VM is:
 *
 ****************************************************************************
 * The only valid Service GUIDs, from the perspectives of both the host and *
 * Linux VM, that can be connected by the other end, must conform to this   *
 * format: <port>-facb-11e6-bd58-64006a7986d3, and the "port" must be in    *
 * this range [0, 0x7FFFFFFF].                                              *
 ****************************************************************************
 *
 * When we write apps on the host to connect(), the GUID ServiceID is used.
 * When we write apps in the Linux VM to connect(), we only need to specify
 * the port and the driver will form the GUID and use that to request the
 * host.
 *
 * From the perspective of the Linux VM:
 * 1. the local ephemeral port (i.e. the local auto-bound port when we call
 * connect() without explicit bind()) is generated by __vsock_bind_stream(),
 * and the range is [1024, 0xFFFFFFFF).
 * 2. the remote ephemeral port (i.e. the auto-generated remote port for
 * a connect request initiated by the host's connect()) is generated by
 * hvs_remote_addr_init() and the range is [0x80000000, 0xFFFFFFFF).
 */

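/* For example, a service listening in the VM on vsock port 8000 (0x1f40)
 * corresponds to the host-side Service GUID
 * 00001f40-facb-11e6-bd58-64006a7986d3: the port number simply replaces the
 * first 32-bit field of the template GUID defined below.
 */
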
#define MAX_LISTEN_PORT			((u32)0x7FFFFFFF)
#define MAX_VM_LISTEN_PORT		MAX_LISTEN_PORT
#define MAX_HOST_LISTEN_PORT		MAX_LISTEN_PORT
#define MIN_HOST_EPHEMERAL_PORT		(MAX_HOST_LISTEN_PORT + 1)

/* 00000000-facb-11e6-bd58-64006a7986d3 */
static const uuid_le srv_id_template =
	UUID_LE(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
		0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3);

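/* The first four bytes of a uuid_le hold the little-endian 32-bit port, so
 * a Service GUID is validated by comparing everything after byte 4 against
 * the template, and the port is recovered by reading the first four bytes
 * back as an unsigned int.
 */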
static bool is_valid_srv_id(const uuid_le *id)
{
	return !memcmp(&id->b[4], &srv_id_template.b[4], sizeof(uuid_le) - 4);
}

static unsigned int get_port_by_srv_id(const uuid_le *svr_id)
{
	return *((unsigned int *)svr_id);
}

static void hvs_addr_init(struct sockaddr_vm *addr, const uuid_le *svr_id)
{
	unsigned int port = get_port_by_srv_id(svr_id);

	vsock_addr_init(addr, VMADDR_CID_ANY, port);
}

static void hvs_remote_addr_init(struct sockaddr_vm *remote,
				 struct sockaddr_vm *local)
{
	static u32 host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;
	struct sock *sk;

	vsock_addr_init(remote, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	while (1) {
		/* Wrap around ? */
		if (host_ephemeral_port < MIN_HOST_EPHEMERAL_PORT ||
		    host_ephemeral_port == VMADDR_PORT_ANY)
			host_ephemeral_port = MIN_HOST_EPHEMERAL_PORT;

		remote->svm_port = host_ephemeral_port++;

		sk = vsock_find_connected_socket(remote, local);
		if (!sk) {
			/* Found an available ephemeral port */
			return;
		}

		/* Release refcnt got in vsock_find_connected_socket */
		sock_put(sk);
	}
}

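/* The "pending send size" asks the host to signal the guest only once at
 * least that many bytes become writable in the outbound ring, so the guest
 * isn't interrupted for every small chunk of freed space while it waits in
 * hvs_stream_has_space().
 */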
static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
{
	set_channel_pending_send_size(chan,
				      HVS_PKT_LEN(HVS_SEND_BUF_SIZE));

	/* See hvs_stream_has_space(): we must make sure the host has seen
	 * the new pending send size, before we can re-check the writable
	 * bytes.
	 */
	virt_mb();
}

static void hvs_clear_channel_pending_send_size(struct vmbus_channel *chan)
{
	set_channel_pending_send_size(chan, 0);

	/* Ditto */
	virt_mb();
}

static bool hvs_channel_readable(struct vmbus_channel *chan)
{
	u32 readable = hv_get_bytes_to_read(&chan->inbound);

	/* 0-size payload means FIN */
	return readable >= HVS_PKT_LEN(0);
}

static int hvs_channel_readable_payload(struct vmbus_channel *chan)
{
	u32 readable = hv_get_bytes_to_read(&chan->inbound);

	if (readable > HVS_PKT_LEN(0)) {
		/* At least we have 1 byte to read. We don't need to return
		 * the exact readable bytes: see vsock_stream_recvmsg() ->
		 * vsock_stream_has_data().
		 */
		return 1;
	}

	if (readable == HVS_PKT_LEN(0)) {
		/* 0-size payload means FIN */
		return 2;
	}

	/* No payload or FIN */
	return 0;
}

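/* With the packet sizes worked out above (HVS_PKT_LEN(1) == 40 and
 * HVS_PKT_LEN(0) == 32), hvs_channel_writable_bytes() reports usable space
 * only when more than 72 bytes of the outbound ring are free.
 */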
static size_t hvs_channel_writable_bytes(struct vmbus_channel *chan)
{
	u32 writeable = hv_get_bytes_to_write(&chan->outbound);
	size_t ret;

	/* The ringbuffer mustn't be 100% full, and we should reserve a
	 * zero-length-payload packet for the FIN: see hv_ringbuffer_write()
	 * and hvs_shutdown().
	 */
	if (writeable <= HVS_PKT_LEN(1) + HVS_PKT_LEN(0))
		return 0;

	ret = writeable - HVS_PKT_LEN(1) - HVS_PKT_LEN(0);

	return round_down(ret, 8);
}

static int hvs_send_data(struct vmbus_channel *chan,
			 struct hvs_send_buf *send_buf, size_t to_write)
{
	send_buf->hdr.pkt_type = 1;
	send_buf->hdr.data_size = to_write;
	return vmbus_sendpacket(chan, &send_buf->hdr,
				sizeof(send_buf->hdr) + to_write,
				0, VM_PKT_DATA_INBAND, 0);
}

static void hvs_channel_cb(void *ctx)
{
	struct sock *sk = (struct sock *)ctx;
	struct vsock_sock *vsk = vsock_sk(sk);
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;

	if (hvs_channel_readable(chan))
		sk->sk_data_ready(sk);

	/* See hvs_stream_has_space(): when we reach here, the writable bytes
	 * may be already less than HVS_PKT_LEN(HVS_SEND_BUF_SIZE).
	 */
	if (hv_get_bytes_to_write(&chan->outbound) > 0)
		sk->sk_write_space(sk);
}

static void hvs_do_close_lock_held(struct vsock_sock *vsk,
				   bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);
	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;
		vsock_remove_sock(vsk);

		/* Release the reference taken while scheduling the timeout */
		sock_put(sk);
	}
}

static void hvs_close_connection(struct vmbus_channel *chan)
{
	struct sock *sk = get_per_channel_state(chan);

	lock_sock(sk);
	hvs_do_close_lock_held(vsock_sk(sk), true);
	release_sock(sk);
}

static void hvs_open_connection(struct vmbus_channel *chan)
{
	uuid_le *if_instance, *if_type;
	unsigned char conn_from_host;

	struct sockaddr_vm addr;
	struct sock *sk, *new = NULL;
	struct vsock_sock *vnew = NULL;
	struct hvsock *hvs, *hvs_new = NULL;
	int ret;

	if_type = &chan->offermsg.offer.if_type;
	if_instance = &chan->offermsg.offer.if_instance;
	conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];

	/* The host or the VM should only listen on a port in
	 * [0, MAX_LISTEN_PORT]
	 */
	if (!is_valid_srv_id(if_type) ||
	    get_port_by_srv_id(if_type) > MAX_LISTEN_PORT)
		return;

	hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
	sk = vsock_find_bound_socket(&addr);
	if (!sk)
		return;

	lock_sock(sk);
	if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
	    (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
		goto out;

	if (conn_from_host) {
		if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)
			goto out;

		new = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
				     sk->sk_type, 0);
		if (!new)
			goto out;

		new->sk_state = TCP_SYN_SENT;
		vnew = vsock_sk(new);
		hvs_new = vnew->trans;
		hvs_new->chan = chan;
	} else {
		hvs = vsock_sk(sk)->trans;
		hvs->chan = chan;
	}

	set_channel_read_mode(chan, HV_CALL_DIRECT);
	ret = vmbus_open(chan, RINGBUFFER_HVS_SND_SIZE,
			 RINGBUFFER_HVS_RCV_SIZE, NULL, 0,
			 hvs_channel_cb, conn_from_host ? new : sk);
	if (ret != 0) {
		if (conn_from_host) {
			hvs_new->chan = NULL;
			sock_put(new);
		} else {
			hvs->chan = NULL;
		}
		goto out;
	}

	set_per_channel_state(chan, conn_from_host ? new : sk);
	vmbus_set_chn_rescind_callback(chan, hvs_close_connection);

	if (conn_from_host) {
		new->sk_state = TCP_ESTABLISHED;
		sk->sk_ack_backlog++;

		hvs_addr_init(&vnew->local_addr, if_type);
		hvs_remote_addr_init(&vnew->remote_addr, &vnew->local_addr);

		hvs_new->vm_srv_id = *if_type;
		hvs_new->host_srv_id = *if_instance;

		vsock_insert_connected(vnew);

		vsock_enqueue_accept(sk, new);
	} else {
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;

		vsock_insert_connected(vsock_sk(sk));
	}

	sk->sk_state_change(sk);

out:
	/* Release refcnt obtained when we called vsock_find_bound_socket() */
	sock_put(sk);

	release_sock(sk);
}

static u32 hvs_get_local_cid(void)
{
	return VMADDR_CID_ANY;
}

static int hvs_sock_init(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	struct hvsock *hvs;

	hvs = kzalloc(sizeof(*hvs), GFP_KERNEL);
	if (!hvs)
		return -ENOMEM;

	vsk->trans = hvs;
	hvs->vsk = vsk;

	return 0;
}

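/* Convert the local/remote vsock ports into the template-based Service
 * GUIDs: writing svm_port through union hvs_service_id overwrites the first
 * 32-bit field of the GUID, i.e. exactly the <port> part of
 * <port>-facb-11e6-bd58-64006a7986d3 described above.
 */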
static int hvs_connect(struct vsock_sock *vsk)
{
	union hvs_service_id vm, host;
	struct hvsock *h = vsk->trans;

	vm.srv_id = srv_id_template;
	vm.svm_port = vsk->local_addr.svm_port;
	h->vm_srv_id = vm.srv_id;

	host.srv_id = srv_id_template;
	host.svm_port = vsk->remote_addr.svm_port;
	h->host_srv_id = host.srv_id;

	return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
}

static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
{
	struct vmpipe_proto_header hdr;

	if (hvs->fin_sent || !hvs->chan)
		return;

	/* It can't fail: see hvs_channel_writable_bytes(). */
	(void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
	hvs->fin_sent = true;
}

static int hvs_shutdown(struct vsock_sock *vsk, int mode)
{
	struct sock *sk = sk_vsock(vsk);

	if (!(mode & SEND_SHUTDOWN))
		return 0;

	lock_sock(sk);
	hvs_shutdown_lock_held(vsk->trans, mode);
	release_sock(sk);
	return 0;
}

static void hvs_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);
	if (!sock_flag(sk, SOCK_DONE))
		hvs_do_close_lock_held(vsk, false);

	vsk->close_work_scheduled = false;
	release_sock(sk);
	sock_put(sk);
}

/* Returns true if it is safe to remove the socket; false otherwise */
static bool hvs_close_lock_held(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);

	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);

	if (sock_flag(sk, SOCK_DONE))
		return true;

	/* This reference will be dropped by the delayed close routine */
	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
	return false;
}

static void hvs_release(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	bool remove_sock;

	lock_sock(sk);
	remove_sock = hvs_close_lock_held(vsk);
	release_sock(sk);
	if (remove_sock)
		vsock_remove_sock(vsk);
}

static void hvs_destruct(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;

	if (chan)
		vmbus_hvsock_device_unregister(chan);

	kfree(hvs);
}

static int hvs_dgram_bind(struct vsock_sock *vsk, struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}

static int hvs_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
			     size_t len, int flags)
{
	return -EOPNOTSUPP;
}

static int hvs_dgram_enqueue(struct vsock_sock *vsk,
			     struct sockaddr_vm *remote, struct msghdr *msg,
			     size_t dgram_len)
{
	return -EOPNOTSUPP;
}

static bool hvs_dgram_allow(u32 cid, u32 port)
{
	return false;
}

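/* hvs->recv_desc points at the in-place VMBus packet descriptor returned by
 * the packet iterator; the vmpipe header and payload immediately follow it
 * in the ring, so (recv_desc + 1) is the start of a struct hvs_recv_buf.
 */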
static int hvs_update_recv_data(struct hvsock *hvs)
{
	struct hvs_recv_buf *recv_buf;
	u32 payload_len;

	recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
	payload_len = recv_buf->hdr.data_size;

	if (payload_len > HVS_MTU_SIZE)
		return -EIO;

	if (payload_len == 0)
		hvs->vsk->peer_shutdown |= SEND_SHUTDOWN;

	hvs->recv_data_len = payload_len;
	hvs->recv_data_off = 0;

	return 0;
}

static ssize_t hvs_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
				  size_t len, int flags)
{
	struct hvsock *hvs = vsk->trans;
	bool need_refill = !hvs->recv_desc;
	struct hvs_recv_buf *recv_buf;
	u32 to_read;
	int ret;

	if (flags & MSG_PEEK)
		return -EOPNOTSUPP;

	if (need_refill) {
		hvs->recv_desc = hv_pkt_iter_first(hvs->chan);
		ret = hvs_update_recv_data(hvs);
		if (ret)
			return ret;
	}

	recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
	to_read = min_t(u32, len, hvs->recv_data_len);
	ret = memcpy_to_msg(msg, recv_buf->data + hvs->recv_data_off, to_read);
	if (ret != 0)
		return ret;

	hvs->recv_data_len -= to_read;
	if (hvs->recv_data_len == 0) {
		hvs->recv_desc = hv_pkt_iter_next(hvs->chan, hvs->recv_desc);
		if (hvs->recv_desc) {
			ret = hvs_update_recv_data(hvs);
			if (ret)
				return ret;
		}
	} else {
		hvs->recv_data_off += to_read;
	}

	return to_read;
}

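/* Each call below queues at most HVS_SEND_BUF_SIZE bytes; for larger
 * writes, the vsock core (see vsock_stream_sendmsg()) is expected to call
 * stream_enqueue() repeatedly until everything is sent or the ring fills.
 */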
static ssize_t hvs_stream_enqueue(struct vsock_sock *vsk, struct msghdr *msg,
				  size_t len)
{
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;
	struct hvs_send_buf *send_buf;
	ssize_t to_write, max_writable, ret;

	BUILD_BUG_ON(sizeof(*send_buf) != PAGE_SIZE_4K);

	send_buf = kmalloc(sizeof(*send_buf), GFP_KERNEL);
	if (!send_buf)
		return -ENOMEM;

	max_writable = hvs_channel_writable_bytes(chan);
	to_write = min_t(ssize_t, len, max_writable);
	to_write = min_t(ssize_t, to_write, HVS_SEND_BUF_SIZE);

	ret = memcpy_from_msg(send_buf->data, msg, to_write);
	if (ret < 0)
		goto out;

	ret = hvs_send_data(hvs->chan, send_buf, to_write);
	if (ret < 0)
		goto out;

	ret = to_write;
out:
	kfree(send_buf);
	return ret;
}

static s64 hvs_stream_has_data(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;
	s64 ret;

	if (hvs->recv_data_len > 0)
		return 1;

	switch (hvs_channel_readable_payload(hvs->chan)) {
	case 1:
		ret = 1;
		break;
	case 2: /* 0-size payload means FIN */
		vsk->peer_shutdown |= SEND_SHUTDOWN;
		ret = 0;
		break;
	default:
		ret = 0;
		break;
	}

	return ret;
}

static s64 hvs_stream_has_space(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;
	s64 ret;

	ret = hvs_channel_writable_bytes(chan);
	if (ret > 0) {
		hvs_clear_channel_pending_send_size(chan);
	} else {
		/* See hvs_channel_cb() */
		hvs_set_channel_pending_send_size(chan);

		/* Re-check the writable bytes to avoid race */
		ret = hvs_channel_writable_bytes(chan);
		if (ret > 0)
			hvs_clear_channel_pending_send_size(chan);
	}

	return ret;
}

static u64 hvs_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return HVS_MTU_SIZE + 1;
}

static bool hvs_stream_is_active(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;

	return hvs->chan != NULL;
}

static bool hvs_stream_allow(u32 cid, u32 port)
{
	/* The host's port range [MIN_HOST_EPHEMERAL_PORT, 0xFFFFFFFF) is
	 * reserved as ephemeral ports, which are used as the host's ports
	 * when the host initiates connections.
	 *
	 * Perform this check in the guest so an immediate error is produced
	 * instead of a timeout.
	 */
	if (port > MAX_HOST_LISTEN_PORT)
		return false;

	if (cid == VMADDR_CID_HOST)
		return true;

	return false;
}

static
int hvs_notify_poll_in(struct vsock_sock *vsk, size_t target, bool *readable)
{
	struct hvsock *hvs = vsk->trans;

	*readable = hvs_channel_readable(hvs->chan);
	return 0;
}

static
int hvs_notify_poll_out(struct vsock_sock *vsk, size_t target, bool *writable)
{
	*writable = hvs_stream_has_space(vsk) > 0;

	return 0;
}

static
int hvs_notify_recv_init(struct vsock_sock *vsk, size_t target,
			 struct vsock_transport_recv_notify_data *d)
{
	return 0;
}

static
int hvs_notify_recv_pre_block(struct vsock_sock *vsk, size_t target,
			      struct vsock_transport_recv_notify_data *d)
{
	return 0;
}

static
int hvs_notify_recv_pre_dequeue(struct vsock_sock *vsk, size_t target,
				struct vsock_transport_recv_notify_data *d)
{
	return 0;
}

static
int hvs_notify_recv_post_dequeue(struct vsock_sock *vsk, size_t target,
				 ssize_t copied, bool data_read,
				 struct vsock_transport_recv_notify_data *d)
{
	return 0;
}

static
int hvs_notify_send_init(struct vsock_sock *vsk,
			 struct vsock_transport_send_notify_data *d)
{
	return 0;
}

static
int hvs_notify_send_pre_block(struct vsock_sock *vsk,
			      struct vsock_transport_send_notify_data *d)
{
	return 0;
}

static
int hvs_notify_send_pre_enqueue(struct vsock_sock *vsk,
				struct vsock_transport_send_notify_data *d)
{
	return 0;
}

static
int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written,
				 struct vsock_transport_send_notify_data *d)
{
	return 0;
}

static void hvs_set_buffer_size(struct vsock_sock *vsk, u64 val)
{
	/* Ignored. */
}

static void hvs_set_min_buffer_size(struct vsock_sock *vsk, u64 val)
{
	/* Ignored. */
}

static void hvs_set_max_buffer_size(struct vsock_sock *vsk, u64 val)
{
	/* Ignored. */
}

static u64 hvs_get_buffer_size(struct vsock_sock *vsk)
{
	return -ENOPROTOOPT;
}

static u64 hvs_get_min_buffer_size(struct vsock_sock *vsk)
{
	return -ENOPROTOOPT;
}

static u64 hvs_get_max_buffer_size(struct vsock_sock *vsk)
{
	return -ENOPROTOOPT;
}

static struct vsock_transport hvs_transport = {
	.get_local_cid            = hvs_get_local_cid,

	.init                     = hvs_sock_init,
	.destruct                 = hvs_destruct,
	.release                  = hvs_release,
	.connect                  = hvs_connect,
	.shutdown                 = hvs_shutdown,

	.dgram_bind               = hvs_dgram_bind,
	.dgram_dequeue            = hvs_dgram_dequeue,
	.dgram_enqueue            = hvs_dgram_enqueue,
	.dgram_allow              = hvs_dgram_allow,

	.stream_dequeue           = hvs_stream_dequeue,
	.stream_enqueue           = hvs_stream_enqueue,
	.stream_has_data          = hvs_stream_has_data,
	.stream_has_space         = hvs_stream_has_space,
	.stream_rcvhiwat          = hvs_stream_rcvhiwat,
	.stream_is_active         = hvs_stream_is_active,
	.stream_allow             = hvs_stream_allow,

	.notify_poll_in           = hvs_notify_poll_in,
	.notify_poll_out          = hvs_notify_poll_out,
	.notify_recv_init         = hvs_notify_recv_init,
	.notify_recv_pre_block    = hvs_notify_recv_pre_block,
	.notify_recv_pre_dequeue  = hvs_notify_recv_pre_dequeue,
	.notify_recv_post_dequeue = hvs_notify_recv_post_dequeue,
	.notify_send_init         = hvs_notify_send_init,
	.notify_send_pre_block    = hvs_notify_send_pre_block,
	.notify_send_pre_enqueue  = hvs_notify_send_pre_enqueue,
	.notify_send_post_enqueue = hvs_notify_send_post_enqueue,

	.set_buffer_size          = hvs_set_buffer_size,
	.set_min_buffer_size      = hvs_set_min_buffer_size,
	.set_max_buffer_size      = hvs_set_max_buffer_size,
	.get_buffer_size          = hvs_get_buffer_size,
	.get_min_buffer_size      = hvs_get_min_buffer_size,
	.get_max_buffer_size      = hvs_get_max_buffer_size,
};

static int hvs_probe(struct hv_device *hdev,
		     const struct hv_vmbus_device_id *dev_id)
{
	struct vmbus_channel *chan = hdev->channel;

	hvs_open_connection(chan);

	/* Always return success to suppress the unnecessary error message
	 * in vmbus_probe(): on error the host will rescind the device in
	 * 30 seconds and we can do cleanup at that time in
	 * vmbus_onoffer_rescind().
	 */
	return 0;
}

static int hvs_remove(struct hv_device *hdev)
{
	struct vmbus_channel *chan = hdev->channel;

	vmbus_hvsock_device_unregister(chan);

	return 0;
}

/* This isn't really used. See vmbus_match() and vmbus_probe() */
static const struct hv_vmbus_device_id id_table[] = {
	{},
};

static struct hv_driver hvs_drv = {
	.name		= "hv_sock",
	.hvsock		= true,
	.id_table	= id_table,
	.probe		= hvs_probe,
	.remove		= hvs_remove,
};

static int __init hvs_init(void)
{
	int ret;

	if (vmbus_proto_version < VERSION_WIN10)
		return -ENODEV;

	ret = vmbus_driver_register(&hvs_drv);
	if (ret != 0)
		return ret;

	ret = vsock_core_init(&hvs_transport);
	if (ret) {
		vmbus_driver_unregister(&hvs_drv);
		return ret;
	}

	return 0;
}

static void __exit hvs_exit(void)
{
	vsock_core_exit();
	vmbus_driver_unregister(&hvs_drv);
}

module_init(hvs_init);
module_exit(hvs_exit);

MODULE_DESCRIPTION("Hyper-V Sockets");
MODULE_VERSION("1.0.0");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_VSOCK);