/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct rndis_device *rdev;

	if (!nvdev)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

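/* Note on the deferral above: ndo_set_rx_mode callbacks run in atomic
 * context (with the address-list lock held), while programming the RNDIS
 * packet filter sleeps waiting for the host to complete the request.
 * Punting the filter update to a work item is what lets the two coexist.
 */
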
static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chan_table[i].channel;
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);
			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);
		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

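/* The drain loop above polls with exponential backoff (10 ms doubling up
 * to 1 s, at most retry_max iterations), giving the host a bounded window
 * to consume in-flight ring data before the channels are torn down.
 */
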
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   u32 pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

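/* Each call appends one per-packet info (PPI) element directly after any
 * existing PPIs and bumps data_offset by the same amount, so the RNDIS
 * payload always starts past the PPI area. Callers write the PPI body at
 * (void *)ppi + ppi->ppi_offset.
 */
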
/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	struct sock *sk = skb->sk;
	int q_idx = sk_tx_queue_get(sk);

	if (q_idx < 0 || skb->ooo_okay ||
	    q_idx >= ndev->real_num_tx_queues) {
		u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
		int new_idx;

		new_idx = nvsc_dev->send_table[hash]
			% nvsc_dev->num_chn;

		if (q_idx != new_idx && sk &&
		    sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_idx);

		q_idx = new_idx;
	}

	if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
		q_idx = 0;

	return q_idx;
}

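/* The hash-to-queue mapping goes through the host-provided send_table, so
 * the host can steer flows; the chosen queue is cached in the socket (when
 * one exists) to keep a flow on a stable queue until ooo_okay permits a
 * move to a different queue.
 */
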
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused fragments from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

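/* Example: a linear area of 10 bytes starting at in-page offset 4090 with
 * 4 KiB pages gives DIV_ROUND_UP(4090 + 10, 4096) = 2 slots, since the
 * area straddles a page boundary.
 */
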
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	u32 hash;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will at most need two pages to describe the RNDIS
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered over
	 * more pages we try linearizing it.
	 */
	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for the hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

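/* Return-code policy above: -EAGAIN maps to NETDEV_TX_BUSY so the stack
 * requeues the skb; -ENOSPC is only counted; every other failure drops
 * the packet but still returns NETDEV_TX_OK, since the error is not
 * expected to be transient.
 */
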
/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

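/* RNDIS reports link speed in units of 100 bps, hence the division by
 * 10000 above to store the value in Mbps for ethtool. Link state changes
 * themselves are only queued here; the actual carrier handling runs from
 * the rate-limited netvsc_link_change() work item.
 */
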
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, buflen), data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *net_device = net_device_ctx->nvdev;
	struct net_device *vf_netdev;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/*
	 * If necessary, inject this packet into the VF interface.
	 * On Hyper-V, multicast and broadcast packets are only delivered
	 * to the synthetic interface (after subjecting these to
	 * policy filters on the host). Deliver these via the VF
	 * interface in the guest.
	 */
	rcu_read_lock();
	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
	if (vf_netdev && (vf_netdev->flags & IFF_UP))
		net = vf_netdev;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		rcu_read_unlock();
		return NVSP_STAT_FAIL;
	}

	if (net != vf_netdev)
		skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &net_device->chan_table[q_idx].rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_receive_skb(skb);
	rcu_read_unlock();

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_set_queues(struct net_device *net, struct hv_device *dev,
			     u32 num_chn)
{
	struct netvsc_device_info device_info;
	int ret;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = num_chn;
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = num_chn;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret)
		return ret;

	ret = netif_set_real_num_tx_queues(net, num_chn);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(net, num_chn);

	return ret;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	unsigned int count = channels->combined_count;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (count > net->num_tx_queues || count > net->num_rx_queues)
		return -EINVAL;

	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	ret = netvsc_close(net);
	if (ret)
		return ret;

	net_device_ctx->start_remove = true;
	rndis_filter_device_remove(dev, nvdev);

	ret = netvsc_set_queues(net, dev, count);
	if (ret == 0)
		nvdev->num_chn = count;
	else
		netvsc_set_queues(net, dev, nvdev->num_chn);

	netvsc_open(net);
	net_device_ctx->start_remove = false;

	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;
}

static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	ethtool_cmd_speed_set(&diff1, 0);
	diff1.duplex = 0;
	/* advertising and cmd are usually set */
	diff1.advertising = 0;
	diff1.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

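/* The comparison trick above: copy the user command, zero every field we
 * allow to differ (speed, duplex, advertising, cmd), then memcmp against
 * a template holding the only non-default value we expose (PORT_OTHER).
 * Any other modified field makes the request invalid.
 */
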
static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_UNKNOWN;
}

static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, ndc->speed);
	cmd->duplex = ndc->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}

static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->duplex;

	return 0;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	int ret;

	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.max_num_vrss_chns = nvdev->num_chn;

	ndevctx->start_remove = true;
	rndis_filter_device_remove(hdev, nvdev);

	/* 'nvdev' has been freed in rndis_filter_device_remove() ->
	 * netvsc_device_remove() -> free_netvsc_device().
	 * We mustn't access it before it's re-created in
	 * rndis_filter_device_add() -> netvsc_device_add().
	 */

	ndev->mtu = mtu;

	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);
	ndevctx->start_remove = false;

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

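/* Note: the buffer sizes negotiated with the host depend on the MTU, so
 * an MTU change is handled by removing and re-adding the RNDIS device
 * rather than resizing it in place; the link bounces during the change.
 */
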
static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = ndev_ctx->nvdev;
	int i;

	if (!nvdev)
		return;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_errors;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;
}

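/* The u64_stats begin/retry loops give torn-free 64-bit readings even on
 * 32-bit hosts: if a writer updates the counters mid-read, the sequence
 * count changes and the read is retried.
 */
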
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = ndc->nvdev;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = ndc->nvdev;
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = ndc->nvdev;
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			memcpy(p + i * ETH_GSTRING_LEN,
			       netvsc_stats[i].name, ETH_GSTRING_LEN);

		p += i * ETH_GSTRING_LEN;
		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int
netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
			 struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		return -EINVAL;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = ndc->nvdev;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(nvdev, info);
	}
	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = ndc->nvdev;
	struct rndis_device *rndis_dev = ndev->extension;
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->ind_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = ndc->nvdev;
	struct rndis_device *rndis_dev = ndev->extension;
	int i;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= dev->num_rx_queues)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->ind_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
}

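/* Only Toeplitz hashing is accepted here, matching what the Hyper-V host
 * implements for vRSS; indirection entries are validated against the real
 * RX queue count before being pushed to the host via RNDIS.
 */
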
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_settings	= netvsc_get_settings,
	.set_settings	= netvsc_set_settings,
	.get_rxnfc	= netvsc_get_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	if (ndev_ctx->start_remove)
		goto out_unlock;

	net_device = ndev_ctx->nvdev;
	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (net_device_ctx->nvdev == NULL)
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

static int netvsc_vf_up(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	netdev_info(ndev, "VF up: %s\n", vf_netdev->name);

	/*
	 * Open the device before switching data path.
	 */
	rndis_filter_open(netvsc_dev);

	/*
	 * notify the host to switch the data path.
	 */
	netvsc_switch_datapath(ndev, true);
	netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

	netif_carrier_off(ndev);

	/* Now notify peers through VF device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_vf_down(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct netvsc_device *netvsc_dev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;

	netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
	netvsc_switch_datapath(ndev, false);
	netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
	rndis_filter_close(netvsc_dev);
	netif_carrier_on(ndev);

	/* Now notify peers through netvsc device. */
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);
	module_put(THIS_MODULE);
	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	net_device_ctx->start_remove = false;

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT,
					      num_online_cpus());
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* hw_features computed in rndis_filter_device_add */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	nvdev = net_device_ctx->nvdev;
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev, nvdev);
		free_netdev(net);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;

	net = hv_get_drvdata(dev);

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
	 * removing the device.
	 */
	rtnl_lock();
	ndev_ctx->start_remove = true;
	rtnl_unlock();

	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed.
	 */
	rndis_filter_device_remove(dev, ndev_ctx->nvdev);

	hv_set_drvdata(dev, NULL);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
		return netvsc_vf_up(event_dev);
	case NETDEV_DOWN:
		return netvsc_vf_down(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	ret = vmbus_driver_register(&netvsc_drv);

	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);