/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN           64
#define NETVSC_MIN_TX_SECTIONS  10
#define NETVSC_DEFAULT_TX       192     /* ~1M */
#define NETVSC_MIN_RX_SECTIONS  10      /* ~64K */
#define NETVSC_DEFAULT_RX       2048    /* ~4M */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
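
/* Example (assuming the module is loaded under its usual name, hv_netvsc):
 *   modprobe hv_netvsc ring_size=256
 * Values below RING_SIZE_MIN are raised to the minimum in netvsc_drv_init().
 */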

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
                                NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
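
/* ndo_set_rx_mode handler: pushes the current receive mode
 * (promiscuous/multicast state) to the host through the RNDIS filter.
 */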

static void netvsc_set_multicast_list(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

        rndis_filter_update(nvdev);
}

static int netvsc_open(struct net_device *net)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
        struct rndis_device *rdev;
        int ret = 0;

        netif_carrier_off(net);

        /* Open up the device */
        ret = rndis_filter_open(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to open device (ret %d).\n", ret);
                return ret;
        }

        netif_tx_wake_all_queues(net);

        rdev = nvdev->extension;

        if (!rdev->link_state)
                netif_carrier_on(net);

        if (vf_netdev) {
                /* Setting synthetic device up transparently sets
                 * slave as up. If open fails, then slave will
                 * still be offline (and not used).
                 */
                ret = dev_open(vf_netdev);
                if (ret)
                        netdev_warn(net,
                                    "unable to open slave: %s: %d\n",
                                    vf_netdev->name, ret);
        }
        return 0;
}

static int netvsc_close(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct net_device *vf_netdev
                = rtnl_dereference(net_device_ctx->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        int ret = 0;
        u32 aread, i, msec = 10, retry = 0, retry_max = 20;
        struct vmbus_channel *chn;

        netif_tx_disable(net);

        /* No need to close rndis filter if it is removed already */
        if (!nvdev)
                goto out;

        ret = rndis_filter_close(nvdev);
        if (ret != 0) {
                netdev_err(net, "unable to close device (ret %d).\n", ret);
                return ret;
        }

        /* Ensure pending bytes in ring are read */
        while (true) {
                aread = 0;
                for (i = 0; i < nvdev->num_chn; i++) {
                        chn = nvdev->chan_table[i].channel;
                        if (!chn)
                                continue;

                        aread = hv_get_bytes_to_read(&chn->inbound);
                        if (aread)
                                break;

                        aread = hv_get_bytes_to_read(&chn->outbound);
                        if (aread)
                                break;
                }

                retry++;
                if (retry > retry_max || aread == 0)
                        break;

                msleep(msec);

                if (msec < 1000)
                        msec *= 2;
        }

        if (aread) {
                netdev_err(net, "Ring buffer not empty after closing rndis\n");
                ret = -ETIMEDOUT;
        }

out:
        if (vf_netdev)
                dev_close(vf_netdev);

        return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
                           u32 pkt_type)
{
        struct rndis_packet *rndis_pkt;
        struct rndis_per_packet_info *ppi;

        rndis_pkt = &msg->msg.pkt;
        rndis_pkt->data_offset += ppi_size;

        ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
                rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

        ppi->size = ppi_size;
        ppi->type = pkt_type;
        ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

        rndis_pkt->per_pkt_info_len += ppi_size;

        return ppi;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
        struct sk_buff *skb,
        const struct net_device_context *ndc)
{
        struct flow_keys flow;
        u32 hash;
        static u32 hashrnd __read_mostly;

        net_get_random_once(&hashrnd, sizeof(hashrnd));

        if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
                return 0;

        if (flow.basic.ip_proto == IPPROTO_TCP ||
            (flow.basic.ip_proto == IPPROTO_UDP &&
             ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) ||
              (flow.basic.n_proto == htons(ETH_P_IPV6) &&
               ndc->udp6_l4_hash)))) {
                return skb_get_hash(skb);
        } else {
                if (flow.basic.n_proto == htons(ETH_P_IP))
                        hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
                else if (flow.basic.n_proto == htons(ETH_P_IPV6))
                        hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
                else
                        hash = 0;

                skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
        }

        return hash;
}
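
/* The send indirection table has VRSS_SEND_TAB_SIZE entries and is filled
 * in by the host; the low bits of the flow hash index into it to pick a
 * transmit queue.
 */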

static inline int netvsc_get_tx_queue(struct net_device *ndev,
                                      struct sk_buff *skb, int old_idx)
{
        const struct net_device_context *ndc = netdev_priv(ndev);
        struct sock *sk = skb->sk;
        int q_idx;

        q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) &
                                   (VRSS_SEND_TAB_SIZE - 1)];

        /* If queue index changed record the new value */
        if (q_idx != old_idx &&
            sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
                sk_tx_queue_set(sk, q_idx);

        return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
        int q_idx = sk_tx_queue_get(skb->sk);

        if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
                /* If forwarding a packet, we use the recorded queue when
                 * available for better cache locality.
                 */
                if (skb_rx_queue_recorded(skb))
                        q_idx = skb_get_rx_queue(skb);
                else
                        q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
        }

        return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                               void *accel_priv,
                               select_queue_fallback_t fallback)
{
        struct net_device_context *ndc = netdev_priv(ndev);
        struct net_device *vf_netdev;
        u16 txq;

        rcu_read_lock();
        vf_netdev = rcu_dereference(ndc->vf_netdev);
        if (vf_netdev) {
                txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
                qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
        } else {
                txq = netvsc_pick_tx(ndev, skb);
        }
        rcu_read_unlock();

        while (unlikely(txq >= ndev->real_num_tx_queues))
                txq -= ndev->real_num_tx_queues;

        return txq;
}
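
/* Convert a buffer into a list of hv_page_buffer descriptors (PFN, offset
 * and length per page) that VMBus uses for the scatter/gather send.
 */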

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
                       struct hv_page_buffer *pb)
{
        int j = 0;

        /* Deal with compound pages by ignoring unused part
         * of the page.
         */
        page += (offset >> PAGE_SHIFT);
        offset &= ~PAGE_MASK;

        while (len > 0) {
                unsigned long bytes;

                bytes = PAGE_SIZE - offset;
                if (bytes > len)
                        bytes = len;
                pb[j].pfn = page_to_pfn(page);
                pb[j].offset = offset;
                pb[j].len = bytes;

                offset += bytes;
                len -= bytes;

                if (offset == PAGE_SIZE && len) {
                        page++;
                        offset = 0;
                        j++;
                }
        }

        return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
                           struct hv_netvsc_packet *packet,
                           struct hv_page_buffer *pb)
{
        u32 slots_used = 0;
        char *data = skb->data;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;

        /* The packet is laid out thus:
         * 1. hdr: RNDIS header and PPI
         * 2. skb linear data
         * 3. skb fragment data
         */
        slots_used += fill_pg_buf(virt_to_page(hdr),
                                  offset_in_page(hdr),
                                  len, &pb[slots_used]);

        packet->rmsg_size = len;
        packet->rmsg_pgcnt = slots_used;

        slots_used += fill_pg_buf(virt_to_page(data),
                                  offset_in_page(data),
                                  skb_headlen(skb), &pb[slots_used]);

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;

                slots_used += fill_pg_buf(skb_frag_page(frag),
                                          frag->page_offset,
                                          skb_frag_size(frag), &pb[slots_used]);
        }
        return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
        int i, frags = skb_shinfo(skb)->nr_frags;
        int pages = 0;

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
                unsigned long offset = frag->page_offset;

                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;
                pages += PFN_UP(offset + size);
        }
        return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
        char *data = skb->data;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        int slots;
        int frag_slots;

        slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
        frag_slots = count_skb_frag_slots(skb);
        return slots + frag_slots;
}
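
/* Classify the packet's transport protocol; the result is tested against
 * the host-advertised tx_checksum_mask before requesting checksum offload.
 */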

static u32 net_checksum_info(struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *ip = ip_hdr(skb);

                if (ip->protocol == IPPROTO_TCP)
                        return TRANSPORT_INFO_IPV4_TCP;
                else if (ip->protocol == IPPROTO_UDP)
                        return TRANSPORT_INFO_IPV4_UDP;
        } else {
                struct ipv6hdr *ip6 = ipv6_hdr(skb);

                if (ip6->nexthdr == IPPROTO_TCP)
                        return TRANSPORT_INFO_IPV6_TCP;
                else if (ip6->nexthdr == IPPROTO_UDP)
                        return TRANSPORT_INFO_IPV6_UDP;
        }

        return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
                          struct sk_buff *skb)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        unsigned int len = skb->len;
        int rc;

        skb->dev = vf_netdev;
        skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

        rc = dev_queue_xmit(skb);
        if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
                struct netvsc_vf_pcpu_stats *pcpu_stats
                        = this_cpu_ptr(ndev_ctx->vf_stats);

                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->tx_packets++;
                pcpu_stats->tx_bytes += len;
                u64_stats_update_end(&pcpu_stats->syncp);
        } else {
                this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
        }

        return rc;
}
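
/* Transmit path for the synthetic device: if no VF has taken over the data
 * path, build an RNDIS packet in the skb headroom (plus optional hash, VLAN,
 * LSO and checksum PPIs), describe it as page buffers and hand it to
 * netvsc_send().
 */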

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_netvsc_packet *packet = NULL;
        int ret;
        unsigned int num_data_pgs;
        struct rndis_message *rndis_msg;
        struct rndis_packet *rndis_pkt;
        struct net_device *vf_netdev;
        u32 rndis_msg_size;
        struct rndis_per_packet_info *ppi;
        u32 hash;
        struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

        /* if VF is present and up then redirect packets
         * already called with rcu_read_lock_bh
         */
        vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
        if (vf_netdev && netif_running(vf_netdev) &&
            !netpoll_tx_running(net))
                return netvsc_vf_xmit(net, vf_netdev, skb);

        /* We will at most need two pages to describe the rndis
         * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
         * of pages in a single packet. If skb is scattered around
         * more pages we try linearizing it.
         */

        num_data_pgs = netvsc_get_slots(skb) + 2;

        if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
                ++net_device_ctx->eth_stats.tx_scattered;

                if (skb_linearize(skb))
                        goto no_memory;

                num_data_pgs = netvsc_get_slots(skb) + 2;
                if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
                        ++net_device_ctx->eth_stats.tx_too_big;
                        goto drop;
                }
        }

        /*
         * Place the rndis header in the skb head room and
         * the skb->cb will be used for hv_netvsc_packet
         * structure.
         */
        ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
        if (ret)
                goto no_memory;

        /* Use the skb control buffer for building up the packet */
        BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
                     FIELD_SIZEOF(struct sk_buff, cb));
        packet = (struct hv_netvsc_packet *)skb->cb;

        packet->q_idx = skb_get_queue_mapping(skb);

        packet->total_data_buflen = skb->len;
        packet->total_bytes = skb->len;
        packet->total_packets = 1;

        rndis_msg = (struct rndis_message *)skb->head;

        memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

        /* Add the rndis header */
        rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
        rndis_msg->msg_len = packet->total_data_buflen;
        rndis_pkt = &rndis_msg->msg.pkt;
        rndis_pkt->data_offset = sizeof(struct rndis_packet);
        rndis_pkt->data_len = packet->total_data_buflen;
        rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

        rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

        hash = skb_get_hash_raw(skb);
        if (hash != 0 && net->real_num_tx_queues > 1) {
                rndis_msg_size += NDIS_HASH_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
                                    NBL_HASH_VALUE);
                *(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
        }

        if (skb_vlan_tag_present(skb)) {
                struct ndis_pkt_8021q_info *vlan;

                rndis_msg_size += NDIS_VLAN_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
                                    IEEE_8021Q_INFO);

                vlan = (void *)ppi + ppi->ppi_offset;
                vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
                vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
                                VLAN_PRIO_SHIFT;
        }

        if (skb_is_gso(skb)) {
                struct ndis_tcp_lso_info *lso_info;

                rndis_msg_size += NDIS_LSO_PPI_SIZE;
                ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
                                    TCP_LARGESEND_PKTINFO);

                lso_info = (void *)ppi + ppi->ppi_offset;

                lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
                if (skb->protocol == htons(ETH_P_IP)) {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
                        ip_hdr(skb)->tot_len = 0;
                        ip_hdr(skb)->check = 0;
                        tcp_hdr(skb)->check =
                                ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
                                                   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                } else {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                }
                lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
                lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
                        struct ndis_tcp_ip_checksum_info *csum_info;

                        rndis_msg_size += NDIS_CSUM_PPI_SIZE;
                        ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
                                            TCPIP_CHKSUM_PKTINFO);

                        csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
                                                                         ppi->ppi_offset);

                        csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

                        if (skb->protocol == htons(ETH_P_IP)) {
                                csum_info->transmit.is_ipv4 = 1;

                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                                        csum_info->transmit.tcp_checksum = 1;
                                else
                                        csum_info->transmit.udp_checksum = 1;
                        } else {
                                csum_info->transmit.is_ipv6 = 1;

                                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                                        csum_info->transmit.tcp_checksum = 1;
                                else
                                        csum_info->transmit.udp_checksum = 1;
                        }
                } else {
                        /* Can't do offload of this type of checksum */
                        if (skb_checksum_help(skb))
                                goto drop;
                }
        }

        /* Start filling in the page buffers with the rndis hdr */
        rndis_msg->msg_len += rndis_msg_size;
        packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
                                               skb, packet, pb);

        /* timestamp packet in software */
        skb_tx_timestamp(skb);

        ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
        if (likely(ret == 0))
                return NETDEV_TX_OK;

        if (ret == -EAGAIN) {
                ++net_device_ctx->eth_stats.tx_busy;
                return NETDEV_TX_BUSY;
        }

        if (ret == -ENOSPC)
                ++net_device_ctx->eth_stats.tx_no_space;

drop:
        dev_kfree_skb_any(skb);
        net->stats.tx_dropped++;

        return NETDEV_TX_OK;

no_memory:
        ++net_device_ctx->eth_stats.tx_no_memory;
        goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                struct rndis_message *resp)
{
        struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_reconfig *event;
        unsigned long flags;

        net = hv_get_drvdata(device_obj);

        if (!net)
                return;

        ndev_ctx = netdev_priv(net);

        /* Update the physical link speed when changing to another vSwitch */
        if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
                u32 speed;

                speed = *(u32 *)((void *)indicate
                                 + indicate->status_buf_offset) / 10000;
                ndev_ctx->speed = speed;
                return;
        }

        /* Handle these link change statuses below */
        if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
            indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
            indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
                return;

        if (net->reg_state != NETREG_REGISTERED)
                return;

        event = kzalloc(sizeof(*event), GFP_ATOMIC);
        if (!event)
                return;
        event->event = indicate->status;

        spin_lock_irqsave(&ndev_ctx->lock, flags);
        list_add_tail(&event->list, &ndev_ctx->reconfig_events);
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
                                             struct napi_struct *napi,
                                             const struct ndis_tcp_ip_checksum_info *csum_info,
                                             const struct ndis_pkt_8021q_info *vlan,
                                             void *data, u32 buflen)
{
        struct sk_buff *skb;

        skb = napi_alloc_skb(napi, buflen);
        if (!skb)
                return skb;

        /*
         * Copy to skb. This copy is needed here since the memory pointed by
         * hv_netvsc_packet cannot be deallocated
         */
        skb_put_data(skb, data, buflen);

        skb->protocol = eth_type_trans(skb, net);

        /* skb is already created with CHECKSUM_NONE */
        skb_checksum_none_assert(skb);

        /*
         * In Linux, the IP checksum is always checked.
         * Do L4 checksum offload if enabled and present.
         */
        if (csum_info && (net->features & NETIF_F_RXCSUM)) {
                if (csum_info->receive.tcp_checksum_succeeded ||
                    csum_info->receive.udp_checksum_succeeded)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        if (vlan) {
                u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       vlan_tci);
        }

        return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
                         struct vmbus_channel *channel,
                         void *data, u32 len,
                         const struct ndis_tcp_ip_checksum_info *csum_info,
                         const struct ndis_pkt_8021q_info *vlan)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *net_device;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        struct netvsc_channel *nvchan;
        struct sk_buff *skb;
        struct netvsc_stats *rx_stats;

        if (net->reg_state != NETREG_REGISTERED)
                return NVSP_STAT_FAIL;

        rcu_read_lock();
        net_device = rcu_dereference(net_device_ctx->nvdev);
        if (unlikely(!net_device))
                goto drop;

        nvchan = &net_device->chan_table[q_idx];

        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
                                    csum_info, vlan, data, len);
        if (unlikely(!skb)) {
drop:
                ++net->stats.rx_dropped;
                rcu_read_unlock();
                return NVSP_STAT_FAIL;
        }

        skb_record_rx_queue(skb, q_idx);

        /*
         * Even if injecting the packet, record the statistics
         * on the synthetic device because modifying the VF device
         * statistics will not work correctly.
         */
        rx_stats = &nvchan->rx_stats;
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->packets++;
        rx_stats->bytes += len;

        if (skb->pkt_type == PACKET_BROADCAST)
                ++rx_stats->broadcast;
        else if (skb->pkt_type == PACKET_MULTICAST)
                ++rx_stats->multicast;
        u64_stats_update_end(&rx_stats->syncp);

        napi_gro_receive(&nvchan->napi, skb);

        rcu_read_unlock();

        return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
                                struct ethtool_channels *channel)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

        if (nvdev) {
                channel->max_combined = nvdev->max_chn;
                channel->combined_count = nvdev->num_chn;
        }
}
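
/* Changing the channel count requires tearing down and re-creating the
 * RNDIS device; on failure the original channel count is restored.
 */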

static int netvsc_set_channels(struct net_device *net,
                               struct ethtool_channels *channels)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
        unsigned int orig, count = channels->combined_count;
        struct netvsc_device_info device_info;
        bool was_opened;
        int ret = 0;

        /* We do not support separate count for rx, tx, or other */
        if (count == 0 ||
            channels->rx_count || channels->tx_count || channels->other_count)
                return -EINVAL;

        if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX)
                return -EINVAL;

        if (!nvdev || nvdev->destroy)
                return -ENODEV;

        if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
                return -EINVAL;

        if (count > nvdev->max_chn)
                return -EINVAL;

        orig = nvdev->num_chn;
        was_opened = rndis_filter_opened(nvdev);
        if (was_opened)
                rndis_filter_close(nvdev);

        memset(&device_info, 0, sizeof(device_info));
        device_info.num_chn = count;
        device_info.ring_size = ring_size;
        device_info.send_sections = nvdev->send_section_cnt;
        device_info.recv_sections = nvdev->recv_section_cnt;

        rndis_filter_device_remove(dev, nvdev);

        nvdev = rndis_filter_device_add(dev, &device_info);
        if (!IS_ERR(nvdev)) {
                netif_set_real_num_tx_queues(net, nvdev->num_chn);
                netif_set_real_num_rx_queues(net, nvdev->num_chn);
        } else {
                ret = PTR_ERR(nvdev);
                device_info.num_chn = orig;
                nvdev = rndis_filter_device_add(dev, &device_info);

                if (IS_ERR(nvdev)) {
                        netdev_err(net, "restoring channel setting failed: %ld\n",
                                   PTR_ERR(nvdev));
                        return ret;
                }
        }

        if (was_opened)
                rndis_filter_open(nvdev);

        /* We may have missed link change notifications */
        net_device_ctx->last_reconfig = 0;
        schedule_delayed_work(&net_device_ctx->dwork, 0);

        return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
        struct ethtool_link_ksettings diff1 = *cmd;
        struct ethtool_link_ksettings diff2 = {};

        diff1.base.speed = 0;
        diff1.base.duplex = 0;
        /* advertising and cmd are usually set */
        ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
        diff1.base.cmd = 0;
        /* We set port to PORT_OTHER */
        diff2.base.port = PORT_OTHER;

        return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
        struct net_device_context *ndc = netdev_priv(dev);

        ndc->udp4_l4_hash = true;
        ndc->udp6_l4_hash = true;

        ndc->speed = SPEED_UNKNOWN;
        ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
                                     struct ethtool_link_ksettings *cmd)
{
        struct net_device_context *ndc = netdev_priv(dev);

        cmd->base.speed = ndc->speed;
        cmd->base.duplex = ndc->duplex;
        cmd->base.port = PORT_OTHER;

        return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
                                     const struct ethtool_link_ksettings *cmd)
{
        struct net_device_context *ndc = netdev_priv(dev);
        u32 speed;

        speed = cmd->base.speed;
        if (!ethtool_validate_speed(speed) ||
            !ethtool_validate_duplex(cmd->base.duplex) ||
            !netvsc_validate_ethtool_ss_cmd(cmd))
                return -EINVAL;

        ndc->speed = speed;
        ndc->duplex = cmd->base.duplex;

        return 0;
}
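
/* An MTU change also requires re-creating the RNDIS device. The VF MTU is
 * changed first so that a failure there leaves the synthetic path intact.
 */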

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
        struct hv_device *hdev = ndevctx->device_ctx;
        int orig_mtu = ndev->mtu;
        struct netvsc_device_info device_info;
        bool was_opened;
        int ret = 0;

        if (!nvdev || nvdev->destroy)
                return -ENODEV;

        /* Change MTU of underlying VF netdev first. */
        if (vf_netdev) {
                ret = dev_set_mtu(vf_netdev, mtu);
                if (ret)
                        return ret;
        }

        netif_device_detach(ndev);
        was_opened = rndis_filter_opened(nvdev);
        if (was_opened)
                rndis_filter_close(nvdev);

        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.num_chn = nvdev->num_chn;
        device_info.send_sections = nvdev->send_section_cnt;
        device_info.recv_sections = nvdev->recv_section_cnt;

        rndis_filter_device_remove(hdev, nvdev);

        ndev->mtu = mtu;

        nvdev = rndis_filter_device_add(hdev, &device_info);
        if (IS_ERR(nvdev)) {
                ret = PTR_ERR(nvdev);

                /* Attempt rollback to original MTU */
                ndev->mtu = orig_mtu;
                nvdev = rndis_filter_device_add(hdev, &device_info);

                if (vf_netdev)
                        dev_set_mtu(vf_netdev, orig_mtu);

                if (IS_ERR(nvdev)) {
                        netdev_err(ndev, "restoring mtu failed: %ld\n",
                                   PTR_ERR(nvdev));
                        return ret;
                }
        }

        if (was_opened)
                rndis_filter_open(nvdev);

        netif_device_attach(ndev);

        /* We may have missed link change notifications */
        schedule_delayed_work(&ndevctx->dwork, 0);

        return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
                                struct netvsc_vf_pcpu_stats *tot)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        int i;

        memset(tot, 0, sizeof(*tot));

        for_each_possible_cpu(i) {
                const struct netvsc_vf_pcpu_stats *stats
                        = per_cpu_ptr(ndev_ctx->vf_stats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        rx_packets = stats->rx_packets;
                        tx_packets = stats->tx_packets;
                        rx_bytes = stats->rx_bytes;
                        tx_bytes = stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes += rx_bytes;
                tot->tx_bytes += tx_bytes;
                tot->tx_dropped += stats->tx_dropped;
        }
}

static void netvsc_get_stats64(struct net_device *net,
                               struct rtnl_link_stats64 *t)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
        struct netvsc_vf_pcpu_stats vf_tot;
        int i;

        if (!nvdev)
                return;

        netdev_stats_to_stats64(t, &net->stats);

        netvsc_get_vf_stats(net, &vf_tot);
        t->rx_packets += vf_tot.rx_packets;
        t->tx_packets += vf_tot.tx_packets;
        t->rx_bytes += vf_tot.rx_bytes;
        t->tx_bytes += vf_tot.tx_bytes;
        t->tx_dropped += vf_tot.tx_dropped;

        for (i = 0; i < nvdev->num_chn; i++) {
                const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
                const struct netvsc_stats *stats;
                u64 packets, bytes, multicast;
                unsigned int start;

                stats = &nvchan->tx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                t->tx_bytes += bytes;
                t->tx_packets += packets;

                stats = &nvchan->rx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
                        multicast = stats->multicast + stats->broadcast;
                } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                t->rx_bytes += bytes;
                t->rx_packets += packets;
                t->multicast += multicast;
        }
}
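
/* Update the synthetic MAC address, keeping any slave VF in sync and
 * rolling the VF back if the host rejects the change.
 */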

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
        struct net_device_context *ndc = netdev_priv(ndev);
        struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
        struct sockaddr *addr = p;
        int err;

        err = eth_prepare_mac_addr_change(ndev, p);
        if (err)
                return err;

        if (!nvdev)
                return -ENODEV;

        if (vf_netdev) {
                err = dev_set_mac_address(vf_netdev, addr);
                if (err)
                        return err;
        }

        err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
        if (!err) {
                eth_commit_mac_addr_change(ndev, p);
        } else if (vf_netdev) {
                /* rollback change on VF */
                memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
                dev_set_mac_address(vf_netdev, addr);
        }

        return err;
}

static const struct {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} netvsc_stats[] = {
        { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
        { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
        { "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
        { "tx_too_big",   offsetof(struct netvsc_ethtool_stats, tx_too_big) },
        { "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
        { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
        { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
}, vf_stats[] = {
        { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
        { "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
        { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
        { "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
        { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN     ARRAY_SIZE(vf_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

        if (!nvdev)
                return -ENODEV;

        switch (string_set) {
        case ETH_SS_STATS:
                return NETVSC_GLOBAL_STATS_LEN
                        + NETVSC_VF_STATS_LEN
                        + NETVSC_QUEUE_STATS_LEN(nvdev);
        default:
                return -EINVAL;
        }
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
        const void *nds = &ndc->eth_stats;
        const struct netvsc_stats *qstats;
        struct netvsc_vf_pcpu_stats sum;
        unsigned int start;
        u64 packets, bytes;
        int i, j;

        if (!nvdev)
                return;

        for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
                data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

        netvsc_get_vf_stats(dev, &sum);
        for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
                data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

        for (j = 0; j < nvdev->num_chn; j++) {
                qstats = &nvdev->chan_table[j].tx_stats;

                do {
                        start = u64_stats_fetch_begin_irq(&qstats->syncp);
                        packets = qstats->packets;
                        bytes = qstats->bytes;
                } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
                data[i++] = packets;
                data[i++] = bytes;

                qstats = &nvdev->chan_table[j].rx_stats;
                do {
                        start = u64_stats_fetch_begin_irq(&qstats->syncp);
                        packets = qstats->packets;
                        bytes = qstats->bytes;
                } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
                data[i++] = packets;
                data[i++] = bytes;
        }
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
        u8 *p = data;
        int i;

        if (!nvdev)
                return;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
                        memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }

                for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
                        memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }

                for (i = 0; i < nvdev->num_chn; i++) {
                        sprintf(p, "tx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }

                break;
        }
}

static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
                         struct ethtool_rxnfc *info)
{
        info->data = RXH_IP_SRC | RXH_IP_DST;

        switch (info->flow_type) {
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;

        case UDP_V4_FLOW:
                if (ndc->udp4_l4_hash)
                        info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

                break;

        case UDP_V6_FLOW:
                if (ndc->udp6_l4_hash)
                        info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

                break;

        case IPV4_FLOW:
        case IPV6_FLOW:
                break;
        default:
                info->data = 0;
                break;
        }

        return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                 u32 *rules)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

        if (!nvdev)
                return -ENODEV;

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = nvdev->num_chn;
                return 0;

        case ETHTOOL_GRXFH:
                return netvsc_get_rss_hash_opts(ndc, info);
        }
        return -EOPNOTSUPP;
}
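
/* For example, UDP 4-tuple hashing can be enabled from userspace with
 * something like: ethtool -N eth0 rx-flow-hash udp4 sdfn
 * (sdfn = src/dst IP plus src/dst port, matching the mask tested below).
 */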

static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
                                    struct ethtool_rxnfc *info)
{
        if (info->data == (RXH_IP_SRC | RXH_IP_DST |
                           RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                if (info->flow_type == UDP_V4_FLOW)
                        ndc->udp4_l4_hash = true;
                else if (info->flow_type == UDP_V6_FLOW)
                        ndc->udp6_l4_hash = true;
                else
                        return -EOPNOTSUPP;

                return 0;
        }

        if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
                if (info->flow_type == UDP_V4_FLOW)
                        ndc->udp4_l4_hash = false;
                else if (info->flow_type == UDP_V6_FLOW)
                        ndc->udp6_l4_hash = false;
                else
                        return -EOPNOTSUPP;

                return 0;
        }

        return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
        struct net_device_context *ndc = netdev_priv(ndev);

        if (info->cmd == ETHTOOL_SRXFH)
                return netvsc_set_rss_hash_opts(ndc, info);

        return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *dev)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *ndev;
        int i;

        rcu_read_lock();
        ndev = rcu_dereference(ndc->nvdev);
        if (ndev) {
                for (i = 0; i < ndev->num_chn; i++) {
                        struct netvsc_channel *nvchan = &ndev->chan_table[i];

                        napi_schedule(&nvchan->napi);
                }
        }
        rcu_read_unlock();
}
#endif

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
        return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
        return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
                           u8 *hfunc)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
        struct rndis_device *rndis_dev;
        int i;

        if (!ndev)
                return -ENODEV;

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;      /* Toeplitz */

        rndis_dev = ndev->extension;
        if (indir) {
                for (i = 0; i < ITAB_NUM; i++)
                        indir[i] = rndis_dev->ind_table[i];
        }

        if (key)
                memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

        return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
                           const u8 *key, const u8 hfunc)
{
        struct net_device_context *ndc = netdev_priv(dev);
        struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
        struct rndis_device *rndis_dev;
        int i;

        if (!ndev)
                return -ENODEV;

        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;

        rndis_dev = ndev->extension;
        if (indir) {
                for (i = 0; i < ITAB_NUM; i++)
                        if (indir[i] >= VRSS_CHANNEL_MAX)
                                return -EINVAL;

                for (i = 0; i < ITAB_NUM; i++)
                        rndis_dev->ind_table[i] = indir[i];
        }

        if (!key) {
                if (!indir)
                        return 0;

                key = rndis_dev->rss_key;
        }

        return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
}

/* Hyper-V RNDIS protocol does not have ring in the HW sense.
 * It does have pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
                                   struct ethtool_ringparam *ring)
{
        u32 max_buf_size;

        ring->rx_pending = nvdev->recv_section_cnt;
        ring->tx_pending = nvdev->send_section_cnt;

        if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
                max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
        else
                max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

        ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
        ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
                / nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
                                 struct ethtool_ringparam *ring)
{
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

        if (!nvdev)
                return;

        __netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
                                struct ethtool_ringparam *ring)
{
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
        struct hv_device *hdev = ndevctx->device_ctx;
        struct netvsc_device_info device_info;
        struct ethtool_ringparam orig;
        u32 new_tx, new_rx;
        bool was_opened;
        int ret = 0;

        if (!nvdev || nvdev->destroy)
                return -ENODEV;

        memset(&orig, 0, sizeof(orig));
        __netvsc_get_ringparam(nvdev, &orig);

        new_tx = clamp_t(u32, ring->tx_pending,
                         NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
        new_rx = clamp_t(u32, ring->rx_pending,
                         NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

        if (new_tx == orig.tx_pending &&
            new_rx == orig.rx_pending)
                return 0;        /* no change */

        memset(&device_info, 0, sizeof(device_info));
        device_info.num_chn = nvdev->num_chn;
        device_info.ring_size = ring_size;
        device_info.send_sections = new_tx;
        device_info.recv_sections = new_rx;

        netif_device_detach(ndev);
        was_opened = rndis_filter_opened(nvdev);
        if (was_opened)
                rndis_filter_close(nvdev);

        rndis_filter_device_remove(hdev, nvdev);

        nvdev = rndis_filter_device_add(hdev, &device_info);
        if (IS_ERR(nvdev)) {
                ret = PTR_ERR(nvdev);

                device_info.send_sections = orig.tx_pending;
                device_info.recv_sections = orig.rx_pending;
                nvdev = rndis_filter_device_add(hdev, &device_info);
                if (IS_ERR(nvdev)) {
                        netdev_err(ndev, "restoring ringparam failed: %ld\n",
                                   PTR_ERR(nvdev));
                        return ret;
                }
        }

        if (was_opened)
                rndis_filter_open(nvdev);
        netif_device_attach(ndev);

        /* We may have missed link change notifications */
        ndevctx->last_reconfig = 0;
        schedule_delayed_work(&ndevctx->dwork, 0);

        return ret;
}

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo = netvsc_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ethtool_stats = netvsc_get_ethtool_stats,
        .get_sset_count = netvsc_get_sset_count,
        .get_strings = netvsc_get_strings,
        .get_channels = netvsc_get_channels,
        .set_channels = netvsc_set_channels,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_rxnfc = netvsc_get_rxnfc,
        .set_rxnfc = netvsc_set_rxnfc,
        .get_rxfh_key_size = netvsc_get_rxfh_key_size,
        .get_rxfh_indir_size = netvsc_rss_indir_size,
        .get_rxfh = netvsc_get_rxfh,
        .set_rxfh = netvsc_set_rxfh,
        .get_link_ksettings = netvsc_get_link_ksettings,
        .set_link_ksettings = netvsc_set_link_ksettings,
        .get_ringparam = netvsc_get_ringparam,
        .set_ringparam = netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
        .ndo_open = netvsc_open,
        .ndo_stop = netvsc_close,
        .ndo_start_xmit = netvsc_start_xmit,
        .ndo_set_rx_mode = netvsc_set_multicast_list,
        .ndo_change_mtu = netvsc_change_mtu,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_mac_address = netvsc_set_mac_addr,
        .ndo_select_queue = netvsc_select_queue,
        .ndo_get_stats64 = netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
        struct net_device_context *ndev_ctx =
                container_of(w, struct net_device_context, dwork.work);
        struct hv_device *device_obj = ndev_ctx->device_ctx;
        struct net_device *net = hv_get_drvdata(device_obj);
        struct netvsc_device *net_device;
        struct rndis_device *rdev;
        struct netvsc_reconfig *event = NULL;
        bool notify = false, reschedule = false;
        unsigned long flags, next_reconfig, delay;

        rtnl_lock();
        net_device = rtnl_dereference(ndev_ctx->nvdev);
        if (!net_device)
                goto out_unlock;

        rdev = net_device->extension;

        next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
        if (time_is_after_jiffies(next_reconfig)) {
                /* link_watch only sends one notification with current state
                 * per second, avoid doing reconfig more frequently. Handle
                 * wrap around.
                 */
                delay = next_reconfig - jiffies;
                delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
                schedule_delayed_work(&ndev_ctx->dwork, delay);
                goto out_unlock;
        }
        ndev_ctx->last_reconfig = jiffies;

        spin_lock_irqsave(&ndev_ctx->lock, flags);
        if (!list_empty(&ndev_ctx->reconfig_events)) {
                event = list_first_entry(&ndev_ctx->reconfig_events,
                                         struct netvsc_reconfig, list);
                list_del(&event->list);
                reschedule = !list_empty(&ndev_ctx->reconfig_events);
        }
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        if (!event)
                goto out_unlock;

        switch (event->event) {
                /* Only the following events are possible due to the check in
                 * netvsc_linkstatus_callback()
                 */
        case RNDIS_STATUS_MEDIA_CONNECT:
                if (rdev->link_state) {
                        rdev->link_state = false;
                        netif_carrier_on(net);
                        netif_tx_wake_all_queues(net);
                } else {
                        notify = true;
                }
                kfree(event);
                break;
        case RNDIS_STATUS_MEDIA_DISCONNECT:
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
                        netif_tx_stop_all_queues(net);
                }
                kfree(event);
                break;
        case RNDIS_STATUS_NETWORK_CHANGE:
                /* Only makes sense if carrier is present */
                if (!rdev->link_state) {
                        rdev->link_state = true;
                        netif_carrier_off(net);
                        netif_tx_stop_all_queues(net);
                        event->event = RNDIS_STATUS_MEDIA_CONNECT;
                        spin_lock_irqsave(&ndev_ctx->lock, flags);
                        list_add(&event->list, &ndev_ctx->reconfig_events);
                        spin_unlock_irqrestore(&ndev_ctx->lock, flags);
                        reschedule = true;
                }
                break;
        }

        rtnl_unlock();

        if (notify)
                netdev_notify_peers(net);

        /* link_watch only sends one notification with current state per
         * second, handle next reconfig event in 2 seconds.
         */
        if (reschedule)
                schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

        return;

out_unlock:
        rtnl_unlock();
}
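
/* Helpers to find the synthetic netvsc device that corresponds to a VF:
 * by permanent MAC address at VF registration time, and by the recorded
 * vf_netdev pointer afterwards. Both require RTNL.
 */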

static struct net_device *get_netvsc_bymac(const u8 *mac)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for_each_netdev(&init_net, dev) {
                if (dev->netdev_ops != &device_ops)
                        continue;       /* not a netvsc device */

                if (ether_addr_equal(mac, dev->perm_addr))
                        return dev;
        }

        return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for_each_netdev(&init_net, dev) {
                struct net_device_context *net_device_ctx;

                if (dev->netdev_ops != &device_ops)
                        continue;       /* not a netvsc device */

                net_device_ctx = netdev_priv(dev);
                if (!rtnl_dereference(net_device_ctx->nvdev))
                        continue;       /* device is removed */

                if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
                        return dev;     /* a match */
        }

        return NULL;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
        struct netvsc_vf_pcpu_stats *pcpu_stats
                 = this_cpu_ptr(ndev_ctx->vf_stats);

        skb->dev = ndev;

        u64_stats_update_begin(&pcpu_stats->syncp);
        pcpu_stats->rx_packets++;
        pcpu_stats->rx_bytes += skb->len;
        u64_stats_update_end(&pcpu_stats->syncp);

        return RX_HANDLER_ANOTHER;
}
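
/* Enslave the VF: steal its receives via an rx_handler, link it below the
 * synthetic device, and schedule the delayed takeover of the data path.
 */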

static int netvsc_vf_join(struct net_device *vf_netdev,
                          struct net_device *ndev)
{
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
        int ret;

        ret = netdev_rx_handler_register(vf_netdev,
                                         netvsc_vf_handle_frame, ndev);
        if (ret != 0) {
                netdev_err(vf_netdev,
                           "can not register netvsc VF receive handler (err = %d)\n",
                           ret);
                goto rx_handler_failed;
        }

        ret = netdev_upper_dev_link(vf_netdev, ndev);
        if (ret != 0) {
                netdev_err(vf_netdev,
                           "can not set master device %s (err = %d)\n",
                           ndev->name, ret);
                goto upper_link_failed;
        }

        /* set slave flag before open to prevent IPv6 addrconf */
        vf_netdev->flags |= IFF_SLAVE;

        schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

        call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

        netdev_info(vf_netdev, "joined to %s\n", ndev->name);
        return 0;

upper_link_failed:
        netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
        return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
                              struct net_device *vf_netdev)
{
        int ret;

        /* Align MTU of VF with master */
        ret = dev_set_mtu(vf_netdev, ndev->mtu);
        if (ret)
                netdev_warn(vf_netdev,
                            "unable to change mtu to %u\n", ndev->mtu);

        if (netif_running(ndev)) {
                ret = dev_open(vf_netdev);
                if (ret)
                        netdev_warn(vf_netdev,
                                    "unable to open: %d\n", ret);
        }
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
        struct net_device_context *ndev_ctx
                = container_of(w, struct net_device_context, vf_takeover.work);
        struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
        struct net_device *vf_netdev;

        if (!rtnl_trylock()) {
                schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
                return;
        }

        vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
        if (vf_netdev)
                __netvsc_vf_setup(ndev, vf_netdev);

        rtnl_unlock();
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
        struct net_device *ndev;
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;

        if (vf_netdev->addr_len != ETH_ALEN)
                return NOTIFY_DONE;

        /*
         * We will use the MAC address to locate the synthetic interface to
         * associate with the VF interface. If we don't find a matching
         * synthetic interface, move on.
         */
        ndev = get_netvsc_bymac(vf_netdev->perm_addr);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
        if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
                return NOTIFY_DONE;

        if (netvsc_vf_join(vf_netdev, ndev) != 0)
                return NOTIFY_DONE;

        netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

        /* Prevent this module from being unloaded while VF is registered */
        try_module_get(THIS_MODULE);

        dev_hold(vf_netdev);
        rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
        return NOTIFY_OK;
}
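
/* NETDEV_UP on the VF: open the RNDIS device and ask the host to steer
 * traffic through the VF instead of the synthetic path.
 */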

static int netvsc_vf_up(struct net_device *vf_netdev)
{
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
        struct net_device *ndev;

        ndev = get_netvsc_byref(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
        if (!netvsc_dev)
                return NOTIFY_DONE;

        /* Bump refcount when datapath is active */
        rndis_filter_open(netvsc_dev);

        /* notify the host to switch the data path. */
        netvsc_switch_datapath(ndev, true);
        netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);

        return NOTIFY_OK;
}

static int netvsc_vf_down(struct net_device *vf_netdev)
{
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
        struct net_device *ndev;

        ndev = get_netvsc_byref(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
        if (!netvsc_dev)
                return NOTIFY_DONE;

        netvsc_switch_datapath(ndev, false);
        netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
        rndis_filter_close(netvsc_dev);

        return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
        struct net_device *ndev;
        struct net_device_context *net_device_ctx;

        ndev = get_netvsc_byref(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

        netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

        netdev_rx_handler_unregister(vf_netdev);
        netdev_upper_dev_unlink(vf_netdev, ndev);
        RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
        dev_put(vf_netdev);

        module_put(THIS_MODULE);
        return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info device_info;
        struct netvsc_device *nvdev;
        int ret = -ENOMEM;

        net = alloc_etherdev_mq(sizeof(struct net_device_context),
                                VRSS_CHANNEL_MAX);
        if (!net)
                goto no_net;

        netif_carrier_off(net);

        netvsc_init_settings(net);

        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
        net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
        if (netif_msg_probe(net_device_ctx))
                netdev_dbg(net, "netvsc msg_enable: %d\n",
                           net_device_ctx->msg_enable);

        hv_set_drvdata(dev, net);

        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

        spin_lock_init(&net_device_ctx->lock);
        INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
        INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

        net_device_ctx->vf_stats
                = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
        if (!net_device_ctx->vf_stats)
                goto no_stats;

        net->netdev_ops = &device_ops;
        net->ethtool_ops = &ethtool_ops;
        SET_NETDEV_DEV(net, &dev->device);

        /* We always need headroom for rndis header */
        net->needed_headroom = RNDIS_AND_PPI_SIZE;

        /* Notify the netvsc driver of the new device */
        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.num_chn = VRSS_CHANNEL_DEFAULT;
        device_info.send_sections = NETVSC_DEFAULT_TX;
        device_info.recv_sections = NETVSC_DEFAULT_RX;

        nvdev = rndis_filter_device_add(dev, &device_info);
        if (IS_ERR(nvdev)) {
                ret = PTR_ERR(nvdev);
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                goto rndis_failed;
        }

        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

        /* hw_features computed in rndis_filter_device_add */
        net->features = net->hw_features |
                NETIF_F_HIGHDMA | NETIF_F_SG |
                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
        net->vlan_features = net->features;

        netif_set_real_num_tx_queues(net, nvdev->num_chn);
        netif_set_real_num_rx_queues(net, nvdev->num_chn);

        netdev_lockdep_set_classes(net);

        /* MTU range: 68 - 1500 or 65521 */
        net->min_mtu = NETVSC_MTU_MIN;
        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
                net->max_mtu = NETVSC_MTU - ETH_HLEN;
        else
                net->max_mtu = ETH_DATA_LEN;

        ret = register_netdev(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
                goto register_failed;
        }

        return ret;

register_failed:
        rndis_filter_device_remove(dev, nvdev);
rndis_failed:
        free_percpu(net_device_ctx->vf_stats);
no_stats:
        hv_set_drvdata(dev, NULL);
        free_netdev(net);
no_net:
        return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
        struct net_device *net;
        struct net_device_context *ndev_ctx;

        net = hv_get_drvdata(dev);

        if (net == NULL) {
                dev_err(&dev->device, "No net device to remove\n");
                return 0;
        }

        ndev_ctx = netdev_priv(net);

        netif_device_detach(net);

        cancel_delayed_work_sync(&ndev_ctx->dwork);

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed. Also blocks mtu and channel changes.
         */
        rtnl_lock();
        rndis_filter_device_remove(dev,
                                   rtnl_dereference(ndev_ctx->nvdev));
        rtnl_unlock();

        unregister_netdev(net);

        hv_set_drvdata(dev, NULL);

        free_percpu(ndev_ctx->vf_stats);
        free_netdev(net);
        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Network guid */
        { HV_NIC_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
        .name = KBUILD_MODNAME,
        .id_table = id_table,
        .probe = netvsc_probe,
        .remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
{
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

        /* Skip our own events */
        if (event_dev->netdev_ops == &device_ops)
                return NOTIFY_DONE;

        /* Avoid non-Ethernet type devices */
        if (event_dev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        /* Avoid Vlan dev with same MAC registering as VF */
        if (is_vlan_dev(event_dev))
                return NOTIFY_DONE;

        /* Avoid Bonding master dev with same MAC registering as VF */
        if ((event_dev->priv_flags & IFF_BONDING) &&
            (event_dev->flags & IFF_MASTER))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
                return netvsc_register_vf(event_dev);
        case NETDEV_UNREGISTER:
                return netvsc_unregister_vf(event_dev);
        case NETDEV_UP:
                return netvsc_vf_up(event_dev);
        case NETDEV_DOWN:
                return netvsc_vf_down(event_dev);
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block netvsc_netdev_notifier = {
        .notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
        unregister_netdevice_notifier(&netvsc_netdev_notifier);
        vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
        int ret;

        if (ring_size < RING_SIZE_MIN) {
                ring_size = RING_SIZE_MIN;
                pr_info("Increased ring_size to %d (min allowed)\n",
                        ring_size);
        }
        ret = vmbus_driver_register(&netvsc_drv);

        if (ret)
                return ret;

        register_netdevice_notifier(&netvsc_netdev_notifier);
        return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);