/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it requires maintaining a new variable in ALL
   skbs, even if no tunneling is used.
   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.
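
   A hedged sketch of the idea (illustration only; the real counter lives
   in the core dev_queue_xmit() path, and the names below are invented):

	static DEFINE_PER_CPU(unsigned int, xmit_depth);

	if (unlikely(__this_cpu_read(xmit_depth) > RECURSION_LIMIT)) {
		kfree_skb(skb);		(local dead loop: drop, do not recurse)
		return NETDEV_TX_OK;
	}
	__this_cpu_inc(xmit_depth);
	ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
	__this_cpu_dec(xmit_depth);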

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces two
   problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and the traceroute output
     would be even more informative. This idea turned out to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my
     neighbourhood) return only 8 bytes of payload. It is the end.
   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. This is difficult or even impossible,
   especially taking fragmentation into account. In short, ttl is not
   a solution at all.
   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there is a router in the loop that does not
   force DF even when the encapsulated packets have DF set. But it is
   not our problem! Nobody can accuse us; we did all that we could.
   Even if it was your gated that injected the fatal route into the
   network, even if it was you who configured the fatal static route:
   you are innocent. :-)

   Alexey Kuznetsov.
 */
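
/* Worked example of the "linear" decay above: every extra GRE/IP
 * encapsulation adds at least 24 bytes (a 20-byte IPv4 header plus the
 * 4-byte GRE base header), so with DF set a looping packet's usable
 * MTU shrinks by >= 24 bytes per pass. Starting from 1500 it falls
 * below 68 after at most (1500 - 68) / 24 ~= 60 passes, at which
 * point the loop starves itself.
 */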
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
static int ipgre_err(struct sk_buff *skb, u32 info,
		     const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. That makes it impossible to maintain even
	   soft state for keyed GRE tunnels with checksumming enabled.
	   Tell them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee, so why
	   the hell do these idiots break standards established by
	   themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);
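
	/* iph points at the embedded IP header inside the ICMP payload,
	 * i.e. the outer header of the packet we originally transmitted,
	 * so the daddr/saddr pair below is the tunnel's remote/local pair.
	 */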
	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);
	if (!t)
		return -ENOENT;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;
	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;
	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1: length in 32-bit words */
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return 0;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return 0;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return 0;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}
static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksumming enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee, so why
	 * the hell do these idiots break standards established by
	 * themselves???
	 */
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, IPPROTO_GRE);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
			      IPPROTO_GRE);
		return;
	}

	ipgre_err(skb, info, &tpi);
}
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);

	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);
	if (!tunnel)
		return PACKET_REJECT;

	len = gre_hdr_len + erspan_hdr_len(ver);
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	if (__iptunnel_pull_header(skb, len, htons(ETH_P_TEB),
				   false, false) < 0)
		goto drop;

	if (tunnel->collect_md) {
		struct erspan_metadata *pkt_md, *md;
		struct ip_tunnel_info *info;
		unsigned char *gh;
		__be64 tun_id;
		__be16 flags;

		tpi->flags |= TUNNEL_KEY;
		flags = tpi->flags;
		tun_id = key32_to_tunnel_id(tpi->key);

		tun_dst = ip_tun_rx_dst(skb, flags,
					tun_id, sizeof(*md));
		if (!tun_dst)
			return PACKET_REJECT;

		/* skb can be uncloned in __iptunnel_pull_header, so
		 * old pkt_md is no longer valid and we need to reset it.
		 */
		gh = skb_network_header(skb) +
		     skb_network_header_len(skb);
		pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
						    sizeof(*ershdr));
		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
		md->version = ver;
		md2 = &md->u.md2;
		memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
				    ERSPAN_V2_MDSIZE);

		info = &tun_dst->u.tun_info;
		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
		info->options_len = sizeof(*md);
	}

	skb_reset_mac_header(skb);
	ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
	return PACKET_RCVD;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);
	if (!tunnel)
		return PACKET_NEXT;

	if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
				   raw_proto, false) < 0)
		goto drop;

	if (tunnel->dev->type != ARPHRD_NONE)
		skb_pop_mac_header(skb);
	else
		skb_reset_mac_header(skb);
	if (tunnel->collect_md) {
		__be16 flags;
		__be64 tun_id;

		flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
		tun_id = key32_to_tunnel_id(tpi->key);
		tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
		if (!tun_dst)
			return PACKET_REJECT;
	}

	ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
	return PACKET_RCVD;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
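
/* Note on the helper below: passing csum = true selects SKB_GSO_GRE_CSUM,
 * so GSO segmentation also generates a GRE checksum for every resulting
 * segment rather than only for the original super-packet.
 */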
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;
	__be16 flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_skb;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;
	int thoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_skb;

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);
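	/* Worked size example: the 8 bytes above are the 4-byte GRE base
	 * header plus the 4-byte sequence number (ERSPAN always sets the
	 * SEQ flag, see gre_build_header() below). erspan_hdr_len() adds
	 * the 4-byte ERSPAN base header plus 4 (v1) or 8 (v2) bytes of
	 * metadata, so tunnel_hlen works out to 16 or 20 bytes.
	 */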
	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 proto, 0, htonl(tunnel->o_seqno++));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id), key->tos, 0,
			    skb->mark, skb_get_hash(skb));
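
	/* Route the flow exactly as the transmit path would, then report
	 * the source address the kernel will actually pick back to the
	 * caller (flow-based users such as openvswitch rely on this).
	 */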
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
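	/* 68 below is the historical minimum IPv4 MTU (RFC 791); however
	 * many option words the GRE flags add, the MTU never drops past it.
	 */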
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p.i_flags;
		t->parms.o_flags = p.o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could have made something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96

   ftp 10.66.66.66
   ftp fec0:6666:6666::193.233.7.65
 */
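
/* ipgre_header() follows the header_ops->create() convention (compare
 * eth_header()): it returns the number of bytes it pushed, or the negative
 * of that length when the header could not be completed because no
 * destination address was available yet.
 */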
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
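	/* RFC 2784 requires the GRE version field to be 0, and the
	 * deprecated RFC 1701 routing feature is unsupported, so reject
	 * both outright.
	 */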
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have the GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}
static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
		ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		nla_total_size(2) +	/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +	/* IFLA_GRE_OFLAGS */
		nla_total_size(4) +	/* IFLA_GRE_LOCAL */
		nla_total_size(4) +	/* IFLA_GRE_REMOTE */
		nla_total_size(1) +	/* IFLA_GRE_PMTUDISC */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(0) +	/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(1) +	/* IFLA_GRE_IGNORE_DF */
		nla_total_size(4) +	/* IFLA_GRE_FWMARK */
		nla_total_size(4) +	/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(1) +	/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +	/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(2) +	/* IFLA_GRE_ERSPAN_HWID */
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (t->erspan_ver == 1 || t->erspan_ver == 2) {
		if (!t->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow-based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
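
/* Hedged usage sketch (not part of the driver): how a flow-based user
 * such as openvswitch might call the helper above. The function name
 * example_create_gretap() and the device name "gretap-demo" are
 * invented for illustration.
 */
#if 0	/* illustration only, not compiled */
static struct net_device *example_create_gretap(struct net *net)
{
	struct net_device *dev;

	dev = gretap_fb_dev_create(net, "gretap-demo", NET_NAME_USER);
	if (IS_ERR(dev))
		return dev;

	/* The device comes back registered with collect_md set, so every
	 * transmitted skb must carry its own tunnel metadata; see
	 * skb_tunnel_info() in gre_fb_xmit() above.
	 */
	return dev;
}
#endif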
static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;
	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;
	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;
	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;
	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");