/*
 *	Linux NET3:	IP/IP protocol decoder modified to support
 *			virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c.
 *
 *	For comments look at net/ipv4/ip_gre.c --ANK
 */

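/* Usage sketch (illustrative only; addresses and the key below are made-up
 * examples): a VTI device is a regular routable net_device whose traffic is
 * run through the xfrm (IPsec) stack.  The tunnel i_key/o_key are used as
 * marks for xfrm policy and state lookup rather than being put on the wire.
 * Roughly:
 *
 *	ip link add vti0 type vti local 192.0.2.1 remote 198.51.100.2 key 42
 *	ip link set vti0 up
 *
 * IPsec policies and states configured with mark 42 are then matched for
 * traffic routed via vti0.
 */
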
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static struct rtnl_link_ops vti_link_ops __read_mostly;

static unsigned int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);

static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
		     int encap_type, bool update_skb_dev)
{
	struct ip_tunnel *tunnel;
	const struct iphdr *iph = ip_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
				  iph->saddr, iph->daddr, 0);
	if (tunnel) {
		if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;

		if (update_skb_dev)
			skb->dev = tunnel->dev;

		return xfrm_input(skb, nexthdr, spi, encap_type);
	}

	return -EINVAL;
drop:
	kfree_skb(skb);
	return 0;
}

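/* Receive path, roughly: the xfrm4_protocol handlers defined below feed
 * ESP/AH/IPCOMP packets into vti_input(), which looks up the matching
 * tunnel, stashes it in the skb control block and hands the packet to
 * xfrm_input() for decapsulation.  When the xfrm layer is done it calls
 * back into vti_rcv_cb(), which does the inbound policy check and accounts
 * the packet to the tunnel device.
 */
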
static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
			   int encap_type)
{
	return vti_input(skb, nexthdr, spi, encap_type, false);
}

static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
{
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

	return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
}

static int vti_rcv_proto(struct sk_buff *skb)
{
	return vti_rcv(skb, 0, false);
}

static int vti_rcv_tunnel(struct sk_buff *skb)
{
	return vti_rcv(skb, ip_hdr(skb)->saddr, true);
}

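/* Two receive entry points, roughly: vti_rcv_proto() is used by the
 * xfrm4_protocol handlers for ESP/AH/IPCOMP (spi == 0 lets xfrm_input()
 * extract the SPI from the packet itself), while vti_rcv_tunnel() serves as
 * the IPIP fallback and passes the outer source address in place of an SPI,
 * switching skb->dev to the tunnel device before xfrm_input() runs.
 */
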
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
	unsigned short family;
	struct net_device *dev;
	struct pcpu_sw_netstats *tstats;
	struct xfrm_state *x;
	const struct xfrm_mode *inner_mode;
	struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
	u32 orig_mark = skb->mark;
	int ret;

	if (!tunnel)
		return 1;

	dev = tunnel->dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		return 0;
	}

	x = xfrm_input_state(skb);

	inner_mode = &x->inner_mode;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL) {
			XFRM_INC_STATS(dev_net(skb->dev),
				       LINUX_MIB_XFRMINSTATEMODEERROR);
			return -EINVAL;
		}
	}

	family = inner_mode->family;

	skb->mark = be32_to_cpu(tunnel->parms.i_key);
	ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
	skb->mark = orig_mark;
	if (!ret)
		return -EPERM;

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
	skb->dev = dev;

	tstats = this_cpu_ptr(dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}

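/* Note the mark juggling above: the tunnel's i_key temporarily replaces
 * skb->mark so the inbound xfrm policy check matches policies installed
 * with that mark, and the original mark is restored right afterwards.
 */
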
static bool vti_state_check(const struct xfrm_state *x, __be32 dst, __be32 src)
{
	xfrm_address_t *daddr = (xfrm_address_t *)&dst;
	xfrm_address_t *saddr = (xfrm_address_t *)&src;

	/* The tunnel is not functional if there is no transform attached,
	 * or if the xfrm state is not in IPv4 tunnel mode.
	 */
	if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
	    x->props.family != AF_INET)
		return false;

	if (!dst)
		return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET);

	if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET))
		return false;

	return true;
}

static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
			    struct flowi *fl)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parms = &tunnel->parms;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;	/* Device to other host */
	int pkt_len = skb->len;
	int err;
	int mtu;

	if (!dst) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}

	dst_hold(dst);
	dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}

	if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
		dev->stats.tx_carrier_errors++;
		dst_release(dst);
		goto tx_error_icmp;
	}

	tdev = dst->dev;
	if (tdev == dev) {
		dst_release(dst);
		dev->stats.collisions++;
		goto tx_error;
	}

	mtu = dst_mtu(dst);
	if (skb->len > mtu) {
		skb_dst_update_pmtu(skb, mtu);
		if (skb->protocol == htons(ETH_P_IP)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		} else {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}
		dst_release(dst);
		goto tx_error;
	}

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(tunnel->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0)
		err = pkt_len;
	iptunnel_xmit_stats(dev, err);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

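/* Transmit path, roughly: vti_tunnel_xmit() below decodes a flow from the
 * inner packet and overrides its mark with the tunnel o_key; vti_xmit()
 * then resolves that flow through xfrm_lookup().  If no IPv4 tunnel-mode
 * xfrm state matches (vti_state_check()), the packet is dropped and
 * tx_carrier_errors is bumped; otherwise the transformed packet leaves
 * through dst_output() on the xfrm dst.
 */
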
/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct flowi fl;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		break;
	case htons(ETH_P_IPV6):
		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		break;
	default:
		goto tx_err;
	}

	/* override mark with tunnel output key */
	fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);

	return vti_xmit(skb, dev, &fl);

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int vti4_err(struct sk_buff *skb, u32 info)
{
	__be32 spi;
	__u32 mark;
	struct xfrm_state *x;
	struct ip_tunnel *tunnel;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct ip_comp_hdr *ipch;
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	int protocol = iph->protocol;
	struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
				  iph->daddr, iph->saddr, 0);
	if (!tunnel)
		return -1;

	mark = be32_to_cpu(tunnel->parms.o_key);

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, protocol);
	else
		ipv4_redirect(skb, net, 0, protocol);
	xfrm_state_put(x);

	return 0;
}

static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5)
			return -EINVAL;
	}

	if (!(p.i_flags & GRE_KEY))
		p.i_key = 0;
	if (!(p.o_flags & GRE_KEY))
		p.o_key = 0;

	p.i_flags = VTI_ISVTI;

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd != SIOCDELTUNNEL) {
		p.i_flags |= GRE_KEY;
		p.o_flags |= GRE_KEY;
	}

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}

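/* The legacy ioctl interface reuses struct ip_tunnel_parm: GRE_KEY in
 * i_flags/o_flags only signals whether i_key/o_key are meaningful, and
 * VTI_ISVTI tags the parameters as a VTI configuration before they are
 * handed to the generic ip_tunnel_ioctl() helper.
 */
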
static const struct net_device_ops vti_netdev_ops = {
	.ndo_init	= vti_tunnel_init,
	.ndo_uninit	= ip_tunnel_uninit,
	.ndo_start_xmit	= vti_tunnel_xmit,
	.ndo_do_ioctl	= vti_tunnel_ioctl,
	.ndo_change_mtu	= ip_tunnel_change_mtu,
	.ndo_get_stats64 = ip_tunnel_get_stats64,
	.ndo_get_iflink	= ip_tunnel_get_iflink,
};

static void vti_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &vti_netdev_ops;
	dev->type		= ARPHRD_TUNNEL;
	ip_tunnel_setup(dev, vti_net_id);
}

static int vti_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_LLTX;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	iph->version		= 4;
	iph->protocol		= IPPROTO_IPIP;
	iph->ihl		= 5;
}

static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,	/* run before the base ESP handler */
};

static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
	.handler	=	vti_rcv_proto,
	.input_handler	=	vti_input_proto,
	.cb_handler	=	vti_rcv_cb,
	.err_handler	=	vti4_err,
	.priority	=	100,
};

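/* One xfrm4_protocol instance per IPsec protocol (ESP, AH, IPCOMP); all
 * three share the same receive, input and error callbacks.  A priority
 * above the default 0 makes these handlers run ahead of the generic
 * esp4/ah4/ipcomp4 protocol handlers, so VTI devices get first look at
 * incoming IPsec traffic.
 */
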
static struct xfrm_tunnel ipip_handler __read_mostly = {
	.handler	=	vti_rcv_tunnel,
	.err_handler	=	vti4_err,
	.priority	=	0,
};

static int __net_init vti_init_net(struct net *net)
{
	int err;
	struct ip_tunnel_net *itn;

	err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
	if (err)
		return err;
	itn = net_generic(net, vti_net_id);
	if (itn->fb_tunnel_dev)
		vti_fb_tunnel_init(itn->fb_tunnel_dev);
	return 0;
}

static void __net_exit vti_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
}

static struct pernet_operations vti_net_ops = {
	.init = vti_init_net,
	.exit_batch = vti_exit_batch_net,
	.id   = &vti_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
			       struct netlink_ext_ack *extack)
{
	return 0;
}

static void vti_netlink_parms(struct nlattr *data[],
			      struct ip_tunnel_parm *parms,
			      __u32 *fwmark)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_IPIP;

	if (!data)
		return;

	parms->i_flags = VTI_ISVTI;

	if (data[IFLA_VTI_LINK])
		parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

	if (data[IFLA_VTI_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

	if (data[IFLA_VTI_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

	if (data[IFLA_VTI_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_VTI_LOCAL]);

	if (data[IFLA_VTI_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]);

	if (data[IFLA_VTI_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}

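/* The IFLA_VTI_* attributes map onto the generic ip_tunnel parameters:
 * LINK is the underlying device, IKEY/OKEY become the marks used for the
 * xfrm lookups, LOCAL/REMOTE are the outer addresses and FWMARK is the
 * tunnel fwmark.  This is what "ip link add ... type vti ikey ... okey ..."
 * ends up filling in.
 */
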
static int vti_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm parms;
	__u32 fwmark = 0;

	vti_netlink_parms(data, &parms, &fwmark);
	return ip_tunnel_newlink(dev, tb, &parms, fwmark);
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
			  struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;

	vti_netlink_parms(data, &p, &fwmark);
	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t vti_get_size(const struct net_device *dev)
{
	/* IFLA_VTI_LINK, IKEY, OKEY, LOCAL, REMOTE and FWMARK: 4 bytes each */
	return 6 * nla_total_size(4);
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
	    nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
	    nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
	[IFLA_VTI_LINK]		= { .type = NLA_U32 },
	[IFLA_VTI_IKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_OKEY]		= { .type = NLA_U32 },
	[IFLA_VTI_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VTI_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VTI_FWMARK]	= { .type = NLA_U32 },
};

static struct rtnl_link_ops vti_link_ops __read_mostly = {
	.kind		= "vti",
	.maxtype	= IFLA_VTI_MAX,
	.policy		= vti_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= vti_tunnel_setup,
	.validate	= vti_tunnel_validate,
	.newlink	= vti_newlink,
	.changelink	= vti_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= vti_get_size,
	.fill_info	= vti_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

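/* Both configuration paths converge: the rtnetlink ops above ("ip link
 * ... type vti") and the legacy ioctl handler build an ip_tunnel_parm and
 * defer to the generic ip_tunnel code for device creation and updates.
 */
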
static int __init vti_init(void)
{
	const char *msg;
	int err;

	pr_info("IPv4 over IPsec tunneling driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&vti_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "tunnel protocols";
	err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	msg = "ipip tunnel";
	err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
	if (err < 0)
		goto xfrm_tunnel_failed;

	msg = "netlink interface";
	err = rtnl_link_register(&vti_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return err;

rtnl_link_failed:
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_failed:
	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	unregister_pernet_device(&vti_net_ops);
pernet_dev_failed:
	pr_err("vti init: failed to register %s\n", msg);
	return err;
}

static void __exit vti_fini(void)
{
	rtnl_link_unregister(&vti_link_ops);
	xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
	xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
	unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");