// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/static_key.h>

#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

const struct ip_tunnel_encap_ops __rcu *
		iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(iptun_encaps);

const struct ip6_tnl_encap_ops __rcu *
		ip6tun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
EXPORT_SYMBOL(ip6tun_encaps);
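
/* Build the outer IPv4 header on an already-encapsulated skb and hand it
 * to the IP output path. The caller supplies the route, addresses, TOS,
 * TTL and DF; stats are accounted against the inner packet length.
 */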
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 proto,
		   __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct net_device *dev = skb->dev;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash_if_not_l4(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= ip_mtu_locked(&rt->dst) ? 0 : df;
	iph->protocol	= proto;
	iph->tos	= tos;
	iph->daddr	= dst;
	iph->saddr	= src;
	iph->ttl	= ttl;
	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);

	err = ip_local_out(net, sk, skb);

	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	iptunnel_xmit_stats(dev, pkt_len);
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);
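
/* Strip an outer tunnel header and reset the skb for the inner packet.
 * With ETH_P_TEB (and !raw_proto) the inner Ethernet header is inspected
 * to derive skb->protocol; otherwise inner_proto is used as-is.
 */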
int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			   __be16 inner_proto, bool raw_proto, bool xnet)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);
	} else {
		skb->protocol = inner_proto;
	}

	skb_clear_hash_if_not_l4(skb);
	__vlan_hwaccel_clear_tag(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, xnet);

	return iptunnel_pull_offloads(skb);
}
EXPORT_SYMBOL_GPL(__iptunnel_pull_header);
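
/* Allocate a TX metadata_dst that mirrors an RX one with the addresses
 * swapped, so a reply can be sent back through the collect_md tunnel.
 */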
struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
					     gfp_t flags)
{
	struct metadata_dst *res;
	struct ip_tunnel_info *dst, *src;

	if (!md || md->type != METADATA_IP_TUNNEL ||
	    md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
		return NULL;

	src = &md->u.tun_info;
	res = metadata_dst_alloc(src->options_len, METADATA_IP_TUNNEL, flags);
	if (!res)
		return NULL;

	dst = &res->u.tun_info;
	dst->key.tun_id = src->key.tun_id;
	if (src->mode & IP_TUNNEL_INFO_IPV6)
		memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
		       sizeof(struct in6_addr));
	else
		dst->key.u.ipv4.dst = src->key.u.ipv4.src;
	dst->key.tun_flags = src->key.tun_flags;
	dst->mode = src->mode | IP_TUNNEL_INFO_TX;
	ip_tunnel_info_opts_set(dst, ip_tunnel_info_opts(src),
				src->options_len, 0);

	return res;
}
EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
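
/* Prepare an skb for tunnel transmission: mark the encapsulation
 * boundary and, for GSO, record the tunnel type in gso_type. Typical
 * callers pass e.g. SKB_GSO_GRE or SKB_GSO_UDP_TUNNEL (or the _CSUM
 * variants when the outer checksum is filled in).
 */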
int iptunnel_handle_offloads(struct sk_buff *skb,
			     int gso_type_mask)
{
	int err;

	if (likely(!skb->encapsulation)) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;
	}

	if (skb_is_gso(skb)) {
		err = skb_header_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type |= gso_type_mask;
		return 0;
	}

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		skb->ip_summed = CHECKSUM_NONE;
		/* We clear encapsulation here to prevent badly-written
		 * drivers potentially deciding to offload an inner checksum
		 * if we set CHECKSUM_PARTIAL on the outer header.
		 * This should go away when the drivers are all fixed.
		 */
		skb->encapsulation = 0;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);

/* Often modified stats are per cpu, other are shared (netdev->stats) */
void ip_tunnel_get_stats64(struct net_device *dev,
			   struct rtnl_link_stats64 *tot)
{
	int i;

	netdev_stats_to_stats64(tot, &dev->stats);

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			rx_packets = tstats->rx_packets;
			tx_packets = tstats->tx_packets;
			rx_bytes = tstats->rx_bytes;
			tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
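
/* Netlink policies for the LWTUNNEL_ENCAP_IP{,6} encapsulations used by
 * routes over collect_md tunnel devices, e.g. (illustrative iproute2
 * syntax):
 *
 *	ip route add 10.1.1.0/24 encap ip id 100 dst 192.0.2.1 dev tun0
 */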
static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
	[LWTUNNEL_IP_OPTS]	= { .type = NLA_NESTED },
};

static const struct nla_policy ip_opts_policy[LWTUNNEL_IP_OPTS_MAX + 1] = {
	[LWTUNNEL_IP_OPTS_GENEVE]	= { .type = NLA_NESTED },
	[LWTUNNEL_IP_OPTS_VXLAN]	= { .type = NLA_NESTED },
	[LWTUNNEL_IP_OPTS_ERSPAN]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = {
	[LWTUNNEL_IP_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[LWTUNNEL_IP_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_OPT_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[LWTUNNEL_IP_OPT_VXLAN_MAX + 1] = {
	[LWTUNNEL_IP_OPT_VXLAN_GBP]	= { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1] = {
	[LWTUNNEL_IP_OPT_ERSPAN_VER]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_OPT_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[LWTUNNEL_IP_OPT_ERSPAN_DIR]	= { .type = NLA_U8 },
	[LWTUNNEL_IP_OPT_ERSPAN_HWID]	= { .type = NLA_U8 },
};
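
/* The parse helpers below are used in two passes: with info == NULL they
 * only validate and return the option length (ip_tun_get_optlen()), and
 * with a real info they copy the options in (ip_tun_set_opts()).
 */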
static int ip_tun_parse_opts_geneve(struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_OPT_GENEVE_MAX + 1];
	int data_len, err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_GENEVE_MAX, attr,
			       geneve_opt_policy, extack);
	if (err)
		return err;

	if (!tb[LWTUNNEL_IP_OPT_GENEVE_CLASS] ||
	    !tb[LWTUNNEL_IP_OPT_GENEVE_TYPE] ||
	    !tb[LWTUNNEL_IP_OPT_GENEVE_DATA])
		return -EINVAL;

	attr = tb[LWTUNNEL_IP_OPT_GENEVE_DATA];
	data_len = nla_len(attr);
	if (data_len % 4)
		return -EINVAL;

	if (info) {
		struct geneve_opt *opt = ip_tunnel_info_opts(info);

		memcpy(opt->opt_data, nla_data(attr), data_len);
		opt->length = data_len / 4;
		attr = tb[LWTUNNEL_IP_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(attr);
		attr = tb[LWTUNNEL_IP_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(attr);
		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
				   struct ip_tunnel_info *info,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_OPT_VXLAN_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_VXLAN_MAX, attr,
			       vxlan_opt_policy, extack);
	if (err)
		return err;

	if (!tb[LWTUNNEL_IP_OPT_VXLAN_GBP])
		return -EINVAL;

	if (info) {
		struct vxlan_metadata *md = ip_tunnel_info_opts(info);

		attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
		md->gbp = nla_get_u32(attr);
		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
	}

	return sizeof(struct vxlan_metadata);
}

static int ip_tun_parse_opts_erspan(struct nlattr *attr,
				    struct ip_tunnel_info *info,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_OPT_ERSPAN_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_OPT_ERSPAN_MAX, attr,
			       erspan_opt_policy, extack);
	if (err)
		return err;

	if (!tb[LWTUNNEL_IP_OPT_ERSPAN_VER])
		return -EINVAL;

	if (info) {
		struct erspan_metadata *md = ip_tunnel_info_opts(info);

		attr = tb[LWTUNNEL_IP_OPT_ERSPAN_VER];
		md->version = nla_get_u8(attr);

		if (md->version == 1 && tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX]) {
			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(attr);
		} else if (md->version == 2 && tb[LWTUNNEL_IP_OPT_ERSPAN_DIR] &&
			   tb[LWTUNNEL_IP_OPT_ERSPAN_HWID]) {
			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(attr);
			attr = tb[LWTUNNEL_IP_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(attr));
		} else {
			return -EINVAL;
		}

		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
	}

	return sizeof(struct erspan_metadata);
}

static int ip_tun_parse_opts(struct nlattr *attr, struct ip_tunnel_info *info,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_OPTS_MAX + 1];
	int err;

	if (!attr)
		return 0;

	err = nla_parse_nested(tb, LWTUNNEL_IP_OPTS_MAX, attr,
			       ip_opts_policy, extack);
	if (err)
		return err;

	if (tb[LWTUNNEL_IP_OPTS_GENEVE])
		err = ip_tun_parse_opts_geneve(tb[LWTUNNEL_IP_OPTS_GENEVE],
					       info, extack);
	else if (tb[LWTUNNEL_IP_OPTS_VXLAN])
		err = ip_tun_parse_opts_vxlan(tb[LWTUNNEL_IP_OPTS_VXLAN],
					      info, extack);
	else if (tb[LWTUNNEL_IP_OPTS_ERSPAN])
		err = ip_tun_parse_opts_erspan(tb[LWTUNNEL_IP_OPTS_ERSPAN],
					       info, extack);
	else
		err = -EINVAL;

	return err;
}

static int ip_tun_get_optlen(struct nlattr *attr,
			     struct netlink_ext_ack *extack)
{
	return ip_tun_parse_opts(attr, NULL, extack);
}

static int ip_tun_set_opts(struct nlattr *attr, struct ip_tunnel_info *info,
			   struct netlink_ext_ack *extack)
{
	return ip_tun_parse_opts(attr, info, extack);
}
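
/* Parse LWTUNNEL_IP_* attributes into a freshly allocated lwtunnel_state
 * carrying an ip_tunnel_info in TX mode; options, if present, are stored
 * inline after the struct.
 */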
static int ip_tun_build_state(struct nlattr *attr,
			      unsigned int family, const void *cfg,
			      struct lwtunnel_state **ts,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
	struct lwtunnel_state *new_state;
	struct ip_tunnel_info *tun_info;
	int err, opt_len;

	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
					  ip_tun_policy, extack);
	if (err < 0)
		return err;

	opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP_OPTS], extack);
	if (opt_len < 0)
		return opt_len;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP;

	tun_info = lwt_tun_info(new_state);

	err = ip_tun_set_opts(tb[LWTUNNEL_IP_OPTS], tun_info, extack);
	if (err < 0) {
		lwtstate_free(new_state);
		return err;
	}

#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL);
	if (err) {
		lwtstate_free(new_state);
		return err;
	}
#endif

	if (tb[LWTUNNEL_IP_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);

	if (tb[LWTUNNEL_IP_DST])
		tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);

	if (tb[LWTUNNEL_IP_SRC])
		tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);

	if (tb[LWTUNNEL_IP_TTL])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

	if (tb[LWTUNNEL_IP_TOS])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

	if (tb[LWTUNNEL_IP_FLAGS])
		tun_info->key.tun_flags |=
				(nla_get_be16(tb[LWTUNNEL_IP_FLAGS]) &
				 ~TUNNEL_OPTIONS_PRESENT);

	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->options_len = opt_len;

	*ts = new_state;

	return 0;
}

static void ip_tun_destroy_state(struct lwtunnel_state *lwtstate)
{
#ifdef CONFIG_DST_CACHE
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	dst_cache_destroy(&tun_info->dst_cache);
#endif
}

static int ip_tun_fill_encap_opts_geneve(struct sk_buff *skb,
					 struct ip_tunnel_info *tun_info)
{
	struct geneve_opt *opt;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_GENEVE);
	if (!nest)
		return -ENOMEM;

	opt = ip_tunnel_info_opts(tun_info);
	if (nla_put_be16(skb, LWTUNNEL_IP_OPT_GENEVE_CLASS, opt->opt_class) ||
	    nla_put_u8(skb, LWTUNNEL_IP_OPT_GENEVE_TYPE, opt->type) ||
	    nla_put(skb, LWTUNNEL_IP_OPT_GENEVE_DATA, opt->length * 4,
		    opt->opt_data)) {
		nla_nest_cancel(skb, nest);
		return -ENOMEM;
	}

	nla_nest_end(skb, nest);
	return 0;
}

static int ip_tun_fill_encap_opts_vxlan(struct sk_buff *skb,
					struct ip_tunnel_info *tun_info)
{
	struct vxlan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_VXLAN);
	if (!nest)
		return -ENOMEM;

	md = ip_tunnel_info_opts(tun_info);
	if (nla_put_u32(skb, LWTUNNEL_IP_OPT_VXLAN_GBP, md->gbp)) {
		nla_nest_cancel(skb, nest);
		return -ENOMEM;
	}

	nla_nest_end(skb, nest);
	return 0;
}

static int ip_tun_fill_encap_opts_erspan(struct sk_buff *skb,
					 struct ip_tunnel_info *tun_info)
{
	struct erspan_metadata *md;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, LWTUNNEL_IP_OPTS_ERSPAN);
	if (!nest)
		return -ENOMEM;

	md = ip_tunnel_info_opts(tun_info);
	if (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_VER, md->version))
		goto err;

	if (md->version == 1 &&
	    nla_put_be32(skb, LWTUNNEL_IP_OPT_ERSPAN_INDEX, md->u.index))
		goto err;

	if (md->version == 2 &&
	    (nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_DIR, md->u.md2.dir) ||
	     nla_put_u8(skb, LWTUNNEL_IP_OPT_ERSPAN_HWID,
			get_hwid(&md->u.md2))))
		goto err;

	nla_nest_end(skb, nest);
	return 0;
err:
	nla_nest_cancel(skb, nest);
	return -ENOMEM;
}

static int ip_tun_fill_encap_opts(struct sk_buff *skb, int type,
				  struct ip_tunnel_info *tun_info)
{
	struct nlattr *nest;
	int err = 0;

	if (!(tun_info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
		return 0;

	nest = nla_nest_start_noflag(skb, type);
	if (!nest)
		return -ENOMEM;

	if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT)
		err = ip_tun_fill_encap_opts_geneve(skb, tun_info);
	else if (tun_info->key.tun_flags & TUNNEL_VXLAN_OPT)
		err = ip_tun_fill_encap_opts_vxlan(skb, tun_info);
	else if (tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)
		err = ip_tun_fill_encap_opts_erspan(skb, tun_info);

	if (err) {
		nla_nest_cancel(skb, nest);
		return err;
	}

	nla_nest_end(skb, nest);
	return 0;
}
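
/* Dump the lwtunnel state back to userspace; the inverse of
 * ip_tun_build_state().
 */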
static int ip_tun_fill_encap_info(struct sk_buff *skb,
				  struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_be64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id,
			 LWTUNNEL_IP_PAD) ||
	    nla_put_in_addr(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
	    nla_put_in_addr(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
	    nla_put_be16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags) ||
	    ip_tun_fill_encap_opts(skb, LWTUNNEL_IP_OPTS, tun_info))
		return -ENOMEM;

	return 0;
}

static int ip_tun_opts_nlsize(struct ip_tunnel_info *info)
{
	int opt_len;

	if (!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))
		return 0;

	opt_len = nla_total_size(0);		/* LWTUNNEL_IP_OPTS */
	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
		struct geneve_opt *opt = ip_tunnel_info_opts(info);

		opt_len += nla_total_size(0)	/* LWTUNNEL_IP_OPTS_GENEVE */
			   + nla_total_size(2)	/* OPT_GENEVE_CLASS */
			   + nla_total_size(1)	/* OPT_GENEVE_TYPE */
			   + nla_total_size(opt->length * 4);
						/* OPT_GENEVE_DATA */
	} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
		opt_len += nla_total_size(0)	/* LWTUNNEL_IP_OPTS_VXLAN */
			   + nla_total_size(4);	/* OPT_VXLAN_GBP */
	} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
		struct erspan_metadata *md = ip_tunnel_info_opts(info);

		opt_len += nla_total_size(0)	/* LWTUNNEL_IP_OPTS_ERSPAN */
			   + nla_total_size(1)	/* OPT_ERSPAN_VER */
			   + (md->version == 1 ? nla_total_size(4)
						/* OPT_ERSPAN_INDEX (v1) */
					       : nla_total_size(1) +
						 nla_total_size(1));
						/* OPT_ERSPAN_DIR + HWID (v2) */
	}

	return opt_len;
}

static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(8)	/* LWTUNNEL_IP_ID */
		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
		+ nla_total_size(2)	/* LWTUNNEL_IP_FLAGS */
		+ ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
					/* LWTUNNEL_IP_OPTS */
}

static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct ip_tunnel_info *info_a = lwt_tun_info(a);
	struct ip_tunnel_info *info_b = lwt_tun_info(b);

	return memcmp(info_a, info_b, sizeof(info_a->key)) ||
	       info_a->mode != info_b->mode ||
	       info_a->options_len != info_b->options_len ||
	       memcmp(ip_tunnel_info_opts(info_a),
		      ip_tunnel_info_opts(info_b), info_a->options_len);
}

static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
	.build_state = ip_tun_build_state,
	.destroy_state = ip_tun_destroy_state,
	.fill_encap = ip_tun_fill_encap_info,
	.get_encap_size = ip_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
	.owner = THIS_MODULE,
};

static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
	[LWTUNNEL_IP6_ID]	= { .type = NLA_U64 },
	[LWTUNNEL_IP6_DST]	= { .len = sizeof(struct in6_addr) },
	[LWTUNNEL_IP6_SRC]	= { .len = sizeof(struct in6_addr) },
	[LWTUNNEL_IP6_HOPLIMIT]	= { .type = NLA_U8 },
	[LWTUNNEL_IP6_TC]	= { .type = NLA_U8 },
	[LWTUNNEL_IP6_FLAGS]	= { .type = NLA_U16 },
	[LWTUNNEL_IP6_OPTS]	= { .type = NLA_NESTED },
};

static int ip6_tun_build_state(struct nlattr *attr,
			       unsigned int family, const void *cfg,
			       struct lwtunnel_state **ts,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
	struct lwtunnel_state *new_state;
	struct ip_tunnel_info *tun_info;
	int err, opt_len;

	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
					  ip6_tun_policy, extack);
	if (err < 0)
		return err;

	opt_len = ip_tun_get_optlen(tb[LWTUNNEL_IP6_OPTS], extack);
	if (opt_len < 0)
		return opt_len;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info) + opt_len);
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP6;

	tun_info = lwt_tun_info(new_state);

	err = ip_tun_set_opts(tb[LWTUNNEL_IP6_OPTS], tun_info, extack);
	if (err < 0) {
		lwtstate_free(new_state);
		return err;
	}

	if (tb[LWTUNNEL_IP6_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);

	if (tb[LWTUNNEL_IP6_DST])
		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);

	if (tb[LWTUNNEL_IP6_SRC])
		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);

	if (tb[LWTUNNEL_IP6_HOPLIMIT])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);

	if (tb[LWTUNNEL_IP6_TC])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);

	if (tb[LWTUNNEL_IP6_FLAGS])
		tun_info->key.tun_flags |=
				(nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]) &
				 ~TUNNEL_OPTIONS_PRESENT);

	tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
	tun_info->options_len = opt_len;

	*ts = new_state;

	return 0;
}

static int ip6_tun_fill_encap_info(struct sk_buff *skb,
				   struct lwtunnel_state *lwtstate)
{
	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);

	if (nla_put_be64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id,
			 LWTUNNEL_IP6_PAD) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
	    nla_put_be16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags) ||
	    ip_tun_fill_encap_opts(skb, LWTUNNEL_IP6_OPTS, tun_info))
		return -ENOMEM;

	return 0;
}

static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size_64bit(8)	/* LWTUNNEL_IP6_ID */
		+ nla_total_size(16)	/* LWTUNNEL_IP6_DST */
		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
		+ nla_total_size(2)	/* LWTUNNEL_IP6_FLAGS */
		+ ip_tun_opts_nlsize(lwt_tun_info(lwtstate));
					/* LWTUNNEL_IP6_OPTS */
}

static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
	.build_state = ip6_tun_build_state,
	.fill_encap = ip6_tun_fill_encap_info,
	.get_encap_size = ip6_tun_encap_nlsize,
	.cmp_encap = ip_tun_cmp_encap,
	.owner = THIS_MODULE,
};

void __init ip_tunnel_core_init(void)
{
	/* If you land here, make sure whether increasing ip_tunnel_info's
	 * options_len is a reasonable choice with its usage in front ends
	 * (f.e., it's part of flow keys, etc).
	 */
	BUILD_BUG_ON(IP_TUNNEL_OPTS_MAX != 255);

	lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
	lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
}
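
/* Static key toggled by consumers of tunnel metadata (e.g. Open vSwitch)
 * so the fast path only collects metadata when someone needs it.
 */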
DEFINE_STATIC_KEY_FALSE(ip_tunnel_metadata_cnt);
EXPORT_SYMBOL(ip_tunnel_metadata_cnt);

void ip_tunnel_need_metadata(void)
{
	static_branch_inc(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);

void ip_tunnel_unneed_metadata(void)
{
	static_branch_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);