/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tun_proto.h>
#include <net/vxlan.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
#endif

#define VXLAN_VERSION	"0.1"

#define PORT_HASH_BITS	8
#define PORT_HASH_SIZE  (1<<PORT_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned short vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, ushort, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

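/* Editorial usage sketch (not part of the original source): the parameter
 * above is registered read-only (0444), so the IANA port can only be
 * selected at load time, e.g. "modprobe vxlan udp_port=4789"; it cannot
 * be changed through sysfs afterwards.
 */
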
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static unsigned int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops;

static const u8 all_zeros_mac[ETH_ALEN + 2];

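/* Editorial note (an assumption, not stated in the source): the two bytes
 * of padding past ETH_ALEN appear to keep wide loads on a MAC address,
 * such as the get_unaligned((u64 *)addr) read in eth_hash() below, inside
 * the object when applied to this all-zeros address.
 */
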
static int vxlan_sock_add(struct vxlan_dev *vxlan);

static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);

/* per-network namespace private data for this module */
struct vxlan_net {
	struct list_head  vxlan_list;
	struct hlist_head sock_list[PORT_HASH_SIZE];
	spinlock_t	  sock_lock;
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct list_head  remotes;
	u8		  eth_addr[ETH_ALEN];
	u16		  state;	/* see ndm_state */
	__be32		  vni;
	u16		  flags;	/* see ndm_flags and below */
};

#define NTF_VXLAN_ADDED_BY_USER 0x100

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
	return vs->flags & VXLAN_F_COLLECT_METADATA ||
	       ip_tunnel_collect_metadata();
}

#if IS_ENABLED(CONFIG_IPV6)
static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
	else
		return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		ip->sin6.sin6_addr = nla_get_in6_addr(nla);
		ip->sa.sa_family = AF_INET6;
		return 0;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	if (ip->sa.sa_family == AF_INET6)
		return nla_put_in6_addr(skb, attr, &ip->sin6.sin6_addr);
	else
		return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}

#else /* !CONFIG_IPV6 */

static inline
bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
{
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(struct in6_addr)) {
		return -EAFNOSUPPORT;
	} else if (nla_len(nla) >= sizeof(__be32)) {
		ip->sin.sin_addr.s_addr = nla_get_in_addr(nla);
		ip->sa.sa_family = AF_INET;
		return 0;
	} else {
		return -EAFNOSUPPORT;
	}
}

static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
			      const union vxlan_addr *ip)
{
	return nla_put_in_addr(skb, attr, ip->sin.sin_addr.s_addr);
}
#endif

/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni)
{
	return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
}

/* Socket hash table head */
static inline struct hlist_head *vs_head(struct net *net, __be16 port)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
}

/* First remote destination for a forwarding entry.
 * Guaranteed to be non-NULL because remotes are never deleted.
 */
static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
	return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
}

static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
{
	return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}

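/* Editorial note on the two accessors above: first_remote_rcu() is meant
 * for readers inside rcu_read_lock() sections (e.g. the receive and snoop
 * paths), while first_remote_rtnl() serves control-path callers that
 * already hold RTNL, such as the netlink FDB handlers below.
 */
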
/* Find VXLAN socket based on network namespace, address family and UDP port
 * and enabled unshareable flags.
 */
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
					  __be16 port, u32 flags, int ifindex)
{
	struct vxlan_sock *vs;

	flags &= VXLAN_F_RCV_FLAGS;

	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
		if (inet_sk(vs->sock->sk)->inet_sport == port &&
		    vxlan_get_sk_family(vs) == family &&
		    vs->flags == flags &&
		    vs->sock->sk->sk_bound_dev_if == ifindex)
			return vs;
	}
	return NULL;
}

static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, int ifindex,
					   __be32 vni)
{
	struct vxlan_dev_node *node;

	/* For flow based devices, map all packets to VNI 0 */
	if (vs->flags & VXLAN_F_COLLECT_METADATA)
		vni = 0;

	hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) {
		if (node->vxlan->default_dst.remote_vni != vni)
			continue;

		if (IS_ENABLED(CONFIG_IPV6)) {
			const struct vxlan_config *cfg = &node->vxlan->cfg;

			if ((cfg->flags & VXLAN_F_IPV6_LINKLOCAL) &&
			    cfg->remote_ifindex != ifindex)
				continue;
		}

		return node->vxlan;
	}

	return NULL;
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
					__be32 vni, sa_family_t family,
					__be16 port, u32 flags)
{
	struct vxlan_sock *vs;

	vs = vxlan_find_sock(net, family, port, flags, ifindex);
	if (!vs)
		return NULL;

	return vxlan_vs_find_vni(vs, ifindex, vni);
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		send_ip = !vxlan_addr_any(&rdst->remote_ip);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
		ndm->ndm_family = send_ip ? rdst->remote_ip.sa.sa_family : AF_INET;
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	if (rdst->offloaded)
		ndm->ndm_flags |= NTF_OFFLOADED;
	ndm->ndm_type = RTN_UNICAST;

	if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
	    nla_put_s32(skb, NDA_LINK_NETNSID,
			peernet2id(dev_net(vxlan->dev), vxlan->net)))
		goto nla_put_failure;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
		goto nla_put_failure;
	if ((vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
	    nla_put_u32(skb, NDA_SRC_VNI,
			be32_to_cpu(fdb->vni)))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(__s32)) /* NDA_LINK_NETNSID */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void __vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			       struct vxlan_rdst *rd, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
					      const struct vxlan_fdb *fdb,
					      const struct vxlan_rdst *rd,
					      struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
	fdb_info->info.dev = vxlan->dev;
	fdb_info->remote_ip = rd->remote_ip;
	fdb_info->remote_port = rd->remote_port;
	fdb_info->remote_vni = rd->remote_vni;
	fdb_info->remote_ifindex = rd->remote_ifindex;
	memcpy(fdb_info->eth_addr, fdb->eth_addr, ETH_ALEN);
	fdb_info->vni = fdb->vni;
	fdb_info->offloaded = rd->offloaded;
	fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
}

static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
					       struct vxlan_fdb *fdb,
					       struct vxlan_rdst *rd,
					       bool adding)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	enum switchdev_notifier_type notifier_type;

	if (WARN_ON(!rd))
		return;

	notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE
			       : SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE;
	vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, &info);
	call_switchdev_notifiers(notifier_type, vxlan->dev,
				 &info.info);
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
			     struct vxlan_rdst *rd, int type, bool swdev_notify)
{
	if (swdev_notify) {
		switch (type) {
		case RTM_NEWNEIGH:
			vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
							   true);
			break;
		case RTM_DELNEIGH:
			vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
							   false);
			break;
		}
	}

	__vxlan_fdb_notify(vxlan, fdb, rd, type);
}

static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = {
		.remote_ip = *ipa, /* goes to NDA_DST */
		.remote_vni = cpu_to_be32(VXLAN_N_VID),
	};

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f = {
		.state = NUD_STALE,
	};
	struct vxlan_rdst remote = { };

	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
{
	/* use 1 byte of OUI and 3 bytes of NIC */
	u32 key = get_unaligned((u32 *)(addr + 2));

	return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac, __be32 vni)
{
	if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
		return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
	else
		return &vxlan->fdb_head[eth_hash(mac)];
}

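/* Editorial sketch of the keying above: in collect-metadata (flow-based)
 * mode a single device multiplexes many VNIs, so the chain is picked from
 * the (MAC, VNI) pair via eth_vni_hash(); otherwise the MAC alone selects
 * it. __vxlan_find_mac() below applies the matching comparison on lookup.
 */
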
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac, __be32 vni)
{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
	struct vxlan_fdb *f;

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (ether_addr_equal(mac, f->eth_addr)) {
			if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
				if (vni == f->vni)
					return f;
			} else {
				return f;
			}
		}
	}

	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac, __be32 vni)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac, vni);
	if (f && f->used != jiffies)
		f->used = jiffies;

	return f;
}

/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
					      union vxlan_addr *ip, __be16 port,
					      __be32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd;

	list_for_each_entry(rd, &f->remotes, list) {
		if (vxlan_addr_equal(&rd->remote_ip, ip) &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return rd;
	}

	return NULL;
}

int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
		      struct switchdev_notifier_vxlan_fdb_info *fdb_info)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	u8 eth_addr[ETH_ALEN + 2] = { 0 };
	struct vxlan_rdst *rdst;
	struct vxlan_fdb *f;
	int rc = 0;

	if (is_multicast_ether_addr(mac) ||
	    is_zero_ether_addr(mac))
		return -EINVAL;

	ether_addr_copy(eth_addr, mac);

	rcu_read_lock();

	f = __vxlan_find_mac(vxlan, eth_addr, vni);
	if (!f) {
		rc = -ENOENT;
		goto out;
	}

	rdst = first_remote_rcu(f);
	vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, fdb_info);

out:
	rcu_read_unlock();
	return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc);

/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
			     union vxlan_addr *ip, __be16 port, __be32 vni,
			     __u32 ifindex)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
	if (!rd)
		return 0;

	dst_cache_reset(&rd->dst_cache);
	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	return 1;
}

/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    union vxlan_addr *ip, __be16 port, __be32 vni,
			    __u32 ifindex, struct vxlan_rdst **rdp)
{
	struct vxlan_rdst *rd;

	rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
	if (rd)
		return 0;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;

	if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
		kfree(rd);
		return -ENOBUFS;
	}

	rd->remote_ip = *ip;
	rd->remote_port = port;
	rd->offloaded = false;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;

	list_add_tail_rcu(&rd->list, &f->remotes);

	*rdp = rd;
	return 1;
}

static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
					  unsigned int off,
					  struct vxlanhdr *vh, size_t hdrlen,
					  __be32 vni_field,
					  struct gro_remcsum *grc,
					  bool nopartial)
{
	size_t start, offset;

	if (skb->remcsum_offload)
		return vh;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	start = vxlan_rco_start(vni_field);
	offset = start + vxlan_rco_offset(vni_field);

	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
				     start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return vh;
}

static struct sk_buff *vxlan_gro_receive(struct sock *sk,
					 struct list_head *head,
					 struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct vxlanhdr *vh, *vh2;
	unsigned int hlen, off_vx;
	int flush = 1;
	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
	__be32 flags;
	struct gro_remcsum grc;

	skb_gro_remcsum_init(&grc);

	off_vx = skb_gro_offset(skb);
	hlen = off_vx + sizeof(*vh);
	vh   = skb_gro_header_fast(skb, off_vx);
	if (skb_gro_header_hard(skb, hlen)) {
		vh = skb_gro_header_slow(skb, hlen, off_vx);
		if (unlikely(!vh))
			goto out;
	}

	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));

	flags = vh->vx_flags;

	if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) {
		vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr),
				       vh->vx_vni, &grc,
				       !!(vs->flags &
					  VXLAN_F_REMCSUM_NOPARTIAL));

		if (!vh)
			goto out;
	}

	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vh2 = (struct vxlanhdr *)(p->data + off_vx);
		if (vh->vx_flags != vh2->vx_flags ||
		    vh->vx_vni != vh2->vx_vni) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	pp = call_gro_receive(eth_gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);

	return pp;
}

static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	/* Sets 'skb->inner_mac_header' since we are always called with
	 * 'skb->encapsulation' set.
	 */
	return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}

static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
					 const u8 *mac, __u16 state,
					 __be32 src_vni, __u16 ndm_flags)
{
	struct vxlan_fdb *f;

	f = kmalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return NULL;
	f->state = state;
	f->flags = ndm_flags;
	f->updated = f->used = jiffies;
	f->vni = src_vni;
	INIT_LIST_HEAD(&f->remotes);
	memcpy(f->eth_addr, mac, ETH_ALEN);

	return f;
}

static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __be16 port, __be32 src_vni,
			    __be32 vni, __u32 ifindex, __u16 ndm_flags,
			    struct vxlan_fdb **fdb)
{
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int rc;

	if (vxlan->cfg.addrmax &&
	    vxlan->addrcnt >= vxlan->cfg.addrmax)
		return -ENOSPC;

	netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
	f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
	if (!f)
		return -ENOMEM;

	rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
	if (rc < 0) {
		kfree(f);
		return rc;
	}

	++vxlan->addrcnt;
	hlist_add_head_rcu(&f->hlist,
			   vxlan_fdb_head(vxlan, mac, src_vni));

	*fdb = f;

	return 0;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_update(struct vxlan_dev *vxlan,
			    const u8 *mac, union vxlan_addr *ip,
			    __u16 state, __u16 flags,
			    __be16 port, __be32 src_vni, __be32 vni,
			    __u32 ifindex, __u16 ndm_flags,
			    bool swdev_notify)
{
	__u16 fdb_flags = (ndm_flags & ~NTF_USE);
	struct vxlan_rdst *rd = NULL;
	struct vxlan_fdb *f;
	int notify = 0;
	int rc;

	f = __vxlan_find_mac(vxlan, mac, src_vni);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}

		/* Do not allow an externally learned entry to take over an
		 * entry added by the user.
		 */
		if (!(fdb_flags & NTF_EXT_LEARNED) ||
		    !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
			if (f->state != state) {
				f->state = state;
				f->updated = jiffies;
				notify = 1;
			}
			if (f->flags != fdb_flags) {
				f->flags = fdb_flags;
				f->updated = jiffies;
				notify = 1;
			}
		}

		if ((flags & NLM_F_REPLACE)) {
			/* Only change unicasts */
			if (!(is_multicast_ether_addr(f->eth_addr) ||
			      is_zero_ether_addr(f->eth_addr))) {
				notify |= vxlan_fdb_replace(f, ip, port, vni,
							    ifindex);
			} else
				return -EOPNOTSUPP;
		}
		if ((flags & NLM_F_APPEND) &&
		    (is_multicast_ether_addr(f->eth_addr) ||
		     is_zero_ether_addr(f->eth_addr))) {
			rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);

			if (rc < 0)
				return rc;
			notify |= rc;
		}

		if (ndm_flags & NTF_USE)
			f->used = jiffies;
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		/* Disallow replace to add a multicast entry */
		if ((flags & NLM_F_REPLACE) &&
		    (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
			return -EOPNOTSUPP;

		netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
		rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
				      vni, ifindex, fdb_flags, &f);
		if (rc < 0)
			return rc;
		notify = 1;
	}

	if (notify) {
		if (rd == NULL)
			rd = first_remote_rtnl(f);
		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify);
	}

	return 0;
}

static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
	struct vxlan_rdst *rd, *nd;

	list_for_each_entry_safe(rd, nd, &f->remotes, list) {
		dst_cache_destroy(&rd->dst_cache);
		kfree(rd);
	}
	kfree(f);
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
			      bool do_notify, bool swdev_notify)
{
	struct vxlan_rdst *rd;

	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	if (do_notify)
		list_for_each_entry(rd, &f->remotes, list)
			vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
					 swdev_notify);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}

static void vxlan_dst_free(struct rcu_head *head)
{
	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);

	dst_cache_destroy(&rd->dst_cache);
	kfree(rd);
}

static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
				  struct vxlan_rdst *rd, bool swdev_notify)
{
	list_del_rcu(&rd->list);
	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify);
	call_rcu(&rd->rcu, vxlan_dst_free);
}

static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
			   union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
			   __be32 *vni, u32 *ifindex)
{
	struct net *net = dev_net(vxlan->dev);
	int err;

	if (tb[NDA_DST]) {
		err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
		if (err)
			return err;
	} else {
		union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
		if (remote->sa.sa_family == AF_INET) {
			ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
			ip->sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
		} else {
			ip->sin6.sin6_addr = in6addr_any;
			ip->sa.sa_family = AF_INET6;
#endif
		}
	}

	if (tb[NDA_PORT]) {
		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
			return -EINVAL;
		*port = nla_get_be16(tb[NDA_PORT]);
	} else {
		*port = vxlan->cfg.dst_port;
	}

	if (tb[NDA_VNI]) {
		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
			return -EINVAL;
		*vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
	} else {
		*vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_SRC_VNI]) {
		if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
			return -EINVAL;
		*src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
	} else {
		*src_vni = vxlan->default_dst.remote_vni;
	}

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
			return -EINVAL;
		*ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = __dev_get_by_index(net, *ifindex);
		if (!tdev)
			return -EADDRNOTAVAIL;
	} else {
		*ifindex = 0;
	}

	return 0;
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 vid, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	/* struct net *net = dev_net(vxlan->dev); */
	union vxlan_addr ip;
	__be16 port;
	__be32 src_vni, vni;
	u32 ifindex;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
	if (err)
		return err;

	if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
		return -EAFNOSUPPORT;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
			       port, src_vni, vni, ifindex,
			       ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
			       true);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
			      const unsigned char *addr, union vxlan_addr ip,
			      __be16 port, __be32 src_vni, __be32 vni,
			      u32 ifindex, bool swdev_notify)
{
	struct vxlan_fdb *f;
	struct vxlan_rdst *rd = NULL;
	int err = -ENOENT;

	f = vxlan_find_mac(vxlan, addr, src_vni);
	if (!f)
		return err;

	if (!vxlan_addr_any(&ip)) {
		rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
		if (!rd)
			goto out;
	}

	/* remove a destination if it's not the only one on the list,
	 * otherwise destroy the fdb entry
	 */
	if (rd && !list_is_singular(&f->remotes)) {
		vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify);
		goto out;
	}

	vxlan_fdb_destroy(vxlan, f, true, swdev_notify);

out:
	return 0;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	union vxlan_addr ip;
	__be32 src_vni, vni;
	__be16 port;
	u32 ifindex;
	int err;

	err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
	if (err)
		return err;

	spin_lock_bh(&vxlan->hash_lock);
	err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
				 true);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev,
			  struct net_device *filter_dev, int *idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;
	int err = 0;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;

		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;

			list_for_each_entry_rcu(rd, &f->remotes, list) {
				if (*idx < cb->args[2])
					goto skip;

				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGH,
						     NLM_F_MULTI, rd);
				if (err < 0)
					goto out;
skip:
				*idx += 1;
			}
		}
	}
out:
	return err;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 * Return true if packet is bogus and should be dropped.
 */
static bool vxlan_snoop(struct net_device *dev,
			union vxlan_addr *src_ip, const u8 *src_mac,
			u32 src_ifindex, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	u32 ifindex = 0;

#if IS_ENABLED(CONFIG_IPV6)
	if (src_ip->sa.sa_family == AF_INET6 &&
	    (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
		ifindex = src_ifindex;
#endif

	f = vxlan_find_mac(vxlan, src_mac, vni);
	if (likely(f)) {
		struct vxlan_rdst *rdst = first_remote_rcu(f);

		if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) &&
			   rdst->remote_ifindex == ifindex))
			return false;

		/* Don't migrate static entries, drop packets */
		if (f->state & (NUD_PERMANENT | NUD_NOARP))
			return true;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pIS to %pIS\n",
				    src_mac, &rdst->remote_ip.sa, &src_ip->sa);

		rdst->remote_ip = *src_ip;
		f->updated = jiffies;
		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true);
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_update(vxlan, src_mac, src_ip,
					 NUD_REACHABLE,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->cfg.dst_port,
					 vni,
					 vxlan->default_dst.remote_vni,
					 ifindex, NTF_SELF, true);
		spin_unlock(&vxlan->hash_lock);
	}

	return false;
}

/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
	struct vxlan_dev *vxlan;
	struct vxlan_sock *sock4;
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6;
#endif
	unsigned short family = dev->default_dst.remote_ip.sa.sa_family;

	sock4 = rtnl_dereference(dev->vn4_sock);

	/* The vxlan_sock is only used by dev, leaving group has
	 * no effect on other vxlan devices.
	 */
	if (family == AF_INET && sock4 && refcount_read(&sock4->refcnt) == 1)
		return false;
#if IS_ENABLED(CONFIG_IPV6)
	sock6 = rtnl_dereference(dev->vn6_sock);
	if (family == AF_INET6 && sock6 && refcount_read(&sock6->refcnt) == 1)
		return false;
#endif

	list_for_each_entry(vxlan, &vn->vxlan_list, next) {
		if (!netif_running(vxlan->dev) || vxlan == dev)
			continue;

		if (family == AF_INET &&
		    rtnl_dereference(vxlan->vn4_sock) != sock4)
			continue;
#if IS_ENABLED(CONFIG_IPV6)
		if (family == AF_INET6 &&
		    rtnl_dereference(vxlan->vn6_sock) != sock6)
			continue;
#endif

		if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
				      &dev->default_dst.remote_ip))
			continue;

		if (vxlan->default_dst.remote_ifindex !=
		    dev->default_dst.remote_ifindex)
			continue;

		return true;
	}

	return false;
}

static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
	struct vxlan_net *vn;

	if (!vs)
		return false;
	if (!refcount_dec_and_test(&vs->refcnt))
		return false;

	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
	spin_lock(&vn->sock_lock);
	hlist_del_rcu(&vs->hlist);
	udp_tunnel_notify_del_rx_port(vs->sock,
				      (vs->flags & VXLAN_F_GPE) ?
				      UDP_TUNNEL_TYPE_VXLAN_GPE :
				      UDP_TUNNEL_TYPE_VXLAN);
	spin_unlock(&vn->sock_lock);

	return true;
}

static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
	struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
	struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

	RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
#endif

	RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
	synchronize_net();

	vxlan_vs_del_dev(vxlan);

	if (__vxlan_sock_release_prep(sock4)) {
		udp_tunnel_sock_release(sock4->sock);
		kfree(sock4);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (__vxlan_sock_release_prep(sock6)) {
		udp_tunnel_sock_release(sock6->sock);
		kfree(sock6);
	}
#endif
}

/* Update multicast group membership when first VNI on
 * multicast address is brought up
 */
static int vxlan_igmp_join(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_join_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

/* Inverse of vxlan_igmp_join when last VNI is brought down */
static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
{
	struct sock *sk;
	union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
	int ifindex = vxlan->default_dst.remote_ifindex;
	int ret = -EINVAL;

	if (ip->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
		struct ip_mreqn mreq = {
			.imr_multiaddr.s_addr	= ip->sin.sin_addr.s_addr,
			.imr_ifindex		= ifindex,
		};

		sk = sock4->sock->sk;
		lock_sock(sk);
		ret = ip_mc_leave_group(sk, &mreq);
		release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);

		sk = sock6->sock->sk;
		lock_sock(sk);
		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
						   &ip->sin6.sin6_addr);
		release_sock(sk);
#endif
	}

	return ret;
}

static bool vxlan_remcsum(struct vxlanhdr *unparsed,
			  struct sk_buff *skb, u32 vxflags)
{
	size_t start, offset;

	if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload)
		goto out;

	start = vxlan_rco_start(unparsed->vx_vni);
	offset = start + vxlan_rco_offset(unparsed->vx_vni);

	if (!pskb_may_pull(skb, offset + sizeof(u16)))
		return false;

	skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset,
			    !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL));
out:
	unparsed->vx_flags &= ~VXLAN_HF_RCO;
	unparsed->vx_vni &= VXLAN_VNI_MASK;
	return true;
}

static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
				struct sk_buff *skb, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed;
	struct metadata_dst *tun_dst;

	if (!(unparsed->vx_flags & VXLAN_HF_GBP))
		goto out;

	md->gbp = ntohs(gbp->policy_id);

	tun_dst = (struct metadata_dst *)skb_dst(skb);
	if (tun_dst) {
		tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
		tun_dst->u.tun_info.options_len = sizeof(*md);
	}
	if (gbp->dont_learn)
		md->gbp |= VXLAN_GBP_DONT_LEARN;

	if (gbp->policy_applied)
		md->gbp |= VXLAN_GBP_POLICY_APPLIED;

	/* In flow-based mode, GBP is carried in dst_metadata */
	if (!(vxflags & VXLAN_F_COLLECT_METADATA))
		skb->mark = md->gbp;
out:
	unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}

static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
				__be16 *protocol,
				struct sk_buff *skb, u32 vxflags)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;

	/* Need to have Next Protocol set for interfaces in GPE mode. */
	if (!gpe->np_applied)
		return false;
	/* "The initial version is 0. If a receiver does not support the
	 * version indicated it MUST drop the packet."
	 */
	if (gpe->version != 0)
		return false;
	/* "When the O bit is set to 1, the packet is an OAM packet and OAM
	 * processing MUST occur." However, we don't implement OAM
	 * processing, thus drop the packet.
	 */
	if (gpe->oam_flag)
		return false;

	*protocol = tun_p_to_eth_p(gpe->next_protocol);
	if (!*protocol)
		return false;

	unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
	return true;
}

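/* Editorial example for the mapping above (the actual table lives in
 * <net/tun_proto.h>): tun_p_to_eth_p() converts a GPE next-protocol value
 * such as IPv4 into htons(ETH_P_IP) and yields 0 for unknown values,
 * which makes the caller reject the packet.
 */
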
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
			  struct vxlan_sock *vs,
			  struct sk_buff *skb, __be32 vni)
{
	union vxlan_addr saddr;
	u32 ifindex = skb->dev->ifindex;

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	/* Ignore packet loops (and multicast echo) */
	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
		return false;

	/* Get address from the outer IP header */
	if (vxlan_get_sk_family(vs) == AF_INET) {
		saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
		saddr.sa.sa_family = AF_INET6;
#endif
	}

	if ((vxlan->cfg.flags & VXLAN_F_LEARN) &&
	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, ifindex, vni))
		return false;

	return true;
}

static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
				  struct sk_buff *skb)
{
	int err = 0;

	if (vxlan_get_sk_family(vs) == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
	else
		err = IP6_ECN_decapsulate(oiph, skb);
#endif

	if (unlikely(err) && log_ecn_error) {
		if (vxlan_get_sk_family(vs) == AF_INET)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &((struct iphdr *)oiph)->saddr,
					     ((struct iphdr *)oiph)->tos);
		else
			net_info_ratelimited("non-ECT from %pI6\n",
					     &((struct ipv6hdr *)oiph)->saddr);
	}
	return err <= 1;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr unparsed;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 protocol = htons(ETH_P_TEB);
	bool raw_proto = false;
	void *oiph;
	__be32 vni = 0;

	/* Need UDP and VXLAN header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto drop;

	unparsed = *vxlan_hdr(skb);
	/* VNI flag always required to be set */
	if (!(unparsed.vx_flags & VXLAN_HF_VNI)) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxlan_hdr(skb)->vx_flags),
			   ntohl(vxlan_hdr(skb)->vx_vni));
		/* Return non vxlan pkt */
		goto drop;
	}
	unparsed.vx_flags &= ~VXLAN_HF_VNI;
	unparsed.vx_vni &= ~VXLAN_VNI_MASK;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);

	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
	if (!vxlan)
		goto drop;

	/* For backwards compatibility, only allow reserved fields to be
	 * used by VXLAN extensions if explicitly requested.
	 */
	if (vs->flags & VXLAN_F_GPE) {
		if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
			goto drop;
		raw_proto = true;
	}

	if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
				   !net_eq(vxlan->net, dev_net(vxlan->dev))))
		goto drop;

	if (vxlan_collect_metadata(vs)) {
		struct metadata_dst *tun_dst;

		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
					 key32_to_tunnel_id(vni), sizeof(*md));

		if (!tun_dst)
			goto drop;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);

		skb_dst_set(skb, (struct dst_entry *)tun_dst);
	} else {
		memset(md, 0, sizeof(*md));
	}

	if (vs->flags & VXLAN_F_REMCSUM_RX)
		if (!vxlan_remcsum(&unparsed, skb, vs->flags))
			goto drop;
	if (vs->flags & VXLAN_F_GBP)
		vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
	/* Note that GBP and GPE can never be active together. This is
	 * ensured in vxlan_dev_configure.
	 */

	if (unparsed.vx_flags || unparsed.vx_vni) {
		/* If there are any unprocessed flags remaining treat
		 * this as a malformed packet. This behavior diverges from
		 * VXLAN RFC (RFC7348), which stipulates that bits in reserved
		 * fields are to be ignored. The approach here maintains
		 * compatibility with previous stack code, and also is more
		 * robust and provides a little more security in adding
		 * extensions to VXLAN.
		 */
		goto drop;
	}

	if (!raw_proto) {
		if (!vxlan_set_mac(vxlan, vs, skb, vni))
			goto drop;
	} else {
		skb_reset_mac_header(skb);
		skb->dev = vxlan->dev;
		skb->pkt_type = PACKET_HOST;
	}

	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;
		goto drop;
	}

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	gro_cells_receive(&vxlan->gro_cells, skb);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan;
	struct vxlan_sock *vs;
	struct vxlanhdr *hdr;
	__be32 vni;

	if (skb->len < VXLAN_HLEN)
		return -EINVAL;

	hdr = vxlan_hdr(skb);

	if (!(hdr->vx_flags & VXLAN_HF_VNI))
		return -EINVAL;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		return -ENOENT;

	vni = vxlan_vni(hdr->vx_vni);
	vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
	if (!vxlan)
		return -ENOENT;

	return 0;
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha, vni);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				n->ha, sha);

		neigh_release(n);

		if (reply == NULL)
			goto out;

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin.sin_addr.s_addr = tip,
			.sin.sin_family = AF_INET,
		};

		vxlan_ip_miss(dev, &ipa);
	}
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
	struct neighbour *n, bool isrouter)
{
	struct net_device *dev = request->dev;
	struct sk_buff *reply;
	struct nd_msg *ns, *na;
	struct ipv6hdr *pip6;
	u8 *daddr;
	int na_olen = 8; /* opt hdr + ETH_ALEN for target */
	int ns_olen;
	int i, len;

	if (dev == NULL || !pskb_may_pull(request, request->len))
		return NULL;

	len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
		sizeof(*na) + na_olen + dev->needed_tailroom;
	reply = alloc_skb(len, GFP_ATOMIC);
	if (reply == NULL)
		return NULL;

	reply->protocol = htons(ETH_P_IPV6);
	reply->dev = dev;
	skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
	skb_push(reply, sizeof(struct ethhdr));
	skb_reset_mac_header(reply);

	ns = (struct nd_msg *)(ipv6_hdr(request) + 1);

	daddr = eth_hdr(request)->h_source;
	ns_olen = request->len - skb_network_offset(request) -
		sizeof(struct ipv6hdr) - sizeof(*ns);
	for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
		if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
			daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
			break;
		}
	}

	/* Ethernet header */
	ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
	ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
	eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
	reply->protocol = htons(ETH_P_IPV6);

	skb_pull(reply, sizeof(struct ethhdr));
	skb_reset_network_header(reply);
	skb_put(reply, sizeof(struct ipv6hdr));

	/* IPv6 header */

	pip6 = ipv6_hdr(reply);
	memset(pip6, 0, sizeof(struct ipv6hdr));
	pip6->version = 6;
	pip6->priority = ipv6_hdr(request)->priority;
	pip6->nexthdr = IPPROTO_ICMPV6;
	pip6->hop_limit = 255;
	pip6->daddr = ipv6_hdr(request)->saddr;
	pip6->saddr = *(struct in6_addr *)n->primary_key;

	skb_pull(reply, sizeof(struct ipv6hdr));
	skb_reset_transport_header(reply);

	/* Neighbor Advertisement */
	na = skb_put_zero(reply, sizeof(*na) + na_olen);
	na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
	na->icmph.icmp6_router = isrouter;
	na->icmph.icmp6_override = 1;
	na->icmph.icmp6_solicited = 1;
	na->target = ns->target;
	ether_addr_copy(&na->opt[2], n->ha);
	na->opt[0] = ND_OPT_TARGET_LL_ADDR;
	na->opt[1] = na_olen >> 3;

	na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
		&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
		csum_partial(na, sizeof(*na)+na_olen, 0));

	pip6->payload_len = htons(sizeof(*na)+na_olen);

	skb_push(reply, sizeof(struct ipv6hdr));

	reply->ip_summed = CHECKSUM_UNNECESSARY;

	return reply;
}

static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct in6_addr *daddr;
	const struct ipv6hdr *iphdr;
	struct inet6_dev *in6_dev;
	struct neighbour *n;
	struct nd_msg *msg;

	in6_dev = __in6_dev_get(dev);
	if (!in6_dev)
		goto out;

	iphdr = ipv6_hdr(skb);
	daddr = &iphdr->daddr;
	msg = (struct nd_msg *)(iphdr + 1);

	if (ipv6_addr_loopback(daddr) ||
	    ipv6_addr_is_multicast(&msg->target))
		goto out;

	n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);

	if (n) {
		struct vxlan_fdb *f;
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha, vni);
		if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = vxlan_na_create(skb, n,
					!!(f ? f->flags & NTF_ROUTER : 0));

		neigh_release(n);

		if (reply == NULL)
			goto out;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;

	} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
		union vxlan_addr ipa = {
			.sin6.sin6_addr = msg->target,
			.sin6.sin6_family = AF_INET6,
		};

		vxlan_ip_miss(dev, &ipa);
	}

out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}
#endif

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
	{
		struct iphdr *pip;

		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin.sin_addr.s_addr = pip->daddr,
				.sin.sin_family = AF_INET,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
	{
		struct ipv6hdr *pip6;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			return false;
		pip6 = ipv6_hdr(skb);
		n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
		if (!n && (vxlan->cfg.flags & VXLAN_F_L3MISS)) {
			union vxlan_addr ipa = {
				.sin6.sin6_addr = pip6->daddr,
				.sin6.sin6_family = AF_INET6,
			};

			vxlan_ip_miss(dev, &ipa);
			return false;
		}

		break;
	}
#endif
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	}

	return false;
}

static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
				struct vxlan_metadata *md)
{
	struct vxlanhdr_gbp *gbp;

	if (!md->gbp)
		return;

	gbp = (struct vxlanhdr_gbp *)vxh;
	vxh->vx_flags |= VXLAN_HF_GBP;

	if (md->gbp & VXLAN_GBP_DONT_LEARN)
		gbp->dont_learn = 1;

	if (md->gbp & VXLAN_GBP_POLICY_APPLIED)
		gbp->policy_applied = 1;

	gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}

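/* Editorial sketch, assuming the vxlanhdr_gbp layout from <net/vxlan.h>:
 * a metadata value of (VXLAN_GBP_POLICY_APPLIED | 0x12) leaves the wire
 * with VXLAN_HF_GBP set in vx_flags, the policy_applied bit set, and
 * policy_id == htons(0x12).
 */
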
static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
			       __be16 protocol)
{
	struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;

	gpe->np_applied = 1;
	gpe->next_protocol = tun_p_from_eth_p(protocol);
	if (!gpe->next_protocol)
		return -EPFNOSUPPORT;
	return 0;
}

static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
			   int iphdr_len, __be32 vni,
			   struct vxlan_metadata *md, u32 vxflags,
			   bool udp_sum)
{
	struct vxlanhdr *vxh;
	int min_headroom;
	int err;
	int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
	__be16 inner_protocol = htons(ETH_P_TEB);

	if ((vxflags & VXLAN_F_REMCSUM_TX) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		int csum_start = skb_checksum_start_offset(skb);

		if (csum_start <= VXLAN_MAX_REMCSUM_START &&
		    !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
		    (skb->csum_offset == offsetof(struct udphdr, check) ||
		     skb->csum_offset == offsetof(struct tcphdr, check)))
			type |= SKB_GSO_TUNNEL_REMCSUM;
	}

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
			+ VXLAN_HLEN + iphdr_len;

	/* Need space for new headers (invalidates iph ptr) */
	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		return err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	vxh = __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vni);

	if (type & SKB_GSO_TUNNEL_REMCSUM) {
		unsigned int start;

		start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr);
		vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset);
		vxh->vx_flags |= VXLAN_HF_RCO;

		if (!skb_is_gso(skb)) {
			skb->ip_summed = CHECKSUM_NONE;
			skb->encapsulation = 0;
		}
	}

	if (vxflags & VXLAN_F_GBP)
		vxlan_build_gbp_hdr(vxh, vxflags, md);
	if (vxflags & VXLAN_F_GPE) {
		err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
		if (err < 0)
			return err;
		inner_protocol = skb->protocol;
	}

	skb_set_inner_protocol(skb, inner_protocol);
	return 0;
}

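/* Editorial note: after vxlan_build_skb() the frame reads, outermost
 * first, [outer IP + UDP, added later by udp_tunnel{,6}_xmit_skb()]
 * [VXLAN header][inner frame], with skb->inner_protocol recording what
 * follows the VXLAN header (ETH_P_TEB, or the raw protocol in GPE mode).
 */
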
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
				      struct vxlan_sock *sock4,
				      struct sk_buff *skb, int oif, u8 tos,
				      __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
				      struct dst_cache *dst_cache,
				      const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct rtable *rt = NULL;
	struct flowi4 fl4;

	if (!sock4)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		rt = dst_cache_get_ip4(dst_cache, saddr);
		if (rt)
			return rt;
	}

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = oif;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = *saddr;
	fl4.fl4_dport = dport;
	fl4.fl4_sport = sport;

	rt = ip_route_output_key(vxlan->net, &fl4);
	if (likely(!IS_ERR(rt))) {
		if (rt->dst.dev == dev) {
			netdev_dbg(dev, "circular route to %pI4\n", &daddr);
			ip_rt_put(rt);
			return ERR_PTR(-ELOOP);
		}

		*saddr = fl4.saddr;
		if (use_cache)
			dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr);
	} else {
		netdev_dbg(dev, "no route to %pI4\n", &daddr);
		return ERR_PTR(-ENETUNREACH);
	}
	return rt;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
					  struct net_device *dev,
					  struct vxlan_sock *sock6,
					  struct sk_buff *skb, int oif, u8 tos,
					  __be32 label,
					  const struct in6_addr *daddr,
					  struct in6_addr *saddr,
					  __be16 dport, __be16 sport,
					  struct dst_cache *dst_cache,
					  const struct ip_tunnel_info *info)
{
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct dst_entry *ndst;
	struct flowi6 fl6;
	int err;

	if (!sock6)
		return ERR_PTR(-EIO);

	if (tos && !info)
		use_cache = false;
	if (use_cache) {
		ndst = dst_cache_get_ip6(dst_cache, saddr);
		if (ndst)
			return ndst;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.daddr = *daddr;
	fl6.saddr = *saddr;
	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = IPPROTO_UDP;
	fl6.fl6_dport = dport;
	fl6.fl6_sport = sport;

	err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
					 sock6->sock->sk,
					 &ndst, &fl6);
	if (unlikely(err < 0)) {
		netdev_dbg(dev, "no route to %pI6\n", daddr);
		return ERR_PTR(-ENETUNREACH);
	}

	if (unlikely(ndst->dev == dev)) {
		netdev_dbg(dev, "circular route to %pI6\n", daddr);
		dst_release(ndst);
		return ERR_PTR(-ELOOP);
	}

	*saddr = fl6.saddr;
	if (use_cache)
		dst_cache_set_ip6(dst_cache, ndst, saddr);
	return ndst;
}
#endif

/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan, __be32 vni)
{
	struct pcpu_sw_netstats *tx_stats, *rx_stats;
	union vxlan_addr loopback;
	union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
	struct net_device *dev = skb->dev;
	int len = skb->len;

	tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (remote_ip->sa.sa_family == AF_INET) {
		loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		loopback.sa.sa_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		loopback.sin6.sin6_addr = in6addr_loopback;
		loopback.sa.sa_family = AF_INET6;
#endif
	}

	if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
			    vni);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		dev->stats.rx_dropped++;
	}
}

static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
				 struct vxlan_dev *vxlan,
				 union vxlan_addr *daddr,
				 __be16 dst_port, int dst_ifindex, __be32 vni,
				 struct dst_entry *dst,
				 u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
	/* IPv6 rt-flags are checked against RTF_LOCAL, but the value of
	 * RTF_LOCAL is equal to RTCF_LOCAL. So to keep code simple
	 * we can use RTCF_LOCAL which works for ipv4 and ipv6 route entry.
	 */
	BUILD_BUG_ON(RTCF_LOCAL != RTF_LOCAL);
#endif
	/* Bypass encapsulation if the destination is local */
	if (rt_flags & RTCF_LOCAL &&
	    !(rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		dst_release(dst);
		dst_vxlan = vxlan_find_vni(vxlan->net, dst_ifindex, vni,
					   daddr->sa.sa_family, dst_port,
					   vxlan->cfg.flags);
		if (!dst_vxlan) {
			dev->stats.tx_errors++;
			kfree_skb(skb);

			return -ENOENT;
		}
		vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
		return 1;
	}

	return 0;
}

static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
			   __be32 default_vni, struct vxlan_rdst *rdst,
			   bool did_rsc)
{
	struct dst_cache *dst_cache;
	struct ip_tunnel_info *info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct iphdr *old_iph = ip_hdr(skb);
	union vxlan_addr *dst;
	union vxlan_addr remote_ip, local_ip;
	struct vxlan_metadata _md;
	struct vxlan_metadata *md = &_md;
	__be16 src_port = 0, dst_port;
	struct dst_entry *ndst = NULL;
	__be32 vni, label;
	__u8 tos, ttl;
	int ifindex;
	int err;
	u32 flags = vxlan->cfg.flags;
	bool udp_sum = false;
	bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));

	info = skb_tunnel_info(skb);

	if (rdst) {
		dst = &rdst->remote_ip;
		if (vxlan_addr_any(dst)) {
			if (did_rsc) {
				/* short-circuited back to local bridge */
				vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
				return;
			}
			goto drop;
		}

		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
		vni = (rdst->remote_vni) ? : default_vni;
		ifindex = rdst->remote_ifindex;
		local_ip = vxlan->cfg.saddr;
		dst_cache = &rdst->dst_cache;
		md->gbp = skb->mark;
		if (flags & VXLAN_F_TTL_INHERIT) {
			ttl = ip_tunnel_get_ttl(old_iph, skb);
		} else {
			ttl = vxlan->cfg.ttl;
			if (!ttl && vxlan_addr_multicast(dst))
				ttl = 1;
		}

		tos = vxlan->cfg.tos;
		if (tos == 1)
			tos = ip_tunnel_get_dsfield(old_iph, skb);

		if (dst->sa.sa_family == AF_INET)
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX);
		else
			udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
		label = vxlan->cfg.label;
	} else {
		if (!info) {
			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
				  dev->name);
			goto drop;
		}
		remote_ip.sa.sa_family = ip_tunnel_info_af(info);
		if (remote_ip.sa.sa_family == AF_INET) {
			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
			local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src;
		} else {
			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
			local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
		}
		dst = &remote_ip;
		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
		vni = tunnel_id_to_key32(info->key.tun_id);
		ifindex = 0;
		dst_cache = &info->dst_cache;
		if (info->options_len &&
		    info->key.tun_flags & TUNNEL_VXLAN_OPT)
			md = ip_tunnel_info_opts(info);
		ttl = info->key.ttl;
		tos = info->key.tos;
		label = info->key.label;
		udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	}
	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
				     vxlan->cfg.port_max, true);

	rcu_read_lock();
	if (dst->sa.sa_family == AF_INET) {
		struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
		struct rtable *rt;
		__be16 df = 0;

		if (!ifindex)
			ifindex = sock4->sock->sk->sk_bound_dev_if;

		rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
				     dst->sin.sin_addr.s_addr,
				     &local_ip.sin.sin_addr.s_addr,
				     dst_port, src_port,
				     dst_cache, info);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			goto tx_error;
		}

		if (!info) {
			/* Bypass encapsulation if the destination is local */
			err = encap_bypass_if_local(skb, dev, vxlan, dst,
						    dst_port, ifindex, vni,
						    &rt->dst, rt->rt_flags);
			if (err)
				goto out_unlock;

			if (vxlan->cfg.df == VXLAN_DF_SET) {
				df = htons(IP_DF);
			} else if (vxlan->cfg.df == VXLAN_DF_INHERIT) {
				struct ethhdr *eth = eth_hdr(skb);

				if (ntohs(eth->h_proto) == ETH_P_IPV6 ||
				    (ntohs(eth->h_proto) == ETH_P_IP &&
				     old_iph->frag_off & htons(IP_DF)))
					df = htons(IP_DF);
			}
		} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
			df = htons(IP_DF);
		}

		ndst = &rt->dst;
		skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
		err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto tx_error;

		udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
				    dst->sin.sin_addr.s_addr, tos, ttl, df,
				    src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);

		if (!ifindex)
			ifindex = sock6->sock->sk->sk_bound_dev_if;

		ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
					label, &dst->sin6.sin6_addr,
					&local_ip.sin6.sin6_addr,
					dst_port, src_port,
					dst_cache, info);
		if (IS_ERR(ndst)) {
			err = PTR_ERR(ndst);
			ndst = NULL;
			goto tx_error;
		}

		if (!info) {
			u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;

			err = encap_bypass_if_local(skb, dev, vxlan, dst,
						    dst_port, ifindex, vni,
						    ndst, rt6i_flags);
			if (err)
				goto out_unlock;
		}

		skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);

		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
		ttl = ttl ? : ip6_dst_hoplimit(ndst);
		skb_scrub_packet(skb, xnet);
		err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
				      vni, md, flags, udp_sum);
		if (err < 0)
			goto tx_error;

		udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
				     &local_ip.sin6.sin6_addr,
				     &dst->sin6.sin6_addr, tos, ttl,
				     label, src_port, dst_port, !udp_sum);
#endif
	}
out_unlock:
	rcu_read_unlock();
	return;

drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return;

tx_error:
	rcu_read_unlock();
	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;
	dst_release(ndst);
	dev->stats.tx_errors++;
	kfree_skb(skb);
}

/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *           source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *rdst, *fdst = NULL;
	const struct ip_tunnel_info *info;
	bool did_rsc = false;
	struct vxlan_fdb *f;
	struct ethhdr *eth;
	__be32 vni = 0;

	info = skb_tunnel_info(skb);

	skb_reset_mac_header(skb);

	if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA) {
		if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
		    info->mode & IP_TUNNEL_INFO_TX) {
			vni = tunnel_id_to_key32(info->key.tun_id);
		} else {
			if (info && info->mode & IP_TUNNEL_INFO_TX)
				vxlan_xmit_one(skb, dev, vni, NULL, false);
			else
				kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vxlan->cfg.flags & VXLAN_F_PROXY) {
		eth = eth_hdr(skb);
		if (ntohs(eth->h_proto) == ETH_P_ARP)
			return arp_reduce(dev, skb, vni);
#if IS_ENABLED(CONFIG_IPV6)
		else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
			 pskb_may_pull(skb, sizeof(struct ipv6hdr) +
					    sizeof(struct nd_msg)) &&
			 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *m = (struct nd_msg *)(ipv6_hdr(skb) + 1);

			if (m->icmph.icmp6_code == 0 &&
			    m->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
				return neigh_reduce(dev, skb, vni);
		}
#endif
	}

	eth = eth_hdr(skb);
	f = vxlan_find_mac(vxlan, eth->h_dest, vni);
	did_rsc = false;

	if (f && (f->flags & NTF_ROUTER) && (vxlan->cfg.flags & VXLAN_F_RSC) &&
	    (ntohs(eth->h_proto) == ETH_P_IP ||
	     ntohs(eth->h_proto) == ETH_P_IPV6)) {
		did_rsc = route_shortcircuit(dev, skb);
		if (did_rsc)
			f = vxlan_find_mac(vxlan, eth->h_dest, vni);
	}

	if (f == NULL) {
		f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
		if (f == NULL) {
			if ((vxlan->cfg.flags & VXLAN_F_L2MISS) &&
			    !is_multicast_ether_addr(eth->h_dest))
				vxlan_fdb_miss(vxlan, eth->h_dest);

			dev->stats.tx_dropped++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	list_for_each_entry_rcu(rdst, &f->remotes, list) {
		struct sk_buff *skb1;

		if (!fdst) {
			fdst = rdst;
			continue;
		}
		skb1 = skb_clone(skb, GFP_ATOMIC);
		if (skb1)
			vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
	}

	if (fdst)
		vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(struct timer_list *t)
{
	struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer);
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;

		spin_lock_bh(&vxlan->hash_lock);
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & (NUD_PERMANENT | NUD_NOARP))
				continue;

			if (f->flags & NTF_EXT_LEARNED)
				continue;

			timeout = f->used + vxlan->cfg.age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f, true, true);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
		spin_unlock_bh(&vxlan->hash_lock);
	}

	mod_timer(&vxlan->age_timer, next_timer);
}

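/* Editorial worked example: with the default 300 s ageing interval
 * (FDB_AGE_DEFAULT), an entry last used at jiffies J is purged once
 * jiffies >= J + 300 * HZ; the timer re-arms at the earliest pending
 * timeout, and no later than FDB_AGE_INTERVAL (10 s) from now.
 */
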
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
{
	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

	spin_lock(&vn->sock_lock);
	hlist_del_init_rcu(&vxlan->hlist4.hlist);
#if IS_ENABLED(CONFIG_IPV6)
	hlist_del_init_rcu(&vxlan->hlist6.hlist);
#endif
	spin_unlock(&vn->sock_lock);
}

2547 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
2548 struct vxlan_dev_node *node)
2550 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2551 __be32 vni = vxlan->default_dst.remote_vni;
2553 node->vxlan = vxlan;
2554 spin_lock(&vn->sock_lock);
2555 hlist_add_head_rcu(&node->hlist, vni_head(vs, vni));
2556 spin_unlock(&vn->sock_lock);
2560 /* Set up stats when the device is created */
2560 static int vxlan_init(struct net_device *dev)
2562 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2569 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
2571 struct vxlan_fdb *f;
2573 spin_lock_bh(&vxlan->hash_lock);
2574 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
2576 vxlan_fdb_destroy(vxlan, f, true, true);
2577 spin_unlock_bh(&vxlan->hash_lock);
2580 static void vxlan_uninit(struct net_device *dev)
2582 struct vxlan_dev *vxlan = netdev_priv(dev);
2584 vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
2586 free_percpu(dev->tstats);
2589 /* Start ageing timer and join group when device is brought up */
2590 static int vxlan_open(struct net_device *dev)
2592 struct vxlan_dev *vxlan = netdev_priv(dev);
2595 ret = vxlan_sock_add(vxlan);
2599 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
2600 ret = vxlan_igmp_join(vxlan);
2601 if (ret == -EADDRINUSE)
2604 vxlan_sock_release(vxlan);
2609 if (vxlan->cfg.age_interval)
2610 mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
2615 /* Purge the forwarding table */
2616 static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2620 spin_lock_bh(&vxlan->hash_lock);
2621 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2622 struct hlist_node *p, *n;
2623 hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
2625 = container_of(p, struct vxlan_fdb, hlist);
2626 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
2628 /* the all_zeros_mac entry is deleted in vxlan_uninit() */
2629 if (!is_zero_ether_addr(f->eth_addr))
2630 vxlan_fdb_destroy(vxlan, f, true, true);
2633 spin_unlock_bh(&vxlan->hash_lock);
2636 /* Clean up the timer and forwarding table on shutdown */
2637 static int vxlan_stop(struct net_device *dev)
2639 struct vxlan_dev *vxlan = netdev_priv(dev);
2640 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2643 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
2644 !vxlan_group_used(vn, vxlan))
2645 ret = vxlan_igmp_leave(vxlan);
2647 del_timer_sync(&vxlan->age_timer);
2649 vxlan_flush(vxlan, false);
2650 vxlan_sock_release(vxlan);
2655 /* Stub, nothing needs to be done. */
2656 static void vxlan_set_multicast_list(struct net_device *dev)
2660 static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2662 struct vxlan_dev *vxlan = netdev_priv(dev);
2663 struct vxlan_rdst *dst = &vxlan->default_dst;
2664 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2665 dst->remote_ifindex);
2666 bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6);
2668 /* This check is different from dev->max_mtu, because it looks at
2669 * the lowerdev->mtu rather than the static dev->max_mtu.
2672 int max_mtu = lowerdev->mtu -
2673 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2674 if (new_mtu > max_mtu)
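
/* Editor's worked example (illustrative, not part of the driver): with the
 * headroom constants from <net/vxlan.h>, an IPv4 underlay adds
 * 14 (Ethernet) + 20 (IPv4) + 8 (UDP) + 8 (VXLAN) = 50 bytes
 * (VXLAN_HEADROOM), so a 1500-byte lowerdev allows at most a 1450-byte
 * VXLAN MTU; IPv6's 40-byte header makes VXLAN6_HEADROOM 70 and the
 * limit 1430.
 */
static inline int example_vxlan_max_mtu(int lower_mtu, bool use_ipv6)
{
	return lower_mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
}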
2682 static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
2684 struct vxlan_dev *vxlan = netdev_priv(dev);
2685 struct ip_tunnel_info *info = skb_tunnel_info(skb);
2686 __be16 sport, dport;
2688 sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
2689 vxlan->cfg.port_max, true);
2690 dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
2692 if (ip_tunnel_info_af(info) == AF_INET) {
2693 struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
2696 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
2697 info->key.u.ipv4.dst,
2698 &info->key.u.ipv4.src, dport, sport,
2699 &info->dst_cache, info);
2704 #if IS_ENABLED(CONFIG_IPV6)
2705 struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
2706 struct dst_entry *ndst;
2708 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
2709 info->key.label, &info->key.u.ipv6.dst,
2710 &info->key.u.ipv6.src, dport, sport,
2711 &info->dst_cache, info);
2713 return PTR_ERR(ndst);
2715 #else /* !CONFIG_IPV6 */
2716 return -EPFNOSUPPORT;
2719 info->key.tp_src = sport;
2720 info->key.tp_dst = dport;
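
/* Editor's sketch (hypothetical caller, not part of this file): collect-
 * metadata users such as openvswitch reach the ndo above through the core
 * dev_fill_metadata_dst() wrapper to pre-resolve the tunnel route and UDP
 * ports before transmit.
 */
static int example_resolve_tunnel_dst(struct net_device *vxlan_dev,
				      struct sk_buff *skb)
{
	int err = dev_fill_metadata_dst(vxlan_dev, skb);

	if (err)
		return err;
	/* skb_tunnel_info(skb)->key now carries tp_src and tp_dst. */
	return 0;
}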
2724 static const struct net_device_ops vxlan_netdev_ether_ops = {
2725 .ndo_init = vxlan_init,
2726 .ndo_uninit = vxlan_uninit,
2727 .ndo_open = vxlan_open,
2728 .ndo_stop = vxlan_stop,
2729 .ndo_start_xmit = vxlan_xmit,
2730 .ndo_get_stats64 = ip_tunnel_get_stats64,
2731 .ndo_set_rx_mode = vxlan_set_multicast_list,
2732 .ndo_change_mtu = vxlan_change_mtu,
2733 .ndo_validate_addr = eth_validate_addr,
2734 .ndo_set_mac_address = eth_mac_addr,
2735 .ndo_fdb_add = vxlan_fdb_add,
2736 .ndo_fdb_del = vxlan_fdb_delete,
2737 .ndo_fdb_dump = vxlan_fdb_dump,
2738 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2741 static const struct net_device_ops vxlan_netdev_raw_ops = {
2742 .ndo_init = vxlan_init,
2743 .ndo_uninit = vxlan_uninit,
2744 .ndo_open = vxlan_open,
2745 .ndo_stop = vxlan_stop,
2746 .ndo_start_xmit = vxlan_xmit,
2747 .ndo_get_stats64 = ip_tunnel_get_stats64,
2748 .ndo_change_mtu = vxlan_change_mtu,
2749 .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
2752 /* Info for udev that this is a virtual tunnel endpoint */
2753 static struct device_type vxlan_type = {
2757 /* Calls the caller's ndo_udp_tunnel_add in order to supply
2758 * the listening VXLAN UDP ports. Callers are expected to
2759 * implement ndo_udp_tunnel_add.
2761 static void vxlan_offload_rx_ports(struct net_device *dev, bool push)
2763 struct vxlan_sock *vs;
2764 struct net *net = dev_net(dev);
2765 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2768 spin_lock(&vn->sock_lock);
2769 for (i = 0; i < PORT_HASH_SIZE; ++i) {
2770 hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
2771 unsigned short type;
2773 if (vs->flags & VXLAN_F_GPE)
2774 type = UDP_TUNNEL_TYPE_VXLAN_GPE;
2776 type = UDP_TUNNEL_TYPE_VXLAN;
2779 udp_tunnel_push_rx_port(dev, vs->sock, type);
2781 udp_tunnel_drop_rx_port(dev, vs->sock, type);
2784 spin_unlock(&vn->sock_lock);
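
/* Editor's sketch (hypothetical NIC driver, not part of this file): the
 * receiving side of the push/drop calls above is the driver's
 * ndo_udp_tunnel_add/del callback, using the udp_tunnel_info layout from
 * <net/udp_tunnel.h>.
 */
static void example_nic_udp_tunnel_add(struct net_device *dev,
				       struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
	    ti->type != UDP_TUNNEL_TYPE_VXLAN_GPE)
		return;

	/* Program ntohs(ti->port) into the hardware tunnel parser here. */
}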
2787 /* Initialize the device structure. */
2788 static void vxlan_setup(struct net_device *dev)
2790 struct vxlan_dev *vxlan = netdev_priv(dev);
2793 eth_hw_addr_random(dev);
2796 dev->needs_free_netdev = true;
2797 SET_NETDEV_DEVTYPE(dev, &vxlan_type);
2799 dev->features |= NETIF_F_LLTX;
2800 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2801 dev->features |= NETIF_F_RXCSUM;
2802 dev->features |= NETIF_F_GSO_SOFTWARE;
2804 dev->vlan_features = dev->features;
2805 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
2806 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2807 netif_keep_dst(dev);
2808 dev->priv_flags |= IFF_NO_QUEUE;
2810 /* MTU range: 68 - 65535 */
2811 dev->min_mtu = ETH_MIN_MTU;
2812 dev->max_mtu = ETH_MAX_MTU;
2814 INIT_LIST_HEAD(&vxlan->next);
2815 spin_lock_init(&vxlan->hash_lock);
2817 timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
2821 gro_cells_init(&vxlan->gro_cells, dev);
2823 for (h = 0; h < FDB_HASH_SIZE; ++h)
2824 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
2827 static void vxlan_ether_setup(struct net_device *dev)
2829 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2830 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2831 dev->netdev_ops = &vxlan_netdev_ether_ops;
2834 static void vxlan_raw_setup(struct net_device *dev)
2836 dev->header_ops = NULL;
2837 dev->type = ARPHRD_NONE;
2838 dev->hard_header_len = 0;
2840 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
2841 dev->netdev_ops = &vxlan_netdev_raw_ops;
2844 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
2845 [IFLA_VXLAN_ID] = { .type = NLA_U32 },
2846 [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
2847 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
2848 [IFLA_VXLAN_LINK] = { .type = NLA_U32 },
2849 [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
2850 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
2851 [IFLA_VXLAN_TOS] = { .type = NLA_U8 },
2852 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
2853 [IFLA_VXLAN_LABEL] = { .type = NLA_U32 },
2854 [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
2855 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
2856 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
2857 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
2858 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
2859 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
2860 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
2861 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
2862 [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 },
2863 [IFLA_VXLAN_PORT] = { .type = NLA_U16 },
2864 [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 },
2865 [IFLA_VXLAN_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
2866 [IFLA_VXLAN_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
2867 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
2868 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
2869 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
2870 [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
2871 [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
2872 [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
2873 [IFLA_VXLAN_DF] = { .type = NLA_U8 },
2876 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
2877 struct netlink_ext_ack *extack)
2879 if (tb[IFLA_ADDRESS]) {
2880 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
2881 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
2882 "Provided link layer address is not Ethernet");
2886 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
2887 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS],
2888 "Provided Ethernet address is not unicast");
2889 return -EADDRNOTAVAIL;
2894 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
2896 if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) {
2897 NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU],
2898 "MTU must be between 68 and 65535");
2904 NL_SET_ERR_MSG(extack,
2905 "Required attributes not provided to perform the operation");
2909 if (data[IFLA_VXLAN_ID]) {
2910 u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
2912 if (id >= VXLAN_N_VID) {
2913 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
2914 "VXLAN ID must be lower than 16777216");
2919 if (data[IFLA_VXLAN_PORT_RANGE]) {
2920 const struct ifla_vxlan_port_range *p
2921 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
2923 if (ntohs(p->high) < ntohs(p->low)) {
2924 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
2925 "Invalid source port range");
2930 if (data[IFLA_VXLAN_DF]) {
2931 enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
2933 if (df < 0 || df > VXLAN_DF_MAX) {
2934 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF],
2935 "Invalid DF attribute");
2943 static void vxlan_get_drvinfo(struct net_device *netdev,
2944 struct ethtool_drvinfo *drvinfo)
2946 strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
2947 strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
2950 static const struct ethtool_ops vxlan_ethtool_ops = {
2951 .get_drvinfo = vxlan_get_drvinfo,
2952 .get_link = ethtool_op_get_link,
2955 static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2956 __be16 port, u32 flags, int ifindex)
2958 struct socket *sock;
2959 struct udp_port_cfg udp_conf;
2962 memset(&udp_conf, 0, sizeof(udp_conf));
2965 udp_conf.family = AF_INET6;
2966 udp_conf.use_udp6_rx_checksums =
2967 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2968 udp_conf.ipv6_v6only = 1;
2970 udp_conf.family = AF_INET;
2973 udp_conf.local_udp_port = port;
2974 udp_conf.bind_ifindex = ifindex;
2976 /* Open UDP socket */
2977 err = udp_sock_create(net, &udp_conf, &sock);
2979 return ERR_PTR(err);
2984 /* Create new listen socket if needed */
2985 static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
2986 __be16 port, u32 flags,
2989 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2990 struct vxlan_sock *vs;
2991 struct socket *sock;
2993 struct udp_tunnel_sock_cfg tunnel_cfg;
2995 vs = kzalloc(sizeof(*vs), GFP_KERNEL);
2997 return ERR_PTR(-ENOMEM);
2999 for (h = 0; h < VNI_HASH_SIZE; ++h)
3000 INIT_HLIST_HEAD(&vs->vni_list[h]);
3002 sock = vxlan_create_sock(net, ipv6, port, flags, ifindex);
3005 return ERR_CAST(sock);
3009 refcount_set(&vs->refcnt, 1);
3010 vs->flags = (flags & VXLAN_F_RCV_FLAGS);
3012 spin_lock(&vn->sock_lock);
3013 hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
3014 udp_tunnel_notify_add_rx_port(sock,
3015 (vs->flags & VXLAN_F_GPE) ?
3016 UDP_TUNNEL_TYPE_VXLAN_GPE :
3017 UDP_TUNNEL_TYPE_VXLAN);
3018 spin_unlock(&vn->sock_lock);
3020 /* Mark socket as an encapsulation socket. */
3021 memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
3022 tunnel_cfg.sk_user_data = vs;
3023 tunnel_cfg.encap_type = 1;
3024 tunnel_cfg.encap_rcv = vxlan_rcv;
3025 tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
3026 tunnel_cfg.encap_destroy = NULL;
3027 tunnel_cfg.gro_receive = vxlan_gro_receive;
3028 tunnel_cfg.gro_complete = vxlan_gro_complete;
3030 setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
3035 static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
3037 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
3038 struct vxlan_sock *vs = NULL;
3039 struct vxlan_dev_node *node;
3040 int l3mdev_index = 0;
3042 if (vxlan->cfg.remote_ifindex)
3043 l3mdev_index = l3mdev_master_upper_ifindex_by_index(
3044 vxlan->net, vxlan->cfg.remote_ifindex);
3046 if (!vxlan->cfg.no_share) {
3047 spin_lock(&vn->sock_lock);
3048 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
3049 vxlan->cfg.dst_port, vxlan->cfg.flags,
3051 if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
3052 spin_unlock(&vn->sock_lock);
3055 spin_unlock(&vn->sock_lock);
3058 vs = vxlan_socket_create(vxlan->net, ipv6,
3059 vxlan->cfg.dst_port, vxlan->cfg.flags,
3063 #if IS_ENABLED(CONFIG_IPV6)
3065 rcu_assign_pointer(vxlan->vn6_sock, vs);
3066 node = &vxlan->hlist6;
3070 rcu_assign_pointer(vxlan->vn4_sock, vs);
3071 node = &vxlan->hlist4;
3073 vxlan_vs_add_dev(vs, vxlan, node);
3077 static int vxlan_sock_add(struct vxlan_dev *vxlan)
3079 bool metadata = vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA;
3080 bool ipv6 = vxlan->cfg.flags & VXLAN_F_IPV6 || metadata;
3081 bool ipv4 = !ipv6 || metadata;
3084 RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
3085 #if IS_ENABLED(CONFIG_IPV6)
3086 RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
3088 ret = __vxlan_sock_add(vxlan, true);
3089 if (ret < 0 && ret != -EAFNOSUPPORT)
3094 ret = __vxlan_sock_add(vxlan, false);
3096 vxlan_sock_release(vxlan);
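
/* Editor's note (illustrative, not part of the driver): the socket families
 * that vxlan_sock_add() opens above, restated as a hypothetical helper.
 * Plain IPv4 or IPv6 devices get one socket; COLLECT_METADATA devices are
 * dual-stack so they can carry either family.
 */
static inline void example_sock_families(u32 flags, bool *open_v6,
					 bool *open_v4)
{
	bool metadata = flags & VXLAN_F_COLLECT_METADATA;

	*open_v6 = (flags & VXLAN_F_IPV6) || metadata;
	*open_v4 = !(flags & VXLAN_F_IPV6) || metadata;
}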
3100 static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf,
3101 struct net_device **lower,
3102 struct vxlan_dev *old,
3103 struct netlink_ext_ack *extack)
3105 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
3106 struct vxlan_dev *tmp;
3107 bool use_ipv6 = false;
3109 if (conf->flags & VXLAN_F_GPE) {
3110 /* For now, allow GPE only together with
3111 * COLLECT_METADATA. This can be relaxed later; in such
3112 * a case, the other side of the PtP link will have to be provided.
3115 if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
3116 !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
3117 NL_SET_ERR_MSG(extack,
3118 "VXLAN GPE does not support this combination of attributes");
3123 if (!conf->remote_ip.sa.sa_family && !conf->saddr.sa.sa_family) {
3124 /* Unless IPv6 is explicitly requested, assume IPv4 */
3125 conf->remote_ip.sa.sa_family = AF_INET;
3126 conf->saddr.sa.sa_family = AF_INET;
3127 } else if (!conf->remote_ip.sa.sa_family) {
3128 conf->remote_ip.sa.sa_family = conf->saddr.sa.sa_family;
3129 } else if (!conf->saddr.sa.sa_family) {
3130 conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family;
3133 if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) {
3134 NL_SET_ERR_MSG(extack,
3135 "Local and remote address must be from the same family");
3139 if (vxlan_addr_multicast(&conf->saddr)) {
3140 NL_SET_ERR_MSG(extack, "Local address cannot be multicast");
3144 if (conf->saddr.sa.sa_family == AF_INET6) {
3145 if (!IS_ENABLED(CONFIG_IPV6)) {
3146 NL_SET_ERR_MSG(extack,
3147 "IPv6 support not enabled in the kernel");
3148 return -EPFNOSUPPORT;
3151 conf->flags |= VXLAN_F_IPV6;
3153 if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) {
3155 ipv6_addr_type(&conf->saddr.sin6.sin6_addr);
3157 ipv6_addr_type(&conf->remote_ip.sin6.sin6_addr);
3159 if (local_type & IPV6_ADDR_LINKLOCAL) {
3160 if (!(remote_type & IPV6_ADDR_LINKLOCAL) &&
3161 (remote_type != IPV6_ADDR_ANY)) {
3162 NL_SET_ERR_MSG(extack,
3163 "Invalid combination of local and remote address scopes");
3167 conf->flags |= VXLAN_F_IPV6_LINKLOCAL;
3170 (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) {
3171 NL_SET_ERR_MSG(extack,
3172 "Invalid combination of local and remote address scopes");
3176 conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL;
3181 if (conf->label && !use_ipv6) {
3182 NL_SET_ERR_MSG(extack,
3183 "Label attribute only applies to IPv6 VXLAN devices");
3187 if (conf->remote_ifindex) {
3188 struct net_device *lowerdev;
3190 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
3192 NL_SET_ERR_MSG(extack,
3193 "Invalid local interface, device not found");
3197 #if IS_ENABLED(CONFIG_IPV6)
3199 struct inet6_dev *idev = __in6_dev_get(lowerdev);
3200 if (idev && idev->cnf.disable_ipv6) {
3201 NL_SET_ERR_MSG(extack,
3202 "IPv6 support disabled by administrator");
3210 if (vxlan_addr_multicast(&conf->remote_ip)) {
3211 NL_SET_ERR_MSG(extack,
3212 "Local interface required for multicast remote destination");
3217 #if IS_ENABLED(CONFIG_IPV6)
3218 if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) {
3219 NL_SET_ERR_MSG(extack,
3220 "Local interface required for link-local local/remote addresses");
3228 if (!conf->dst_port) {
3229 if (conf->flags & VXLAN_F_GPE)
3230 conf->dst_port = htons(4790); /* IANA VXLAN-GPE port */
3232 conf->dst_port = htons(vxlan_port);
3235 if (!conf->age_interval)
3236 conf->age_interval = FDB_AGE_DEFAULT;
3238 list_for_each_entry(tmp, &vn->vxlan_list, next) {
3242 if (tmp->cfg.vni != conf->vni)
3244 if (tmp->cfg.dst_port != conf->dst_port)
3246 if ((tmp->cfg.flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)) !=
3247 (conf->flags & (VXLAN_F_RCV_FLAGS | VXLAN_F_IPV6)))
3250 if ((conf->flags & VXLAN_F_IPV6_LINKLOCAL) &&
3251 tmp->cfg.remote_ifindex != conf->remote_ifindex)
3254 NL_SET_ERR_MSG(extack,
3255 "A VXLAN device with the specified VNI already exists");
3262 static void vxlan_config_apply(struct net_device *dev,
3263 struct vxlan_config *conf,
3264 struct net_device *lowerdev,
3265 struct net *src_net,
3268 struct vxlan_dev *vxlan = netdev_priv(dev);
3269 struct vxlan_rdst *dst = &vxlan->default_dst;
3270 unsigned short needed_headroom = ETH_HLEN;
3271 bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6);
3272 int max_mtu = ETH_MAX_MTU;
3275 if (conf->flags & VXLAN_F_GPE)
3276 vxlan_raw_setup(dev);
3278 vxlan_ether_setup(dev);
3281 dev->mtu = conf->mtu;
3283 vxlan->net = src_net;
3286 dst->remote_vni = conf->vni;
3288 memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
3291 dst->remote_ifindex = conf->remote_ifindex;
3293 dev->gso_max_size = lowerdev->gso_max_size;
3294 dev->gso_max_segs = lowerdev->gso_max_segs;
3296 needed_headroom = lowerdev->hard_header_len;
3298 max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
3300 if (max_mtu < ETH_MIN_MTU)
3301 max_mtu = ETH_MIN_MTU;
3303 if (!changelink && !conf->mtu)
3307 if (dev->mtu > max_mtu)
3310 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
3311 needed_headroom += VXLAN6_HEADROOM;
3313 needed_headroom += VXLAN_HEADROOM;
3314 dev->needed_headroom = needed_headroom;
3316 memcpy(&vxlan->cfg, conf, sizeof(*conf));
3319 static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
3320 struct vxlan_config *conf, bool changelink,
3321 struct netlink_ext_ack *extack)
3323 struct vxlan_dev *vxlan = netdev_priv(dev);
3324 struct net_device *lowerdev;
3327 ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack);
3331 vxlan_config_apply(dev, conf, lowerdev, src_net, changelink);
3336 static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3337 struct vxlan_config *conf,
3338 struct netlink_ext_ack *extack)
3340 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3341 struct vxlan_dev *vxlan = netdev_priv(dev);
3342 struct vxlan_fdb *f = NULL;
3345 err = vxlan_dev_configure(net, dev, conf, false, extack);
3349 dev->ethtool_ops = &vxlan_ethtool_ops;
3351 /* create an fdb entry for a valid default destination */
3352 if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
3353 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3354 &vxlan->default_dst.remote_ip,
3355 NUD_REACHABLE | NUD_PERMANENT,
3356 vxlan->cfg.dst_port,
3357 vxlan->default_dst.remote_vni,
3358 vxlan->default_dst.remote_vni,
3359 vxlan->default_dst.remote_ifindex,
3365 err = register_netdevice(dev);
3369 err = rtnl_configure_link(dev, NULL);
3371 unregister_netdevice(dev);
3375 /* notify default fdb entry */
3377 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
3380 list_add(&vxlan->next, &vn->vxlan_list);
3384 vxlan_fdb_destroy(vxlan, f, false, false);
3388 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
3389 struct net_device *dev, struct vxlan_config *conf,
3392 struct vxlan_dev *vxlan = netdev_priv(dev);
3394 memset(conf, 0, sizeof(*conf));
3396 /* for a changelink operation, start with the existing cfg */
3398 memcpy(conf, &vxlan->cfg, sizeof(*conf));
3400 if (data[IFLA_VXLAN_ID]) {
3401 __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID]));
3403 if (changelink && (vni != conf->vni))
3405 conf->vni = vni;
3408 if (data[IFLA_VXLAN_GROUP]) {
3409 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET))
3412 conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
3413 conf->remote_ip.sa.sa_family = AF_INET;
3414 } else if (data[IFLA_VXLAN_GROUP6]) {
3415 if (!IS_ENABLED(CONFIG_IPV6))
3416 return -EPFNOSUPPORT;
3418 if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6))
3421 conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
3422 conf->remote_ip.sa.sa_family = AF_INET6;
3425 if (data[IFLA_VXLAN_LOCAL]) {
3426 if (changelink && (conf->saddr.sa.sa_family != AF_INET))
3429 conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
3430 conf->saddr.sa.sa_family = AF_INET;
3431 } else if (data[IFLA_VXLAN_LOCAL6]) {
3432 if (!IS_ENABLED(CONFIG_IPV6))
3433 return -EPFNOSUPPORT;
3435 if (changelink && (conf->saddr.sa.sa_family != AF_INET6))
3438 /* TODO: respect scope id */
3439 conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
3440 conf->saddr.sa.sa_family = AF_INET6;
3443 if (data[IFLA_VXLAN_LINK])
3444 conf->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
3446 if (data[IFLA_VXLAN_TOS])
3447 conf->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
3449 if (data[IFLA_VXLAN_TTL])
3450 conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
3452 if (data[IFLA_VXLAN_TTL_INHERIT]) {
3455 conf->flags |= VXLAN_F_TTL_INHERIT;
3458 if (data[IFLA_VXLAN_LABEL])
3459 conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) &
3460 IPV6_FLOWLABEL_MASK;
3462 if (data[IFLA_VXLAN_LEARNING]) {
3463 if (nla_get_u8(data[IFLA_VXLAN_LEARNING]))
3464 conf->flags |= VXLAN_F_LEARN;
3466 conf->flags &= ~VXLAN_F_LEARN;
3467 } else if (!changelink) {
3468 /* default to learn on a new device */
3469 conf->flags |= VXLAN_F_LEARN;
3472 if (data[IFLA_VXLAN_AGEING])
3473 conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
3475 if (data[IFLA_VXLAN_PROXY]) {
3478 if (nla_get_u8(data[IFLA_VXLAN_PROXY]))
3479 conf->flags |= VXLAN_F_PROXY;
3482 if (data[IFLA_VXLAN_RSC]) {
3485 if (nla_get_u8(data[IFLA_VXLAN_RSC]))
3486 conf->flags |= VXLAN_F_RSC;
3489 if (data[IFLA_VXLAN_L2MISS]) {
3492 if (nla_get_u8(data[IFLA_VXLAN_L2MISS]))
3493 conf->flags |= VXLAN_F_L2MISS;
3496 if (data[IFLA_VXLAN_L3MISS]) {
3499 if (nla_get_u8(data[IFLA_VXLAN_L3MISS]))
3500 conf->flags |= VXLAN_F_L3MISS;
3503 if (data[IFLA_VXLAN_LIMIT]) {
3506 conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
3509 if (data[IFLA_VXLAN_COLLECT_METADATA]) {
3512 if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
3513 conf->flags |= VXLAN_F_COLLECT_METADATA;
3516 if (data[IFLA_VXLAN_PORT_RANGE]) {
3518 const struct ifla_vxlan_port_range *p
3519 = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
3520 conf->port_min = ntohs(p->low);
3521 conf->port_max = ntohs(p->high);
3527 if (data[IFLA_VXLAN_PORT]) {
3530 conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
3533 if (data[IFLA_VXLAN_UDP_CSUM]) {
3536 if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
3537 conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX;
3540 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) {
3543 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
3544 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
3547 if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) {
3550 if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
3551 conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
3554 if (data[IFLA_VXLAN_REMCSUM_TX]) {
3557 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
3558 conf->flags |= VXLAN_F_REMCSUM_TX;
3561 if (data[IFLA_VXLAN_REMCSUM_RX]) {
3564 if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
3565 conf->flags |= VXLAN_F_REMCSUM_RX;
3568 if (data[IFLA_VXLAN_GBP]) {
3571 conf->flags |= VXLAN_F_GBP;
3574 if (data[IFLA_VXLAN_GPE]) {
3577 conf->flags |= VXLAN_F_GPE;
3580 if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) {
3583 conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
3589 conf->mtu = nla_get_u32(tb[IFLA_MTU]);
3592 if (data[IFLA_VXLAN_DF])
3593 conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
3598 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
3599 struct nlattr *tb[], struct nlattr *data[],
3600 struct netlink_ext_ack *extack)
3602 struct vxlan_config conf;
3605 err = vxlan_nl2conf(tb, data, dev, &conf, false);
3609 return __vxlan_dev_create(src_net, dev, &conf, extack);
3612 static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3613 struct nlattr *data[],
3614 struct netlink_ext_ack *extack)
3616 struct vxlan_dev *vxlan = netdev_priv(dev);
3617 struct vxlan_rdst *dst = &vxlan->default_dst;
3618 unsigned long old_age_interval;
3619 struct vxlan_rdst old_dst;
3620 struct vxlan_config conf;
3621 struct vxlan_fdb *f = NULL;
3624 err = vxlan_nl2conf(tb, data,
3629 old_age_interval = vxlan->cfg.age_interval;
3630 memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
3632 err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
3636 if (old_age_interval != vxlan->cfg.age_interval)
3637 mod_timer(&vxlan->age_timer, jiffies);
3639 /* handle default dst entry */
3640 if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
3641 spin_lock_bh(&vxlan->hash_lock);
3642 if (!vxlan_addr_any(&old_dst.remote_ip))
3643 __vxlan_fdb_delete(vxlan, all_zeros_mac,
3645 vxlan->cfg.dst_port,
3648 old_dst.remote_ifindex,
3651 if (!vxlan_addr_any(&dst->remote_ip)) {
3652 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3654 NUD_REACHABLE | NUD_PERMANENT,
3655 vxlan->cfg.dst_port,
3658 dst->remote_ifindex,
3661 spin_unlock_bh(&vxlan->hash_lock);
3664 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
3665 RTM_NEWNEIGH, true);
3667 spin_unlock_bh(&vxlan->hash_lock);
3673 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3675 struct vxlan_dev *vxlan = netdev_priv(dev);
3677 vxlan_flush(vxlan, true);
3679 gro_cells_destroy(&vxlan->gro_cells);
3680 list_del(&vxlan->next);
3681 unregister_netdevice_queue(dev, head);
3684 static size_t vxlan_get_size(const struct net_device *dev)
3687 return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
3688 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
3689 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
3690 nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
3691 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
3692 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
3693 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
3694 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */
3695 nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
3696 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
3697 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
3698 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
3699 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
3700 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
3701 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */
3702 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
3703 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
3704 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + /* IFLA_VXLAN_PORT_RANGE */
3705 nla_total_size(sizeof(__be16)) + /* IFLA_VXLAN_PORT */
3706 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_CSUM */
3707 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_TX */
3708 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_UDP_ZERO_CSUM6_RX */
3709 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
3710 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
3714 static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
3716 const struct vxlan_dev *vxlan = netdev_priv(dev);
3717 const struct vxlan_rdst *dst = &vxlan->default_dst;
3718 struct ifla_vxlan_port_range ports = {
3719 .low = htons(vxlan->cfg.port_min),
3720 .high = htons(vxlan->cfg.port_max),
3723 if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni)))
3724 goto nla_put_failure;
3726 if (!vxlan_addr_any(&dst->remote_ip)) {
3727 if (dst->remote_ip.sa.sa_family == AF_INET) {
3728 if (nla_put_in_addr(skb, IFLA_VXLAN_GROUP,
3729 dst->remote_ip.sin.sin_addr.s_addr))
3730 goto nla_put_failure;
3731 #if IS_ENABLED(CONFIG_IPV6)
3733 if (nla_put_in6_addr(skb, IFLA_VXLAN_GROUP6,
3734 &dst->remote_ip.sin6.sin6_addr))
3735 goto nla_put_failure;
3740 if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
3741 goto nla_put_failure;
3743 if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
3744 if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
3745 if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
3746 vxlan->cfg.saddr.sin.sin_addr.s_addr))
3747 goto nla_put_failure;
3748 #if IS_ENABLED(CONFIG_IPV6)
3750 if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
3751 &vxlan->cfg.saddr.sin6.sin6_addr))
3752 goto nla_put_failure;
3757 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
3758 nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
3759 !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
3760 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
3761 nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
3762 nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
3763 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
3764 !!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
3765 nla_put_u8(skb, IFLA_VXLAN_PROXY,
3766 !!(vxlan->cfg.flags & VXLAN_F_PROXY)) ||
3767 nla_put_u8(skb, IFLA_VXLAN_RSC,
3768 !!(vxlan->cfg.flags & VXLAN_F_RSC)) ||
3769 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
3770 !!(vxlan->cfg.flags & VXLAN_F_L2MISS)) ||
3771 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
3772 !!(vxlan->cfg.flags & VXLAN_F_L3MISS)) ||
3773 nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
3774 !!(vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)) ||
3775 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
3776 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
3777 nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
3778 nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
3779 !(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ||
3780 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
3781 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ||
3782 nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX,
3783 !!(vxlan->cfg.flags & VXLAN_F_UDP_ZERO_CSUM6_RX)) ||
3784 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_TX,
3785 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_TX)) ||
3786 nla_put_u8(skb, IFLA_VXLAN_REMCSUM_RX,
3787 !!(vxlan->cfg.flags & VXLAN_F_REMCSUM_RX)))
3788 goto nla_put_failure;
3790 if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
3791 goto nla_put_failure;
3793 if (vxlan->cfg.flags & VXLAN_F_GBP &&
3794 nla_put_flag(skb, IFLA_VXLAN_GBP))
3795 goto nla_put_failure;
3797 if (vxlan->cfg.flags & VXLAN_F_GPE &&
3798 nla_put_flag(skb, IFLA_VXLAN_GPE))
3799 goto nla_put_failure;
3801 if (vxlan->cfg.flags & VXLAN_F_REMCSUM_NOPARTIAL &&
3802 nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
3803 goto nla_put_failure;
3811 static struct net *vxlan_get_link_net(const struct net_device *dev)
3813 struct vxlan_dev *vxlan = netdev_priv(dev);
3818 static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
3820 .maxtype = IFLA_VXLAN_MAX,
3821 .policy = vxlan_policy,
3822 .priv_size = sizeof(struct vxlan_dev),
3823 .setup = vxlan_setup,
3824 .validate = vxlan_validate,
3825 .newlink = vxlan_newlink,
3826 .changelink = vxlan_changelink,
3827 .dellink = vxlan_dellink,
3828 .get_size = vxlan_get_size,
3829 .fill_info = vxlan_fill_info,
3830 .get_link_net = vxlan_get_link_net,
3833 struct net_device *vxlan_dev_create(struct net *net, const char *name,
3834 u8 name_assign_type,
3835 struct vxlan_config *conf)
3837 struct nlattr *tb[IFLA_MAX + 1];
3838 struct net_device *dev;
3841 memset(&tb, 0, sizeof(tb));
3843 dev = rtnl_create_link(net, name, name_assign_type,
3844 &vxlan_link_ops, tb, NULL);
3848 err = __vxlan_dev_create(net, dev, conf, NULL);
3851 return ERR_PTR(err);
3854 err = rtnl_configure_link(dev, NULL);
3856 LIST_HEAD(list_kill);
3858 vxlan_dellink(dev, &list_kill);
3859 unregister_netdevice_many(&list_kill);
3860 return ERR_PTR(err);
3865 EXPORT_SYMBOL_GPL(vxlan_dev_create);
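
/* Editor's sketch (hypothetical module, not part of this file): creating a
 * metadata-mode VXLAN device through the exported helper above, the way an
 * in-kernel user such as openvswitch does. Minimal error handling; the
 * device name is arbitrary.
 */
static struct net_device *example_create_vxlan(struct net *net)
{
	struct vxlan_config conf;

	memset(&conf, 0, sizeof(conf));
	conf.dst_port = htons(4789);		/* IANA-assigned VXLAN port */
	conf.flags = VXLAN_F_COLLECT_METADATA;	/* externally controlled */

	return vxlan_dev_create(net, "vxlanex0", NET_NAME_USER, &conf);
}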
3867 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
3868 struct net_device *dev)
3870 struct vxlan_dev *vxlan, *next;
3871 LIST_HEAD(list_kill);
3873 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
3874 struct vxlan_rdst *dst = &vxlan->default_dst;
3876 /* In case we created the vxlan device with carrier
3877 * and we lose the carrier due to module unload,
3878 * we also need to remove the vxlan device. In other
3879 * cases it's not necessary: remote_ifindex
3880 * is 0 here, so nothing matches.
3882 if (dst->remote_ifindex == dev->ifindex)
3883 vxlan_dellink(vxlan->dev, &list_kill);
3886 unregister_netdevice_many(&list_kill);
3889 static int vxlan_netdevice_event(struct notifier_block *unused,
3890 unsigned long event, void *ptr)
3892 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3893 struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
3895 if (event == NETDEV_UNREGISTER) {
3896 vxlan_offload_rx_ports(dev, false);
3897 vxlan_handle_lowerdev_unregister(vn, dev);
3898 } else if (event == NETDEV_REGISTER) {
3899 vxlan_offload_rx_ports(dev, true);
3900 } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
3901 event == NETDEV_UDP_TUNNEL_DROP_INFO) {
3902 vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
3908 static struct notifier_block vxlan_notifier_block __read_mostly = {
3909 .notifier_call = vxlan_netdevice_event,
3913 vxlan_fdb_offloaded_set(struct net_device *dev,
3914 struct switchdev_notifier_vxlan_fdb_info *fdb_info)
3916 struct vxlan_dev *vxlan = netdev_priv(dev);
3917 struct vxlan_rdst *rdst;
3918 struct vxlan_fdb *f;
3920 spin_lock_bh(&vxlan->hash_lock);
3922 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
3926 rdst = vxlan_fdb_find_rdst(f, &fdb_info->remote_ip,
3927 fdb_info->remote_port,
3928 fdb_info->remote_vni,
3929 fdb_info->remote_ifindex);
3933 rdst->offloaded = fdb_info->offloaded;
3936 spin_unlock_bh(&vxlan->hash_lock);
3940 vxlan_fdb_external_learn_add(struct net_device *dev,
3941 struct switchdev_notifier_vxlan_fdb_info *fdb_info)
3943 struct vxlan_dev *vxlan = netdev_priv(dev);
3946 spin_lock_bh(&vxlan->hash_lock);
3947 err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
3949 NLM_F_CREATE | NLM_F_REPLACE,
3950 fdb_info->remote_port,
3952 fdb_info->remote_vni,
3953 fdb_info->remote_ifindex,
3954 NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
3956 spin_unlock_bh(&vxlan->hash_lock);
3962 vxlan_fdb_external_learn_del(struct net_device *dev,
3963 struct switchdev_notifier_vxlan_fdb_info *fdb_info)
3965 struct vxlan_dev *vxlan = netdev_priv(dev);
3966 struct vxlan_fdb *f;
3969 spin_lock_bh(&vxlan->hash_lock);
3971 f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
3974 else if (f->flags & NTF_EXT_LEARNED)
3975 err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr,
3976 fdb_info->remote_ip,
3977 fdb_info->remote_port,
3979 fdb_info->remote_vni,
3980 fdb_info->remote_ifindex,
3983 spin_unlock_bh(&vxlan->hash_lock);
3988 static int vxlan_switchdev_event(struct notifier_block *unused,
3989 unsigned long event, void *ptr)
3991 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3992 struct switchdev_notifier_vxlan_fdb_info *fdb_info;
3996 case SWITCHDEV_VXLAN_FDB_OFFLOADED:
3997 vxlan_fdb_offloaded_set(dev, ptr);
3999 case SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE:
4001 err = vxlan_fdb_external_learn_add(dev, fdb_info);
4003 err = notifier_from_errno(err);
4006 fdb_info->offloaded = true;
4007 vxlan_fdb_offloaded_set(dev, fdb_info);
4009 case SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE:
4011 err = vxlan_fdb_external_learn_del(dev, fdb_info);
4013 err = notifier_from_errno(err);
4016 fdb_info->offloaded = false;
4017 vxlan_fdb_offloaded_set(dev, fdb_info);
4024 static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
4025 .notifier_call = vxlan_switchdev_event,
4028 static __net_init int vxlan_init_net(struct net *net)
4030 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
4033 INIT_LIST_HEAD(&vn->vxlan_list);
4034 spin_lock_init(&vn->sock_lock);
4036 for (h = 0; h < PORT_HASH_SIZE; ++h)
4037 INIT_HLIST_HEAD(&vn->sock_list[h]);
4042 static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
4044 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
4045 struct vxlan_dev *vxlan, *next;
4046 struct net_device *dev, *aux;
4049 for_each_netdev_safe(net, dev, aux)
4050 if (dev->rtnl_link_ops == &vxlan_link_ops)
4051 unregister_netdevice_queue(dev, head);
4053 list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
4054 /* If vxlan->dev is in the same netns, it has already been added
4055 * to the list by the previous loop.
4057 if (!net_eq(dev_net(vxlan->dev), net)) {
4058 gro_cells_destroy(&vxlan->gro_cells);
4059 unregister_netdevice_queue(vxlan->dev, head);
4063 for (h = 0; h < PORT_HASH_SIZE; ++h)
4064 WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
4067 static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
4073 list_for_each_entry(net, net_list, exit_list)
4074 vxlan_destroy_tunnels(net, &list);
4076 unregister_netdevice_many(&list);
4080 static struct pernet_operations vxlan_net_ops = {
4081 .init = vxlan_init_net,
4082 .exit_batch = vxlan_exit_batch_net,
4083 .id = &vxlan_net_id,
4084 .size = sizeof(struct vxlan_net),
4087 static int __init vxlan_init_module(void)
4091 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
4093 rc = register_pernet_subsys(&vxlan_net_ops);
4097 rc = register_netdevice_notifier(&vxlan_notifier_block);
4101 rc = register_switchdev_notifier(&vxlan_switchdev_notifier_block);
4105 rc = rtnl_link_register(&vxlan_link_ops);
4111 unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
4113 unregister_netdevice_notifier(&vxlan_notifier_block);
4115 unregister_pernet_subsys(&vxlan_net_ops);
4119 late_initcall(vxlan_init_module);
4121 static void __exit vxlan_cleanup_module(void)
4123 rtnl_link_unregister(&vxlan_link_ops);
4124 unregister_switchdev_notifier(&vxlan_switchdev_notifier_block);
4125 unregister_netdevice_notifier(&vxlan_notifier_block);
4126 unregister_pernet_subsys(&vxlan_net_ops);
4127 /* rcu_barrier() is called by netns */
4129 module_exit(vxlan_cleanup_module);
4131 MODULE_LICENSE("GPL");
4132 MODULE_VERSION(VXLAN_VERSION);
4133 MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
4134 MODULE_DESCRIPTION("Driver for VXLAN encapsulated traffic");
4135 MODULE_ALIAS_RTNL_LINK("vxlan");