2 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable; otherwise, round-robin the list.
24 * Fixed routing subtrees.
27 #define pr_fmt(fmt) "IPv6: " fmt
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <linux/jhash.h>
48 #include <net/net_namespace.h>
51 #include <net/ip6_fib.h>
52 #include <net/ip6_route.h>
53 #include <net/ndisc.h>
54 #include <net/addrconf.h>
56 #include <linux/rtnetlink.h>
58 #include <net/dst_metadata.h>
60 #include <net/netevent.h>
61 #include <net/netlink.h>
62 #include <net/nexthop.h>
63 #include <net/lwtunnel.h>
64 #include <net/ip_tunnels.h>
65 #include <net/l3mdev.h>
66 #include <trace/events/fib6.h>
68 #include <linux/uaccess.h>
71 #include <linux/sysctl.h>
75 RT6_NUD_FAIL_HARD = -3,
76 RT6_NUD_FAIL_PROBE = -2,
77 RT6_NUD_FAIL_DO_RR = -1,
81 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
82 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
83 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
84 static unsigned int ip6_mtu(const struct dst_entry *dst);
85 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
86 static void ip6_dst_destroy(struct dst_entry *);
87 static void ip6_dst_ifdown(struct dst_entry *,
88 struct net_device *dev, int how);
89 static int ip6_dst_gc(struct dst_ops *ops);
91 static int ip6_pkt_discard(struct sk_buff *skb);
92 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
93 static int ip6_pkt_prohibit(struct sk_buff *skb);
94 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
95 static void ip6_link_failure(struct sk_buff *skb);
96 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
97 struct sk_buff *skb, u32 mtu);
98 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
100 static void rt6_dst_from_metrics_check(struct rt6_info *rt);
101 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
102 static size_t rt6_nlmsg_size(struct rt6_info *rt);
103 static int rt6_fill_node(struct net *net,
104 struct sk_buff *skb, struct rt6_info *rt,
105 struct in6_addr *dst, struct in6_addr *src,
106 int iif, int type, u32 portid, u32 seq,
108 static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
109 struct in6_addr *daddr,
110 struct in6_addr *saddr);
112 #ifdef CONFIG_IPV6_ROUTE_INFO
113 static struct rt6_info *rt6_add_route_info(struct net *net,
114 const struct in6_addr *prefix, int prefixlen,
115 const struct in6_addr *gwaddr,
116 struct net_device *dev,
118 static struct rt6_info *rt6_get_route_info(struct net *net,
119 const struct in6_addr *prefix, int prefixlen,
120 const struct in6_addr *gwaddr,
121 struct net_device *dev);
124 struct uncached_list {
126 struct list_head head;
129 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
131 static void rt6_uncached_list_add(struct rt6_info *rt)
133 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
135 rt->rt6i_uncached_list = ul;
137 spin_lock_bh(&ul->lock);
138 list_add_tail(&rt->rt6i_uncached, &ul->head);
139 spin_unlock_bh(&ul->lock);
142 static void rt6_uncached_list_del(struct rt6_info *rt)
144 if (!list_empty(&rt->rt6i_uncached)) {
145 struct uncached_list *ul = rt->rt6i_uncached_list;
146 struct net *net = dev_net(rt->dst.dev);
148 spin_lock_bh(&ul->lock);
149 list_del(&rt->rt6i_uncached);
150 atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
151 spin_unlock_bh(&ul->lock);
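/* When a device is going away, re-parent any uncached routes that still
 * reference it onto the loopback device so their dst/idev stay valid.
 */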
155 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
157 struct net_device *loopback_dev = net->loopback_dev;
160 if (dev == loopback_dev)
163 for_each_possible_cpu(cpu) {
164 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
167 spin_lock_bh(&ul->lock);
168 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
169 struct inet6_dev *rt_idev = rt->rt6i_idev;
170 struct net_device *rt_dev = rt->dst.dev;
172 if (rt_idev->dev == dev) {
173 rt->rt6i_idev = in6_dev_get(loopback_dev);
174 in6_dev_put(rt_idev);
178 rt->dst.dev = loopback_dev;
179 dev_hold(rt->dst.dev);
183 spin_unlock_bh(&ul->lock);
187 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
189 return dst_metrics_write_ptr(rt->dst.from);
192 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
194 struct rt6_info *rt = (struct rt6_info *)dst;
196 if (rt->rt6i_flags & RTF_PCPU)
197 return rt6_pcpu_cow_metrics(rt);
198 else if (rt->rt6i_flags & RTF_CACHE)
201 return dst_cow_metrics_generic(dst, old);
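/* Neighbour lookup key: prefer the route's gateway when one is set,
 * otherwise fall back to the destination address (taken from the skb
 * when the caller does not supply one).
 */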
204 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
208 struct in6_addr *p = &rt->rt6i_gateway;
210 if (!ipv6_addr_any(p))
211 return (const void *) p;
213 return &ipv6_hdr(skb)->daddr;
217 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
221 struct rt6_info *rt = (struct rt6_info *) dst;
224 daddr = choose_neigh_daddr(rt, skb, daddr);
225 n = __ipv6_neigh_lookup(dst->dev, daddr);
228 return neigh_create(&nd_tbl, daddr, dst->dev);
231 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
233 struct net_device *dev = dst->dev;
234 struct rt6_info *rt = (struct rt6_info *)dst;
236 daddr = choose_neigh_daddr(rt, NULL, daddr);
239 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
241 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
243 __ipv6_confirm_neigh(dev, daddr);
246 static struct dst_ops ip6_dst_ops_template = {
250 .check = ip6_dst_check,
251 .default_advmss = ip6_default_advmss,
253 .cow_metrics = ipv6_cow_metrics,
254 .destroy = ip6_dst_destroy,
255 .ifdown = ip6_dst_ifdown,
256 .negative_advice = ip6_negative_advice,
257 .link_failure = ip6_link_failure,
258 .update_pmtu = ip6_rt_update_pmtu,
259 .redirect = rt6_do_redirect,
260 .local_out = __ip6_local_out,
261 .neigh_lookup = ip6_neigh_lookup,
262 .confirm_neigh = ip6_confirm_neigh,
265 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
267 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
269 return mtu ? : dst->dev->mtu;
272 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
273 struct sk_buff *skb, u32 mtu)
277 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
282 static struct dst_ops ip6_dst_blackhole_ops = {
284 .destroy = ip6_dst_destroy,
285 .check = ip6_dst_check,
286 .mtu = ip6_blackhole_mtu,
287 .default_advmss = ip6_default_advmss,
288 .update_pmtu = ip6_rt_blackhole_update_pmtu,
289 .redirect = ip6_rt_blackhole_redirect,
290 .cow_metrics = dst_cow_metrics_generic,
291 .neigh_lookup = ip6_neigh_lookup,
294 static const u32 ip6_template_metrics[RTAX_MAX] = {
295 [RTAX_HOPLIMIT - 1] = 0,
298 static const struct rt6_info ip6_null_entry_template = {
300 .__refcnt = ATOMIC_INIT(1),
302 .obsolete = DST_OBSOLETE_FORCE_CHK,
303 .error = -ENETUNREACH,
304 .input = ip6_pkt_discard,
305 .output = ip6_pkt_discard_out,
307 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
308 .rt6i_protocol = RTPROT_KERNEL,
309 .rt6i_metric = ~(u32) 0,
310 .rt6i_ref = ATOMIC_INIT(1),
313 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
315 static const struct rt6_info ip6_prohibit_entry_template = {
317 .__refcnt = ATOMIC_INIT(1),
319 .obsolete = DST_OBSOLETE_FORCE_CHK,
321 .input = ip6_pkt_prohibit,
322 .output = ip6_pkt_prohibit_out,
324 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
325 .rt6i_protocol = RTPROT_KERNEL,
326 .rt6i_metric = ~(u32) 0,
327 .rt6i_ref = ATOMIC_INIT(1),
330 static const struct rt6_info ip6_blk_hole_entry_template = {
332 .__refcnt = ATOMIC_INIT(1),
334 .obsolete = DST_OBSOLETE_FORCE_CHK,
336 .input = dst_discard,
337 .output = dst_discard_out,
339 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
340 .rt6i_protocol = RTPROT_KERNEL,
341 .rt6i_metric = ~(u32) 0,
342 .rt6i_ref = ATOMIC_INIT(1),
347 static void rt6_info_init(struct rt6_info *rt)
349 struct dst_entry *dst = &rt->dst;
351 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
352 INIT_LIST_HEAD(&rt->rt6i_siblings);
353 INIT_LIST_HEAD(&rt->rt6i_uncached);
356 /* allocate dst with ip6_dst_ops */
357 static struct rt6_info *__ip6_dst_alloc(struct net *net,
358 struct net_device *dev,
361 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
362 1, DST_OBSOLETE_FORCE_CHK, flags);
366 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
372 struct rt6_info *ip6_dst_alloc(struct net *net,
373 struct net_device *dev,
376 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
379 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
380 if (!rt->rt6i_pcpu) {
381 dst_release_immediate(&rt->dst);
388 EXPORT_SYMBOL(ip6_dst_alloc);
390 static void ip6_dst_destroy(struct dst_entry *dst)
392 struct rt6_info *rt = (struct rt6_info *)dst;
393 struct rt6_exception_bucket *bucket;
394 struct dst_entry *from = dst->from;
395 struct inet6_dev *idev;
397 dst_destroy_metrics_generic(dst);
398 free_percpu(rt->rt6i_pcpu);
399 rt6_uncached_list_del(rt);
401 idev = rt->rt6i_idev;
403 rt->rt6i_idev = NULL;
406 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1);
408 rt->rt6i_exception_bucket = NULL;
416 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
419 struct rt6_info *rt = (struct rt6_info *)dst;
420 struct inet6_dev *idev = rt->rt6i_idev;
421 struct net_device *loopback_dev =
422 dev_net(dev)->loopback_dev;
424 if (idev && idev->dev != loopback_dev) {
425 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
427 rt->rt6i_idev = loopback_idev;
433 static bool __rt6_check_expired(const struct rt6_info *rt)
435 if (rt->rt6i_flags & RTF_EXPIRES)
436 return time_after(jiffies, rt->dst.expires);
441 static bool rt6_check_expired(const struct rt6_info *rt)
443 if (rt->rt6i_flags & RTF_EXPIRES) {
444 if (time_after(jiffies, rt->dst.expires))
446 } else if (rt->dst.from) {
447 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
448 rt6_check_expired((struct rt6_info *)rt->dst.from);
453 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
454 struct flowi6 *fl6, int oif,
457 struct rt6_info *sibling, *next_sibling;
460 /* We might have already computed the hash for ICMPv6 errors. In such
461 * case it will always be non-zero. Otherwise now is the time to do it.
464 fl6->mp_hash = rt6_multipath_hash(fl6, NULL);
466 route_choosen = fl6->mp_hash % (match->rt6i_nsiblings + 1);
467 /* Don't change the route if route_choosen == 0
468 * (the sibling list does not include the route itself)
471 list_for_each_entry_safe(sibling, next_sibling,
472 &match->rt6i_siblings, rt6i_siblings) {
474 if (route_choosen == 0) {
475 if (rt6_score_route(sibling, oif, strict) < 0)
485 * Route lookup. rcu_read_lock() should be held.
488 static inline struct rt6_info *rt6_device_match(struct net *net,
490 const struct in6_addr *saddr,
494 struct rt6_info *local = NULL;
495 struct rt6_info *sprt;
497 if (!oif && ipv6_addr_any(saddr))
500 for (sprt = rt; sprt; sprt = rcu_dereference(sprt->dst.rt6_next)) {
501 struct net_device *dev = sprt->dst.dev;
504 if (dev->ifindex == oif)
506 if (dev->flags & IFF_LOOPBACK) {
507 if (!sprt->rt6i_idev ||
508 sprt->rt6i_idev->dev->ifindex != oif) {
509 if (flags & RT6_LOOKUP_F_IFACE)
512 local->rt6i_idev->dev->ifindex == oif)
518 if (ipv6_chk_addr(net, saddr, dev,
519 flags & RT6_LOOKUP_F_IFACE))
528 if (flags & RT6_LOOKUP_F_IFACE)
529 return net->ipv6.ip6_null_entry;
535 #ifdef CONFIG_IPV6_ROUTER_PREF
536 struct __rt6_probe_work {
537 struct work_struct work;
538 struct in6_addr target;
539 struct net_device *dev;
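/* Deferred work item: send a Neighbor Solicitation towards the gateway
 * (via its solicited-node multicast address) from process context.
 */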
542 static void rt6_probe_deferred(struct work_struct *w)
544 struct in6_addr mcaddr;
545 struct __rt6_probe_work *work =
546 container_of(w, struct __rt6_probe_work, work);
548 addrconf_addr_solict_mult(&work->target, &mcaddr);
549 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
554 static void rt6_probe(struct rt6_info *rt)
556 struct __rt6_probe_work *work;
557 struct neighbour *neigh;
559 * Okay, this does not seem to be appropriate
560 * for now, however, we need to check if it
561 * is really so; aka Router Reachability Probing.
563 * Router Reachability Probe MUST be rate-limited
564 * to no more than one per minute.
566 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
569 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
571 if (neigh->nud_state & NUD_VALID)
575 write_lock(&neigh->lock);
576 if (!(neigh->nud_state & NUD_VALID) &&
579 rt->rt6i_idev->cnf.rtr_probe_interval)) {
580 work = kmalloc(sizeof(*work), GFP_ATOMIC);
582 __neigh_set_probe_once(neigh);
584 write_unlock(&neigh->lock);
586 work = kmalloc(sizeof(*work), GFP_ATOMIC);
590 INIT_WORK(&work->work, rt6_probe_deferred);
591 work->target = rt->rt6i_gateway;
592 dev_hold(rt->dst.dev);
593 work->dev = rt->dst.dev;
594 schedule_work(&work->work);
598 rcu_read_unlock_bh();
601 static inline void rt6_probe(struct rt6_info *rt)
607 * Default Router Selection (RFC 2461 6.3.6)
609 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
611 struct net_device *dev = rt->dst.dev;
612 if (!oif || dev->ifindex == oif)
614 if ((dev->flags & IFF_LOOPBACK) &&
615 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
620 static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
622 struct neighbour *neigh;
623 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
625 if (rt->rt6i_flags & RTF_NONEXTHOP ||
626 !(rt->rt6i_flags & RTF_GATEWAY))
627 return RT6_NUD_SUCCEED;
630 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
632 read_lock(&neigh->lock);
633 if (neigh->nud_state & NUD_VALID)
634 ret = RT6_NUD_SUCCEED;
635 #ifdef CONFIG_IPV6_ROUTER_PREF
636 else if (!(neigh->nud_state & NUD_FAILED))
637 ret = RT6_NUD_SUCCEED;
639 ret = RT6_NUD_FAIL_PROBE;
641 read_unlock(&neigh->lock);
643 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
644 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
646 rcu_read_unlock_bh();
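/* Score a route for default router selection: interface match, the
 * advertised router preference (when CONFIG_IPV6_ROUTER_PREF is set)
 * and, for strict lookups, neighbour reachability.
 */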
651 static int rt6_score_route(struct rt6_info *rt, int oif,
656 m = rt6_check_dev(rt, oif);
657 if (!m && (strict & RT6_LOOKUP_F_IFACE))
658 return RT6_NUD_FAIL_HARD;
659 #ifdef CONFIG_IPV6_ROUTER_PREF
660 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
662 if (strict & RT6_LOOKUP_F_REACHABLE) {
663 int n = rt6_check_neigh(rt);
670 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
671 int *mpri, struct rt6_info *match,
675 bool match_do_rr = false;
676 struct inet6_dev *idev = rt->rt6i_idev;
677 struct net_device *dev = rt->dst.dev;
679 if (dev && !netif_carrier_ok(dev) &&
680 idev->cnf.ignore_routes_with_linkdown &&
681 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
684 if (rt6_check_expired(rt))
687 m = rt6_score_route(rt, oif, strict);
688 if (m == RT6_NUD_FAIL_DO_RR) {
690 m = 0; /* lowest valid score */
691 } else if (m == RT6_NUD_FAIL_HARD) {
695 if (strict & RT6_LOOKUP_F_REACHABLE)
698 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
700 *do_rr = match_do_rr;
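/* Scan the routes of a fib6 leaf that share the given metric, starting
 * at rr_head, and return the best-scoring match; this is what implements
 * the round-robin selection of equally good default routers.
 */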
708 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
709 struct rt6_info *leaf,
710 struct rt6_info *rr_head,
711 u32 metric, int oif, int strict,
714 struct rt6_info *rt, *match, *cont;
719 for (rt = rr_head; rt; rt = rcu_dereference(rt->dst.rt6_next)) {
720 if (rt->rt6i_metric != metric) {
725 match = find_match(rt, oif, strict, &mpri, match, do_rr);
728 for (rt = leaf; rt && rt != rr_head;
729 rt = rcu_dereference(rt->dst.rt6_next)) {
730 if (rt->rt6i_metric != metric) {
735 match = find_match(rt, oif, strict, &mpri, match, do_rr);
741 for (rt = cont; rt; rt = rcu_dereference(rt->dst.rt6_next))
742 match = find_match(rt, oif, strict, &mpri, match, do_rr);
747 static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
750 struct rt6_info *leaf = rcu_dereference(fn->leaf);
751 struct rt6_info *match, *rt0;
756 return net->ipv6.ip6_null_entry;
758 rt0 = rcu_dereference(fn->rr_ptr);
762 /* Double check to make sure fn is not an intermediate node
763 * and fn->leaf does not point to its child's leaf
764 * (This might happen if all routes under fn are deleted from
765 * the tree and fib6_repair_tree() is called on the node.)
767 key_plen = rt0->rt6i_dst.plen;
768 #ifdef CONFIG_IPV6_SUBTREES
769 if (rt0->rt6i_src.plen)
770 key_plen = rt0->rt6i_src.plen;
772 if (fn->fn_bit != key_plen)
773 return net->ipv6.ip6_null_entry;
775 match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict,
779 struct rt6_info *next = rcu_dereference(rt0->dst.rt6_next);
781 /* no entries matched; do round-robin */
782 if (!next || next->rt6i_metric != rt0->rt6i_metric)
786 spin_lock_bh(&leaf->rt6i_table->tb6_lock);
787 /* make sure next is not being deleted from the tree */
789 rcu_assign_pointer(fn->rr_ptr, next);
790 spin_unlock_bh(&leaf->rt6i_table->tb6_lock);
794 return match ? match : net->ipv6.ip6_null_entry;
797 static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
799 return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
802 #ifdef CONFIG_IPV6_ROUTE_INFO
803 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
804 const struct in6_addr *gwaddr)
806 struct net *net = dev_net(dev);
807 struct route_info *rinfo = (struct route_info *) opt;
808 struct in6_addr prefix_buf, *prefix;
810 unsigned long lifetime;
813 if (len < sizeof(struct route_info)) {
817 /* Sanity check for prefix_len and length */
818 if (rinfo->length > 3) {
820 } else if (rinfo->prefix_len > 128) {
822 } else if (rinfo->prefix_len > 64) {
823 if (rinfo->length < 2) {
826 } else if (rinfo->prefix_len > 0) {
827 if (rinfo->length < 1) {
832 pref = rinfo->route_pref;
833 if (pref == ICMPV6_ROUTER_PREF_INVALID)
836 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
838 if (rinfo->length == 3)
839 prefix = (struct in6_addr *)rinfo->prefix;
841 /* this function is safe */
842 ipv6_addr_prefix(&prefix_buf,
843 (struct in6_addr *)rinfo->prefix,
845 prefix = &prefix_buf;
848 if (rinfo->prefix_len == 0)
849 rt = rt6_get_dflt_router(gwaddr, dev);
851 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
854 if (rt && !lifetime) {
860 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
863 rt->rt6i_flags = RTF_ROUTEINFO |
864 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
867 if (!addrconf_finite_timeout(lifetime))
868 rt6_clean_expires(rt);
870 rt6_set_expires(rt, jiffies + HZ * lifetime);
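/* Walk back up the lookup tree (descending into source subtrees where
 * present) until a node that carries route information is found or the
 * root is reached.
 */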
878 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
879 struct in6_addr *saddr)
881 struct fib6_node *pn, *sn;
883 if (fn->fn_flags & RTN_TL_ROOT)
885 pn = rcu_dereference(fn->parent);
886 sn = FIB6_SUBTREE(pn);
888 fn = fib6_lookup(sn, NULL, saddr);
891 if (fn->fn_flags & RTN_RTINFO)
896 static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
899 struct rt6_info *rt = *prt;
901 if (dst_hold_safe(&rt->dst))
904 rt = net->ipv6.ip6_null_entry;
913 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
914 struct fib6_table *table,
915 struct flowi6 *fl6, int flags)
917 struct rt6_info *rt, *rt_cache;
918 struct fib6_node *fn;
921 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
923 rt = rcu_dereference(fn->leaf);
925 rt = net->ipv6.ip6_null_entry;
927 rt = rt6_device_match(net, rt, &fl6->saddr,
928 fl6->flowi6_oif, flags);
929 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
930 rt = rt6_multipath_select(rt, fl6,
931 fl6->flowi6_oif, flags);
933 if (rt == net->ipv6.ip6_null_entry) {
934 fn = fib6_backtrack(fn, &fl6->saddr);
938 /* Search through exception table */
939 rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
943 if (ip6_hold_safe(net, &rt, true))
944 dst_use_noref(&rt->dst, jiffies);
948 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
954 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
957 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
959 EXPORT_SYMBOL_GPL(ip6_route_lookup);
961 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
962 const struct in6_addr *saddr, int oif, int strict)
964 struct flowi6 fl6 = {
968 struct dst_entry *dst;
969 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
972 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
973 flags |= RT6_LOOKUP_F_HAS_SADDR;
976 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
978 return (struct rt6_info *) dst;
984 EXPORT_SYMBOL(rt6_lookup);
986 /* ip6_ins_rt is called with FREE table->tb6_lock.
987 * It takes a new route entry; if the addition fails for any reason, the route is released.
989 * Caller must hold dst before calling it.
992 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
993 struct mx6_config *mxc,
994 struct netlink_ext_ack *extack)
997 struct fib6_table *table;
999 table = rt->rt6i_table;
1000 spin_lock_bh(&table->tb6_lock);
1001 err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
1002 spin_unlock_bh(&table->tb6_lock);
1007 int ip6_ins_rt(struct rt6_info *rt)
1009 struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
1010 struct mx6_config mxc = { .mx = NULL, };
1012 /* Hold dst to account for the reference from the fib6 tree */
1014 return __ip6_ins_rt(rt, &info, &mxc, NULL);
1017 /* called with rcu_lock held */
1018 static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
1020 struct net_device *dev = rt->dst.dev;
1022 if (rt->rt6i_flags & RTF_LOCAL) {
1023 /* for copies of local routes, dst->dev needs to be the device
1024 * itself if it is a master device, the master device if the
1025 * device is enslaved, and the loopback device as the default
1027 if (netif_is_l3_slave(dev) &&
1028 !rt6_need_strict(&rt->rt6i_dst.addr))
1029 dev = l3mdev_master_dev_rcu(dev);
1030 else if (!netif_is_l3_master(dev))
1031 dev = dev_net(dev)->loopback_dev;
1032 /* last case is netif_is_l3_master(dev) is true in which
1033 * case we want dev returned to be dev
1040 static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
1041 const struct in6_addr *daddr,
1042 const struct in6_addr *saddr)
1044 struct net_device *dev;
1045 struct rt6_info *rt;
1051 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
1052 ort = (struct rt6_info *)ort->dst.from;
1055 dev = ip6_rt_get_dev_rcu(ort);
1056 rt = __ip6_dst_alloc(dev_net(dev), dev, 0);
1061 ip6_rt_copy_init(rt, ort);
1062 rt->rt6i_flags |= RTF_CACHE;
1063 rt->rt6i_metric = 0;
1064 rt->dst.flags |= DST_HOST;
1065 rt->rt6i_dst.addr = *daddr;
1066 rt->rt6i_dst.plen = 128;
1068 if (!rt6_is_gw_or_nonexthop(ort)) {
1069 if (ort->rt6i_dst.plen != 128 &&
1070 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
1071 rt->rt6i_flags |= RTF_ANYCAST;
1072 #ifdef CONFIG_IPV6_SUBTREES
1073 if (rt->rt6i_src.plen && saddr) {
1074 rt->rt6i_src.addr = *saddr;
1075 rt->rt6i_src.plen = 128;
1083 static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
1085 struct net_device *dev;
1086 struct rt6_info *pcpu_rt;
1089 dev = ip6_rt_get_dev_rcu(rt);
1090 pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags);
1094 ip6_rt_copy_init(pcpu_rt, rt);
1095 pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
1096 pcpu_rt->rt6i_flags |= RTF_PCPU;
1100 /* It should be called with rcu_read_lock() acquired */
1101 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
1103 struct rt6_info *pcpu_rt, **p;
1105 p = this_cpu_ptr(rt->rt6i_pcpu);
1108 if (pcpu_rt && ip6_hold_safe(NULL, &pcpu_rt, false))
1109 rt6_dst_from_metrics_check(pcpu_rt);
1114 static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1116 struct rt6_info *pcpu_rt, *prev, **p;
1118 pcpu_rt = ip6_rt_pcpu_alloc(rt);
1120 struct net *net = dev_net(rt->dst.dev);
1122 dst_hold(&net->ipv6.ip6_null_entry->dst);
1123 return net->ipv6.ip6_null_entry;
1126 dst_hold(&pcpu_rt->dst);
1127 p = this_cpu_ptr(rt->rt6i_pcpu);
1128 prev = cmpxchg(p, NULL, pcpu_rt);
1131 rt6_dst_from_metrics_check(pcpu_rt);
1135 /* exception hash table implementation */
1137 static DEFINE_SPINLOCK(rt6_exception_lock);
1139 /* Remove rt6_ex from hash table and free the memory
1140 * Caller must hold rt6_exception_lock
1142 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1143 struct rt6_exception *rt6_ex)
1147 if (!bucket || !rt6_ex)
1150 net = dev_net(rt6_ex->rt6i->dst.dev);
1151 rt6_ex->rt6i->rt6i_node = NULL;
1152 hlist_del_rcu(&rt6_ex->hlist);
1153 rt6_release(rt6_ex->rt6i);
1154 kfree_rcu(rt6_ex, rcu);
1155 WARN_ON_ONCE(!bucket->depth);
1157 net->ipv6.rt6_stats->fib_rt_cache--;
1160 /* Remove oldest rt6_ex in bucket and free the memory
1161 * Caller must hold rt6_exception_lock
1163 static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1165 struct rt6_exception *rt6_ex, *oldest = NULL;
1170 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1171 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1174 rt6_remove_exception(bucket, oldest);
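/* Hash of the (dst, src) pair used to index a route's exception bucket
 * array; the source address only contributes under CONFIG_IPV6_SUBTREES.
 */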
1177 static u32 rt6_exception_hash(const struct in6_addr *dst,
1178 const struct in6_addr *src)
1180 static u32 seed __read_mostly;
1183 net_get_random_once(&seed, sizeof(seed));
1184 val = jhash(dst, sizeof(*dst), seed);
1186 #ifdef CONFIG_IPV6_SUBTREES
1188 val = jhash(src, sizeof(*src), val);
1190 return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1193 /* Helper function to find the cached rt in the hash table
1194 * and update bucket pointer to point to the bucket for this
1195 * (daddr, saddr) pair
1196 * Caller must hold rt6_exception_lock
1198 static struct rt6_exception *
1199 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1200 const struct in6_addr *daddr,
1201 const struct in6_addr *saddr)
1203 struct rt6_exception *rt6_ex;
1206 if (!(*bucket) || !daddr)
1209 hval = rt6_exception_hash(daddr, saddr);
1212 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1213 struct rt6_info *rt6 = rt6_ex->rt6i;
1214 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1216 #ifdef CONFIG_IPV6_SUBTREES
1217 if (matched && saddr)
1218 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1226 /* Helper function to find the cached rt in the hash table
1227 * and update bucket pointer to point to the bucket for this
1228 * (daddr, saddr) pair
1229 * Caller must hold rcu_read_lock()
1231 static struct rt6_exception *
1232 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1233 const struct in6_addr *daddr,
1234 const struct in6_addr *saddr)
1236 struct rt6_exception *rt6_ex;
1239 WARN_ON_ONCE(!rcu_read_lock_held());
1241 if (!(*bucket) || !daddr)
1244 hval = rt6_exception_hash(daddr, saddr);
1247 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1248 struct rt6_info *rt6 = rt6_ex->rt6i;
1249 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1251 #ifdef CONFIG_IPV6_SUBTREES
1252 if (matched && saddr)
1253 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1261 static int rt6_insert_exception(struct rt6_info *nrt,
1262 struct rt6_info *ort)
1264 struct net *net = dev_net(ort->dst.dev);
1265 struct rt6_exception_bucket *bucket;
1266 struct in6_addr *src_key = NULL;
1267 struct rt6_exception *rt6_ex;
1270 /* ort can't be a cache or pcpu route */
1271 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
1272 ort = (struct rt6_info *)ort->dst.from;
1273 WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));
1275 spin_lock_bh(&rt6_exception_lock);
1277 if (ort->exception_bucket_flushed) {
1282 bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
1283 lockdep_is_held(&rt6_exception_lock));
1285 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1291 rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
1294 #ifdef CONFIG_IPV6_SUBTREES
1295 /* rt6i_src.plen != 0 indicates ort is in subtree
1296 * and exception table is indexed by a hash of
1297 * both rt6i_dst and rt6i_src.
1298 * Otherwise, the exception table is indexed by
1299 * a hash of only rt6i_dst.
1301 if (ort->rt6i_src.plen)
1302 src_key = &nrt->rt6i_src.addr;
1305 /* Update rt6i_prefsrc as it could be changed
1306 * in rt6_remove_prefsrc()
1308 nrt->rt6i_prefsrc = ort->rt6i_prefsrc;
1309 /* rt6_mtu_change() might lower mtu on ort.
1310 * Only insert this exception route if its mtu
1311 * is less than ort's mtu value.
1313 if (nrt->rt6i_pmtu >= dst_mtu(&ort->dst)) {
1318 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1321 rt6_remove_exception(bucket, rt6_ex);
1323 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1329 rt6_ex->stamp = jiffies;
1330 atomic_inc(&nrt->rt6i_ref);
1331 nrt->rt6i_node = ort->rt6i_node;
1332 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1334 net->ipv6.rt6_stats->fib_rt_cache++;
1336 if (bucket->depth > FIB6_MAX_DEPTH)
1337 rt6_exception_remove_oldest(bucket);
1340 spin_unlock_bh(&rt6_exception_lock);
1342 /* Update fn->fn_sernum to invalidate all cached dst */
1344 fib6_update_sernum(ort);
1349 void rt6_flush_exceptions(struct rt6_info *rt)
1351 struct rt6_exception_bucket *bucket;
1352 struct rt6_exception *rt6_ex;
1353 struct hlist_node *tmp;
1356 spin_lock_bh(&rt6_exception_lock);
1357 /* Prevent rt6_insert_exception() from recreating the bucket list */
1358 rt->exception_bucket_flushed = 1;
1360 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1361 lockdep_is_held(&rt6_exception_lock));
1365 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1366 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
1367 rt6_remove_exception(bucket, rt6_ex);
1368 WARN_ON_ONCE(bucket->depth);
1373 spin_unlock_bh(&rt6_exception_lock);
1376 /* Find cached rt in the hash table inside passed in rt
1377 * Caller has to hold rcu_read_lock()
1379 static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
1380 struct in6_addr *daddr,
1381 struct in6_addr *saddr)
1383 struct rt6_exception_bucket *bucket;
1384 struct in6_addr *src_key = NULL;
1385 struct rt6_exception *rt6_ex;
1386 struct rt6_info *res = NULL;
1388 bucket = rcu_dereference(rt->rt6i_exception_bucket);
1390 #ifdef CONFIG_IPV6_SUBTREES
1391 /* rt6i_src.plen != 0 indicates rt is in subtree
1392 * and exception table is indexed by a hash of
1393 * both rt6i_dst and rt6i_src.
1394 * Otherwise, the exception table is indexed by
1395 * a hash of only rt6i_dst.
1397 if (rt->rt6i_src.plen)
1400 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1402 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1408 /* Remove the passed in cached rt from the hash table that contains it */
1409 int rt6_remove_exception_rt(struct rt6_info *rt)
1411 struct rt6_info *from = (struct rt6_info *)rt->dst.from;
1412 struct rt6_exception_bucket *bucket;
1413 struct in6_addr *src_key = NULL;
1414 struct rt6_exception *rt6_ex;
1418 !(rt->rt6i_flags & RTF_CACHE))
1421 if (!rcu_access_pointer(from->rt6i_exception_bucket))
1424 spin_lock_bh(&rt6_exception_lock);
1425 bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
1426 lockdep_is_held(&rt6_exception_lock));
1427 #ifdef CONFIG_IPV6_SUBTREES
1428 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1429 * and exception table is indexed by a hash of
1430 * both rt6i_dst and rt6i_src.
1431 * Otherwise, the exception table is indexed by
1432 * a hash of only rt6i_dst.
1434 if (from->rt6i_src.plen)
1435 src_key = &rt->rt6i_src.addr;
1437 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1441 rt6_remove_exception(bucket, rt6_ex);
1447 spin_unlock_bh(&rt6_exception_lock);
1451 /* Find rt6_ex which contains the passed in rt cache and refresh its stamp */
1454 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1456 struct rt6_info *from = (struct rt6_info *)rt->dst.from;
1457 struct rt6_exception_bucket *bucket;
1458 struct in6_addr *src_key = NULL;
1459 struct rt6_exception *rt6_ex;
1462 !(rt->rt6i_flags & RTF_CACHE))
1466 bucket = rcu_dereference(from->rt6i_exception_bucket);
1468 #ifdef CONFIG_IPV6_SUBTREES
1469 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1470 * and exception table is indexed by a hash of
1471 * both rt6i_dst and rt6i_src.
1472 * Otherwise, the exception table is indexed by
1473 * a hash of only rt6i_dst.
1475 if (from->rt6i_src.plen)
1476 src_key = &rt->rt6i_src.addr;
1478 rt6_ex = __rt6_find_exception_rcu(&bucket,
1482 rt6_ex->stamp = jiffies;
1487 static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
1489 struct rt6_exception_bucket *bucket;
1490 struct rt6_exception *rt6_ex;
1493 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1494 lockdep_is_held(&rt6_exception_lock));
1497 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1498 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1499 rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
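/* Clamp the PMTU stored in this route's cached exceptions down to @mtu
 * wherever the cached value is currently larger.
 */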
1506 static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu)
1508 struct rt6_exception_bucket *bucket;
1509 struct rt6_exception *rt6_ex;
1512 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1513 lockdep_is_held(&rt6_exception_lock));
1516 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1517 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1518 struct rt6_info *entry = rt6_ex->rt6i;
1519 /* For RTF_CACHE with rt6i_pmtu == 0
1520 * (i.e. a redirected route),
1521 * the metrics of its rt->dst.from have already been updated.
1524 if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu)
1525 entry->rt6i_pmtu = mtu;
1532 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
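/* Remove cached exception routes whose gateway matches @gateway; only
 * entries carrying both RTF_CACHE and RTF_GATEWAY are affected.
 */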
1534 static void rt6_exceptions_clean_tohost(struct rt6_info *rt,
1535 struct in6_addr *gateway)
1537 struct rt6_exception_bucket *bucket;
1538 struct rt6_exception *rt6_ex;
1539 struct hlist_node *tmp;
1542 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1545 spin_lock_bh(&rt6_exception_lock);
1546 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1547 lockdep_is_held(&rt6_exception_lock));
1550 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1551 hlist_for_each_entry_safe(rt6_ex, tmp,
1552 &bucket->chain, hlist) {
1553 struct rt6_info *entry = rt6_ex->rt6i;
1555 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
1556 RTF_CACHE_GATEWAY &&
1557 ipv6_addr_equal(gateway,
1558 &entry->rt6i_gateway)) {
1559 rt6_remove_exception(bucket, rt6_ex);
1566 spin_unlock_bh(&rt6_exception_lock);
1569 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
1570 struct rt6_exception *rt6_ex,
1571 struct fib6_gc_args *gc_args,
1574 struct rt6_info *rt = rt6_ex->rt6i;
1576 if (atomic_read(&rt->dst.__refcnt) == 1 &&
1577 time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
1578 RT6_TRACE("aging clone %p\n", rt);
1579 rt6_remove_exception(bucket, rt6_ex);
1581 } else if (rt->rt6i_flags & RTF_GATEWAY) {
1582 struct neighbour *neigh;
1583 __u8 neigh_flags = 0;
1585 neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
1587 neigh_flags = neigh->flags;
1588 neigh_release(neigh);
1590 if (!(neigh_flags & NTF_ROUTER)) {
1591 RT6_TRACE("purging route %p via non-router but gateway\n",
1593 rt6_remove_exception(bucket, rt6_ex);
1600 void rt6_age_exceptions(struct rt6_info *rt,
1601 struct fib6_gc_args *gc_args,
1604 struct rt6_exception_bucket *bucket;
1605 struct rt6_exception *rt6_ex;
1606 struct hlist_node *tmp;
1609 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1612 spin_lock_bh(&rt6_exception_lock);
1613 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1614 lockdep_is_held(&rt6_exception_lock));
1617 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1618 hlist_for_each_entry_safe(rt6_ex, tmp,
1619 &bucket->chain, hlist) {
1620 rt6_age_examine_exception(bucket, rt6_ex,
1626 spin_unlock_bh(&rt6_exception_lock);
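/* Core policy-routing lookup: select the best fib6 entry for the flow,
 * prefer a matching cached exception when one exists, and otherwise hand
 * back either a per-cpu clone or, for FLOWI_FLAG_KNOWN_NH without a
 * gateway, an uncached RTF_CACHE clone.
 */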
1629 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
1630 int oif, struct flowi6 *fl6, int flags)
1632 struct fib6_node *fn, *saved_fn;
1633 struct rt6_info *rt, *rt_cache;
1636 strict |= flags & RT6_LOOKUP_F_IFACE;
1637 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
1638 if (net->ipv6.devconf_all->forwarding == 0)
1639 strict |= RT6_LOOKUP_F_REACHABLE;
1643 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1646 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1650 rt = rt6_select(net, fn, oif, strict);
1651 if (rt->rt6i_nsiblings)
1652 rt = rt6_multipath_select(rt, fl6, oif, strict);
1653 if (rt == net->ipv6.ip6_null_entry) {
1654 fn = fib6_backtrack(fn, &fl6->saddr);
1656 goto redo_rt6_select;
1657 else if (strict & RT6_LOOKUP_F_REACHABLE) {
1658 /* also consider unreachable route */
1659 strict &= ~RT6_LOOKUP_F_REACHABLE;
1661 goto redo_rt6_select;
1665 /* Search through exception table */
1666 rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
1670 if (rt == net->ipv6.ip6_null_entry) {
1673 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1675 } else if (rt->rt6i_flags & RTF_CACHE) {
1676 if (ip6_hold_safe(net, &rt, true)) {
1677 dst_use_noref(&rt->dst, jiffies);
1678 rt6_dst_from_metrics_check(rt);
1681 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
1683 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
1684 !(rt->rt6i_flags & RTF_GATEWAY))) {
1685 /* Create a RTF_CACHE clone which will not be
1686 * owned by the fib6 tree. It is for the special case where
1687 * the daddr in the skb during the neighbor look-up is different
1688 * from the fl6->daddr used to look-up route here.
1691 struct rt6_info *uncached_rt;
1693 if (ip6_hold_safe(net, &rt, true)) {
1694 dst_use_noref(&rt->dst, jiffies);
1698 goto uncached_rt_out;
1702 uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
1703 dst_release(&rt->dst);
1706 /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
1707 * No need for another dst_hold()
1709 rt6_uncached_list_add(uncached_rt);
1710 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
1712 uncached_rt = net->ipv6.ip6_null_entry;
1713 dst_hold(&uncached_rt->dst);
1717 trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
1721 /* Get a percpu copy */
1723 struct rt6_info *pcpu_rt;
1725 dst_use_noref(&rt->dst, jiffies);
1727 pcpu_rt = rt6_get_pcpu_route(rt);
1730 /* atomic_inc_not_zero() is needed when using rcu */
1731 if (atomic_inc_not_zero(&rt->rt6i_ref)) {
1732 /* No dst_hold() on rt is needed because grabbing
1733 * rt->rt6i_ref makes sure rt can't be released.
1735 pcpu_rt = rt6_make_pcpu_route(rt);
1738 /* rt is already removed from tree */
1739 pcpu_rt = net->ipv6.ip6_null_entry;
1740 dst_hold(&pcpu_rt->dst);
1745 trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
1749 EXPORT_SYMBOL_GPL(ip6_pol_route);
1751 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
1752 struct flowi6 *fl6, int flags)
1754 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
1757 struct dst_entry *ip6_route_input_lookup(struct net *net,
1758 struct net_device *dev,
1759 struct flowi6 *fl6, int flags)
1761 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1762 flags |= RT6_LOOKUP_F_IFACE;
1764 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
1766 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
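/* Extract the L3 keys used for multipath hashing.  For ICMPv6 errors the
 * keys come from the offending packet embedded in the error, so the error
 * follows the same path as the flow that triggered it.
 */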
1768 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
1769 struct flow_keys *keys)
1771 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
1772 const struct ipv6hdr *key_iph = outer_iph;
1773 const struct ipv6hdr *inner_iph;
1774 const struct icmp6hdr *icmph;
1775 struct ipv6hdr _inner_iph;
1777 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
1780 icmph = icmp6_hdr(skb);
1781 if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
1782 icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
1783 icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
1784 icmph->icmp6_type != ICMPV6_PARAMPROB)
1787 inner_iph = skb_header_pointer(skb,
1788 skb_transport_offset(skb) + sizeof(*icmph),
1789 sizeof(_inner_iph), &_inner_iph);
1793 key_iph = inner_iph;
1795 memset(keys, 0, sizeof(*keys));
1796 keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1797 keys->addrs.v6addrs.src = key_iph->saddr;
1798 keys->addrs.v6addrs.dst = key_iph->daddr;
1799 keys->tags.flow_label = ip6_flowinfo(key_iph);
1800 keys->basic.ip_proto = key_iph->nexthdr;
1803 /* if skb is set it will be used and fl6 can be NULL */
1804 u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb)
1806 struct flow_keys hash_keys;
1809 ip6_multipath_l3_keys(skb, &hash_keys);
1810 return flow_hash_from_keys(&hash_keys);
1813 return get_hash_from_flowi6(fl6);
1816 void ip6_route_input(struct sk_buff *skb)
1818 const struct ipv6hdr *iph = ipv6_hdr(skb);
1819 struct net *net = dev_net(skb->dev);
1820 int flags = RT6_LOOKUP_F_HAS_SADDR;
1821 struct ip_tunnel_info *tun_info;
1822 struct flowi6 fl6 = {
1823 .flowi6_iif = skb->dev->ifindex,
1824 .daddr = iph->daddr,
1825 .saddr = iph->saddr,
1826 .flowlabel = ip6_flowinfo(iph),
1827 .flowi6_mark = skb->mark,
1828 .flowi6_proto = iph->nexthdr,
1831 tun_info = skb_tunnel_info(skb);
1832 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1833 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
1834 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
1835 fl6.mp_hash = rt6_multipath_hash(&fl6, skb);
1837 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
1840 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
1841 struct flowi6 *fl6, int flags)
1843 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
1846 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
1847 struct flowi6 *fl6, int flags)
1851 if (rt6_need_strict(&fl6->daddr)) {
1852 struct dst_entry *dst;
1854 dst = l3mdev_link_scope_lookup(net, fl6);
1859 fl6->flowi6_iif = LOOPBACK_IFINDEX;
1861 any_src = ipv6_addr_any(&fl6->saddr);
1862 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
1863 (fl6->flowi6_oif && any_src))
1864 flags |= RT6_LOOKUP_F_IFACE;
1867 flags |= RT6_LOOKUP_F_HAS_SADDR;
1869 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
1871 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
1873 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
1875 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
1877 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
1878 struct net_device *loopback_dev = net->loopback_dev;
1879 struct dst_entry *new = NULL;
1881 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
1882 DST_OBSOLETE_DEAD, 0);
1885 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
1889 new->input = dst_discard;
1890 new->output = dst_discard_out;
1892 dst_copy_metrics(new, &ort->dst);
1894 rt->rt6i_idev = in6_dev_get(loopback_dev);
1895 rt->rt6i_gateway = ort->rt6i_gateway;
1896 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
1897 rt->rt6i_metric = 0;
1899 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1900 #ifdef CONFIG_IPV6_SUBTREES
1901 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1905 dst_release(dst_orig);
1906 return new ? new : ERR_PTR(-ENOMEM);
1910 * Destination cache support functions
1913 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
1916 dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
1917 dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
1920 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
1924 if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
1927 if (rt6_check_expired(rt))
1933 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
1935 if (!__rt6_check_expired(rt) &&
1936 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1937 rt6_check((struct rt6_info *)(rt->dst.from), cookie))
1943 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1945 struct rt6_info *rt;
1947 rt = (struct rt6_info *) dst;
1949 /* All IPV6 dsts are created with ->obsolete set to the value
1950 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1951 * into this function always.
1954 rt6_dst_from_metrics_check(rt);
1956 if (rt->rt6i_flags & RTF_PCPU ||
1957 (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
1958 return rt6_dst_from_check(rt, cookie);
1960 return rt6_check(rt, cookie);
1963 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1965 struct rt6_info *rt = (struct rt6_info *) dst;
1968 if (rt->rt6i_flags & RTF_CACHE) {
1969 if (rt6_check_expired(rt)) {
1981 static void ip6_link_failure(struct sk_buff *skb)
1983 struct rt6_info *rt;
1985 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1987 rt = (struct rt6_info *) skb_dst(skb);
1989 if (rt->rt6i_flags & RTF_CACHE) {
1990 if (dst_hold_safe(&rt->dst))
1993 struct fib6_node *fn;
1996 fn = rcu_dereference(rt->rt6i_node);
1997 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2004 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2006 struct net *net = dev_net(rt->dst.dev);
2008 rt->rt6i_flags |= RTF_MODIFIED;
2009 rt->rt6i_pmtu = mtu;
2010 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2013 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2015 return !(rt->rt6i_flags & RTF_CACHE) &&
2016 (rt->rt6i_flags & RTF_PCPU ||
2017 rcu_access_pointer(rt->rt6i_node));
2020 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2021 const struct ipv6hdr *iph, u32 mtu)
2023 const struct in6_addr *daddr, *saddr;
2024 struct rt6_info *rt6 = (struct rt6_info *)dst;
2026 if (rt6->rt6i_flags & RTF_LOCAL)
2029 if (dst_metric_locked(dst, RTAX_MTU))
2033 daddr = &iph->daddr;
2034 saddr = &iph->saddr;
2036 daddr = &sk->sk_v6_daddr;
2037 saddr = &inet6_sk(sk)->saddr;
2042 dst_confirm_neigh(dst, daddr);
2043 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2044 if (mtu >= dst_mtu(dst))
2047 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2048 rt6_do_update_pmtu(rt6, mtu);
2049 /* update rt6_ex->stamp for cache */
2050 if (rt6->rt6i_flags & RTF_CACHE)
2051 rt6_update_exception_stamp_rt(rt6);
2053 struct rt6_info *nrt6;
2055 nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
2057 rt6_do_update_pmtu(nrt6, mtu);
2058 if (rt6_insert_exception(nrt6, rt6))
2059 dst_release_immediate(&nrt6->dst);
2064 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2065 struct sk_buff *skb, u32 mtu)
2067 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2070 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2071 int oif, u32 mark, kuid_t uid)
2073 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2074 struct dst_entry *dst;
2077 memset(&fl6, 0, sizeof(fl6));
2078 fl6.flowi6_oif = oif;
2079 fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
2080 fl6.daddr = iph->daddr;
2081 fl6.saddr = iph->saddr;
2082 fl6.flowlabel = ip6_flowinfo(iph);
2083 fl6.flowi6_uid = uid;
2085 dst = ip6_route_output(net, NULL, &fl6);
2087 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
2090 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2092 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2094 struct dst_entry *dst;
2096 ip6_update_pmtu(skb, sock_net(sk), mtu,
2097 sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
2099 dst = __sk_dst_get(sk);
2100 if (!dst || !dst->obsolete ||
2101 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2105 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2106 ip6_datagram_dst_update(sk, false);
2109 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2111 /* Handle redirects */
2112 struct ip6rd_flowi {
2114 struct in6_addr gateway;
2117 static struct rt6_info *__ip6_route_redirect(struct net *net,
2118 struct fib6_table *table,
2122 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2123 struct rt6_info *rt, *rt_cache;
2124 struct fib6_node *fn;
2126 /* Get the "current" route for this destination and
2127 * check if the redirect has come from appropriate router.
2129 * RFC 4861 specifies that redirects should only be
2130 * accepted if they come from the nexthop to the target.
2131 * Due to the way the routes are chosen, this notion
2132 * is a bit fuzzy and one might need to check all possible
2137 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2139 for_each_fib6_node_rt_rcu(fn) {
2140 if (rt6_check_expired(rt))
2144 if (!(rt->rt6i_flags & RTF_GATEWAY))
2146 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
2148 /* rt_cache's gateway might be different from its 'parent'
2149 * in the case of an ip redirect.
2150 * So we keep searching in the exception table if the gateway
2153 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) {
2154 rt_cache = rt6_find_cached_rt(rt,
2158 ipv6_addr_equal(&rdfl->gateway,
2159 &rt_cache->rt6i_gateway)) {
2169 rt = net->ipv6.ip6_null_entry;
2170 else if (rt->dst.error) {
2171 rt = net->ipv6.ip6_null_entry;
2175 if (rt == net->ipv6.ip6_null_entry) {
2176 fn = fib6_backtrack(fn, &fl6->saddr);
2182 ip6_hold_safe(net, &rt, true);
2186 trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
2190 static struct dst_entry *ip6_route_redirect(struct net *net,
2191 const struct flowi6 *fl6,
2192 const struct in6_addr *gateway)
2194 int flags = RT6_LOOKUP_F_HAS_SADDR;
2195 struct ip6rd_flowi rdfl;
2198 rdfl.gateway = *gateway;
2200 return fib6_rule_lookup(net, &rdfl.fl6,
2201 flags, __ip6_route_redirect);
2204 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2207 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2208 struct dst_entry *dst;
2211 memset(&fl6, 0, sizeof(fl6));
2212 fl6.flowi6_iif = LOOPBACK_IFINDEX;
2213 fl6.flowi6_oif = oif;
2214 fl6.flowi6_mark = mark;
2215 fl6.daddr = iph->daddr;
2216 fl6.saddr = iph->saddr;
2217 fl6.flowlabel = ip6_flowinfo(iph);
2218 fl6.flowi6_uid = uid;
2220 dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
2221 rt6_do_redirect(dst, NULL, skb);
2224 EXPORT_SYMBOL_GPL(ip6_redirect);
2226 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
2229 const struct ipv6hdr *iph = ipv6_hdr(skb);
2230 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
2231 struct dst_entry *dst;
2234 memset(&fl6, 0, sizeof(fl6));
2235 fl6.flowi6_iif = LOOPBACK_IFINDEX;
2236 fl6.flowi6_oif = oif;
2237 fl6.flowi6_mark = mark;
2238 fl6.daddr = msg->dest;
2239 fl6.saddr = iph->daddr;
2240 fl6.flowi6_uid = sock_net_uid(net, NULL);
2242 dst = ip6_route_redirect(net, &fl6, &iph->saddr);
2243 rt6_do_redirect(dst, NULL, skb);
2247 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2249 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2252 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2254 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
2256 struct net_device *dev = dst->dev;
2257 unsigned int mtu = dst_mtu(dst);
2258 struct net *net = dev_net(dev);
2260 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
2262 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
2263 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
2266 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
2267 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
2268 * IPV6_MAXPLEN is also valid and means: "any MSS,
2269 * rely only on pmtu discovery"
2271 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
2276 static unsigned int ip6_mtu(const struct dst_entry *dst)
2278 const struct rt6_info *rt = (const struct rt6_info *)dst;
2279 unsigned int mtu = rt->rt6i_pmtu;
2280 struct inet6_dev *idev;
2285 mtu = dst_metric_raw(dst, RTAX_MTU);
2292 idev = __in6_dev_get(dst->dev);
2294 mtu = idev->cnf.mtu6;
2298 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2300 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
2303 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
2306 struct dst_entry *dst;
2307 struct rt6_info *rt;
2308 struct inet6_dev *idev = in6_dev_get(dev);
2309 struct net *net = dev_net(dev);
2311 if (unlikely(!idev))
2312 return ERR_PTR(-ENODEV);
2314 rt = ip6_dst_alloc(net, dev, 0);
2315 if (unlikely(!rt)) {
2317 dst = ERR_PTR(-ENOMEM);
2321 rt->dst.flags |= DST_HOST;
2322 rt->dst.output = ip6_output;
2323 rt->rt6i_gateway = fl6->daddr;
2324 rt->rt6i_dst.addr = fl6->daddr;
2325 rt->rt6i_dst.plen = 128;
2326 rt->rt6i_idev = idev;
2327 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
2329 /* Add this dst into uncached_list so that rt6_ifdown() can
2330 * do proper release of the net_device
2332 rt6_uncached_list_add(rt);
2333 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2335 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
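/* dst garbage collection: run fib6_run_gc() only when the last run is old
 * enough or the number of dst entries exceeds the configured maximum; the
 * effective expire time adapts to how much pressure the table is under.
 */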
2341 static int ip6_dst_gc(struct dst_ops *ops)
2343 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
2344 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
2345 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
2346 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2347 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2348 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
2351 entries = dst_entries_get_fast(ops);
2352 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
2353 entries <= rt_max_size)
2356 net->ipv6.ip6_rt_gc_expire++;
2357 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
2358 entries = dst_entries_get_slow(ops);
2359 if (entries < ops->gc_thresh)
2360 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
2362 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
2363 return entries > rt_max_size;
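/* Translate the RTAX_* netlink attributes of a route request into an
 * mx6_config metrics array, validating each value (CC algorithm name,
 * hop limit range, feature bits) along the way.
 */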
2366 static int ip6_convert_metrics(struct mx6_config *mxc,
2367 const struct fib6_config *cfg)
2369 bool ecn_ca = false;
2377 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
2381 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
2382 int type = nla_type(nla);
2387 if (unlikely(type > RTAX_MAX))
2390 if (type == RTAX_CC_ALGO) {
2391 char tmp[TCP_CA_NAME_MAX];
2393 nla_strlcpy(tmp, nla, sizeof(tmp));
2394 val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
2395 if (val == TCP_CA_UNSPEC)
2398 val = nla_get_u32(nla);
2400 if (type == RTAX_HOPLIMIT && val > 255)
2402 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
2406 __set_bit(type - 1, mxc->mx_valid);
2410 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
2411 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
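/* Resolve a nexthop by looking it up in the table named in the request,
 * using a strict lookup that ignores link state.
 */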
2421 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2422 struct fib6_config *cfg,
2423 const struct in6_addr *gw_addr)
2425 struct flowi6 fl6 = {
2426 .flowi6_oif = cfg->fc_ifindex,
2428 .saddr = cfg->fc_prefsrc,
2430 struct fib6_table *table;
2431 struct rt6_info *rt;
2432 int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
2434 table = fib6_get_table(net, cfg->fc_table);
2438 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2439 flags |= RT6_LOOKUP_F_HAS_SADDR;
2441 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);
2443 /* if table lookup failed, fall back to full lookup */
2444 if (rt == net->ipv6.ip6_null_entry) {
2452 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
2453 struct netlink_ext_ack *extack)
2455 struct net *net = cfg->fc_nlinfo.nl_net;
2456 struct rt6_info *rt = NULL;
2457 struct net_device *dev = NULL;
2458 struct inet6_dev *idev = NULL;
2459 struct fib6_table *table;
2463 /* RTF_PCPU is an internal flag; it cannot be set by userspace */
2464 if (cfg->fc_flags & RTF_PCPU) {
2465 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
2469 if (cfg->fc_dst_len > 128) {
2470 NL_SET_ERR_MSG(extack, "Invalid prefix length");
2473 if (cfg->fc_src_len > 128) {
2474 NL_SET_ERR_MSG(extack, "Invalid source address length");
2477 #ifndef CONFIG_IPV6_SUBTREES
2478 if (cfg->fc_src_len) {
2479 NL_SET_ERR_MSG(extack,
2480 "Specifying source address requires IPV6_SUBTREES to be enabled");
2484 if (cfg->fc_ifindex) {
2486 dev = dev_get_by_index(net, cfg->fc_ifindex);
2489 idev = in6_dev_get(dev);
2494 if (cfg->fc_metric == 0)
2495 cfg->fc_metric = IP6_RT_PRIO_USER;
2498 if (cfg->fc_nlinfo.nlh &&
2499 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
2500 table = fib6_get_table(net, cfg->fc_table);
2502 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
2503 table = fib6_new_table(net, cfg->fc_table);
2506 table = fib6_new_table(net, cfg->fc_table);
2512 rt = ip6_dst_alloc(net, NULL,
2513 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
2520 if (cfg->fc_flags & RTF_EXPIRES)
2521 rt6_set_expires(rt, jiffies +
2522 clock_t_to_jiffies(cfg->fc_expires));
2524 rt6_clean_expires(rt);
2526 if (cfg->fc_protocol == RTPROT_UNSPEC)
2527 cfg->fc_protocol = RTPROT_BOOT;
2528 rt->rt6i_protocol = cfg->fc_protocol;
2530 addr_type = ipv6_addr_type(&cfg->fc_dst);
2532 if (addr_type & IPV6_ADDR_MULTICAST)
2533 rt->dst.input = ip6_mc_input;
2534 else if (cfg->fc_flags & RTF_LOCAL)
2535 rt->dst.input = ip6_input;
2537 rt->dst.input = ip6_forward;
2539 rt->dst.output = ip6_output;
2541 if (cfg->fc_encap) {
2542 struct lwtunnel_state *lwtstate;
2544 err = lwtunnel_build_state(cfg->fc_encap_type,
2545 cfg->fc_encap, AF_INET6, cfg,
2549 rt->dst.lwtstate = lwtstate_get(lwtstate);
2550 if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
2551 rt->dst.lwtstate->orig_output = rt->dst.output;
2552 rt->dst.output = lwtunnel_output;
2554 if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
2555 rt->dst.lwtstate->orig_input = rt->dst.input;
2556 rt->dst.input = lwtunnel_input;
2560 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
2561 rt->rt6i_dst.plen = cfg->fc_dst_len;
2562 if (rt->rt6i_dst.plen == 128)
2563 rt->dst.flags |= DST_HOST;
2565 #ifdef CONFIG_IPV6_SUBTREES
2566 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
2567 rt->rt6i_src.plen = cfg->fc_src_len;
2570 rt->rt6i_metric = cfg->fc_metric;
2572 /* We cannot add true routes via loopback here;
2573 they would result in kernel looping, so promote them to reject routes
2575 if ((cfg->fc_flags & RTF_REJECT) ||
2576 (dev && (dev->flags & IFF_LOOPBACK) &&
2577 !(addr_type & IPV6_ADDR_LOOPBACK) &&
2578 !(cfg->fc_flags & RTF_LOCAL))) {
2579 /* hold loopback dev/idev if we haven't done so. */
2580 if (dev != net->loopback_dev) {
2585 dev = net->loopback_dev;
2587 idev = in6_dev_get(dev);
2593 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
2594 switch (cfg->fc_type) {
2596 rt->dst.error = -EINVAL;
2597 rt->dst.output = dst_discard_out;
2598 rt->dst.input = dst_discard;
2601 rt->dst.error = -EACCES;
2602 rt->dst.output = ip6_pkt_prohibit_out;
2603 rt->dst.input = ip6_pkt_prohibit;
2606 case RTN_UNREACHABLE:
2608 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
2609 : (cfg->fc_type == RTN_UNREACHABLE)
2610 ? -EHOSTUNREACH : -ENETUNREACH;
2611 rt->dst.output = ip6_pkt_discard_out;
2612 rt->dst.input = ip6_pkt_discard;
2618 if (cfg->fc_flags & RTF_GATEWAY) {
2619 const struct in6_addr *gw_addr;
2622 gw_addr = &cfg->fc_gateway;
2623 gwa_type = ipv6_addr_type(gw_addr);
2625 /* if gw_addr is local we will fail to detect this in case
2626 * the address is still TENTATIVE (DAD in progress). rt6_lookup()
2627 * will return the already-added prefix route via the interface that
2628 * the prefix route was assigned to, which might be non-loopback.
2631 if (ipv6_chk_addr_and_flags(net, gw_addr,
2632 gwa_type & IPV6_ADDR_LINKLOCAL ?
2633 dev : NULL, 0, 0)) {
2634 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2637 rt->rt6i_gateway = *gw_addr;
2639 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
2640 struct rt6_info *grt = NULL;
2642 /* IPv6 strictly inhibits using non-link-local
2643 addresses as nexthop addresses.
2644 Otherwise, a router will not be able to send redirects.
2645 That is usually right, but in some (rare!) circumstances
2646 (SIT, PtP, NBMA NOARP links) it is handy to allow
2647 some exceptions. --ANK
2648 We allow IPv4-mapped nexthops to support RFC4798-type
2651 if (!(gwa_type & (IPV6_ADDR_UNICAST |
2652 IPV6_ADDR_MAPPED))) {
2653 NL_SET_ERR_MSG(extack,
2654 "Invalid gateway address");
2658 if (cfg->fc_table) {
2659 grt = ip6_nh_lookup_table(net, cfg, gw_addr);
2662 if (grt->rt6i_flags & RTF_GATEWAY ||
2663 (dev && dev != grt->dst.dev)) {
2671 grt = rt6_lookup(net, gw_addr, NULL,
2672 cfg->fc_ifindex, 1);
2674 err = -EHOSTUNREACH;
2678 if (dev != grt->dst.dev) {
2684 idev = grt->rt6i_idev;
2686 in6_dev_hold(grt->rt6i_idev);
2688 if (!(grt->rt6i_flags & RTF_GATEWAY))
2697 NL_SET_ERR_MSG(extack, "Egress device not specified");
2699 } else if (dev->flags & IFF_LOOPBACK) {
2700 NL_SET_ERR_MSG(extack,
2701 "Egress device can not be loopback device for this route");
2710 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
2711 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
2712 NL_SET_ERR_MSG(extack, "Invalid source address");
2716 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
2717 rt->rt6i_prefsrc.plen = 128;
2719 rt->rt6i_prefsrc.plen = 0;
2721 rt->rt6i_flags = cfg->fc_flags;
2725 rt->rt6i_idev = idev;
2726 rt->rt6i_table = table;
2728 cfg->fc_nlinfo.nl_net = dev_net(dev);
2737 dst_release_immediate(&rt->dst);
2739 return ERR_PTR(err);
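/* Build an rt6_info from @cfg, convert any metrics, and insert the route
 * into the FIB; the dst is released again if insertion fails.
 */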
2742 int ip6_route_add(struct fib6_config *cfg,
2743 struct netlink_ext_ack *extack)
2745 struct mx6_config mxc = { .mx = NULL, };
2746 struct rt6_info *rt;
2749 rt = ip6_route_info_create(cfg, extack);
2756 err = ip6_convert_metrics(&mxc, cfg);
2760 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
2767 dst_release_immediate(&rt->dst);
2772 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2775 struct fib6_table *table;
2776 struct net *net = dev_net(rt->dst.dev);
2778 if (rt == net->ipv6.ip6_null_entry) {
2783 table = rt->rt6i_table;
2784 spin_lock_bh(&table->tb6_lock);
2785 err = fib6_del(rt, info);
2786 spin_unlock_bh(&table->tb6_lock);
2793 int ip6_del_rt(struct rt6_info *rt)
2795 struct nl_info info = {
2796 .nl_net = dev_net(rt->dst.dev),
2798 return __ip6_del_rt(rt, &info);
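/* Delete a multipath route and all of its siblings, emitting a single
 * RTM_DELROUTE notification that covers every nexthop.
 */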
2801 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
2803 struct nl_info *info = &cfg->fc_nlinfo;
2804 struct net *net = info->nl_net;
2805 struct sk_buff *skb = NULL;
2806 struct fib6_table *table;
2809 if (rt == net->ipv6.ip6_null_entry)
2811 table = rt->rt6i_table;
2812 spin_lock_bh(&table->tb6_lock);
2814 if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
2815 struct rt6_info *sibling, *next_sibling;
2817 /* prefer to send a single notification with all hops */
2818 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
2820 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2822 if (rt6_fill_node(net, skb, rt,
2823 NULL, NULL, 0, RTM_DELROUTE,
2824 info->portid, seq, 0) < 0) {
2828 info->skip_notify = 1;
2831 list_for_each_entry_safe(sibling, next_sibling,
2834 err = fib6_del(sibling, info);
2840 err = fib6_del(rt, info);
2842 spin_unlock_bh(&table->tb6_lock);
2847 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2848 info->nlh, gfp_any());
2853 static int ip6_route_del(struct fib6_config *cfg,
2854 struct netlink_ext_ack *extack)
2856 struct rt6_info *rt, *rt_cache;
2857 struct fib6_table *table;
2858 struct fib6_node *fn;
2861 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
2863 NL_SET_ERR_MSG(extack, "FIB table does not exist");
2869 fn = fib6_locate(&table->tb6_root,
2870 &cfg->fc_dst, cfg->fc_dst_len,
2871 &cfg->fc_src, cfg->fc_src_len,
2872 !(cfg->fc_flags & RTF_CACHE));
2875 for_each_fib6_node_rt_rcu(fn) {
2876 if (cfg->fc_flags & RTF_CACHE) {
2877 rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
2883 if (cfg->fc_ifindex &&
2885 rt->dst.dev->ifindex != cfg->fc_ifindex))
2887 if (cfg->fc_flags & RTF_GATEWAY &&
2888 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
2890 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
2892 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
2894 if (!dst_hold_safe(&rt->dst))
2898 /* if gateway was specified only delete the one hop */
2899 if (cfg->fc_flags & RTF_GATEWAY)
2900 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
2902 return __ip6_del_rt_siblings(rt, cfg);
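/* Handle an ICMPv6 Redirect: validate the message and its ND options,
 * update the neighbour entry for the target, then install a cached clone
 * of the route pointing at the new gateway and notify interested parties.
 */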
2910 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
2912 struct netevent_redirect netevent;
2913 struct rt6_info *rt, *nrt = NULL;
2914 struct ndisc_options ndopts;
2915 struct inet6_dev *in6_dev;
2916 struct neighbour *neigh;
2918 int optlen, on_link;
2921 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
2922 optlen -= sizeof(*msg);
2925 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2929 msg = (struct rd_msg *)icmp6_hdr(skb);
2931 if (ipv6_addr_is_multicast(&msg->dest)) {
2932 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2937 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
2939 } else if (ipv6_addr_type(&msg->target) !=
2940 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
2941 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2945 in6_dev = __in6_dev_get(skb->dev);
2948 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
2952 * The IP source address of the Redirect MUST be the same as the current
2953 * first-hop router for the specified ICMP Destination Address.
2956 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
2957 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2962 if (ndopts.nd_opts_tgt_lladdr) {
2963 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
2966 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2971 rt = (struct rt6_info *) dst;
2972 if (rt->rt6i_flags & RTF_REJECT) {
2973 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2977 /* Redirect received -> path was valid.
2978 * Look, redirects are sent only in response to data packets,
2979 * so that this nexthop apparently is reachable. --ANK
2981 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
2983 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
2988 * We have finally decided to accept it.
2991 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
2992 NEIGH_UPDATE_F_WEAK_OVERRIDE|
2993 NEIGH_UPDATE_F_OVERRIDE|
2994 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
2995 NEIGH_UPDATE_F_ISROUTER)),
2996 NDISC_REDIRECT, &ndopts);
2998 nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
3002 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3004 nrt->rt6i_flags &= ~RTF_GATEWAY;
3006 nrt->rt6i_protocol = RTPROT_REDIRECT;
3007 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
3009 /* No need to remove rt from the exception table if rt is
3010 * a cached route because rt6_insert_exception() will
3013 if (rt6_insert_exception(nrt, rt)) {
3014 dst_release_immediate(&nrt->dst);
3018 netevent.old = &rt->dst;
3019 netevent.new = &nrt->dst;
3020 netevent.daddr = &msg->dest;
3021 netevent.neigh = neigh;
3022 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3025 neigh_release(neigh);
3029 * Misc support functions
3032 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
3034 BUG_ON(from->dst.from);
3036 rt->rt6i_flags &= ~RTF_EXPIRES;
3037 dst_hold(&from->dst);
3038 rt->dst.from = &from->dst;
3039 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
3042 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
3044 rt->dst.input = ort->dst.input;
3045 rt->dst.output = ort->dst.output;
3046 rt->rt6i_dst = ort->rt6i_dst;
3047 rt->dst.error = ort->dst.error;
3048 rt->rt6i_idev = ort->rt6i_idev;
3050 in6_dev_hold(rt->rt6i_idev);
3051 rt->dst.lastuse = jiffies;
3052 rt->rt6i_gateway = ort->rt6i_gateway;
3053 rt->rt6i_flags = ort->rt6i_flags;
3054 rt6_set_from(rt, ort);
3055 rt->rt6i_metric = ort->rt6i_metric;
3056 #ifdef CONFIG_IPV6_SUBTREES
3057 rt->rt6i_src = ort->rt6i_src;
3059 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
3060 rt->rt6i_table = ort->rt6i_table;
3061 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
3064 #ifdef CONFIG_IPV6_ROUTE_INFO
3065 static struct rt6_info *rt6_get_route_info(struct net *net,
3066 const struct in6_addr *prefix, int prefixlen,
3067 const struct in6_addr *gwaddr,
3068 struct net_device *dev)
3070 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3071 int ifindex = dev->ifindex;
3072 struct fib6_node *fn;
3073 struct rt6_info *rt = NULL;
3074 struct fib6_table *table;
3076 table = fib6_get_table(net, tb_id);
3081 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3085 for_each_fib6_node_rt_rcu(fn) {
3086 if (rt->dst.dev->ifindex != ifindex)
3088 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
3090 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
3092 ip6_hold_safe(NULL, &rt, false);
3100 static struct rt6_info *rt6_add_route_info(struct net *net,
3101 const struct in6_addr *prefix, int prefixlen,
3102 const struct in6_addr *gwaddr,
3103 struct net_device *dev,
3106 struct fib6_config cfg = {
3107 .fc_metric = IP6_RT_PRIO_USER,
3108 .fc_ifindex = dev->ifindex,
3109 .fc_dst_len = prefixlen,
3110 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3111 RTF_UP | RTF_PREF(pref),
3112 .fc_protocol = RTPROT_RA,
3113 .fc_nlinfo.portid = 0,
3114 .fc_nlinfo.nlh = NULL,
3115 .fc_nlinfo.nl_net = net,
3118 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
3119 cfg.fc_dst = *prefix;
3120 cfg.fc_gateway = *gwaddr;
3122 /* We should treat it as a default route if prefix length is 0. */
3124 cfg.fc_flags |= RTF_DEFAULT;
3126 ip6_route_add(&cfg, NULL);
3128 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
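/* Find the RA-learned default route via gateway @addr on @dev
 * (RTF_ADDRCONF | RTF_DEFAULT), taking a reference on it when found.
 */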
3132 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
3134 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3135 struct rt6_info *rt;
3136 struct fib6_table *table;
3138 table = fib6_get_table(dev_net(dev), tb_id);
3143 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3144 if (dev == rt->dst.dev &&
3145 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3146 ipv6_addr_equal(&rt->rt6i_gateway, addr))
3150 ip6_hold_safe(NULL, &rt, false);
3155 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
3156 struct net_device *dev,
3159 struct fib6_config cfg = {
3160 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3161 .fc_metric = IP6_RT_PRIO_USER,
3162 .fc_ifindex = dev->ifindex,
3163 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3164 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3165 .fc_protocol = RTPROT_RA,
3166 .fc_nlinfo.portid = 0,
3167 .fc_nlinfo.nlh = NULL,
3168 .fc_nlinfo.nl_net = dev_net(dev),
3171 cfg.fc_gateway = *gwaddr;
3173 if (!ip6_route_add(&cfg, NULL)) {
3174 struct fib6_table *table;
3176 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3178 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3181 return rt6_get_dflt_router(gwaddr, dev);
3184 static void __rt6_purge_dflt_routers(struct fib6_table *table)
3186 struct rt6_info *rt;
3190 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3191 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3192 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
3193 if (dst_hold_safe(&rt->dst)) {
3204 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3207 void rt6_purge_dflt_routers(struct net *net)
3209 struct fib6_table *table;
3210 struct hlist_head *head;
3215 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3216 head = &net->ipv6.fib_table_hash[h];
3217 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3218 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
3219 __rt6_purge_dflt_routers(table);
3226 static void rtmsg_to_fib6_config(struct net *net,
3227 struct in6_rtmsg *rtmsg,
3228 struct fib6_config *cfg)
3230 memset(cfg, 0, sizeof(*cfg));
3232 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3234 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
3235 cfg->fc_metric = rtmsg->rtmsg_metric;
3236 cfg->fc_expires = rtmsg->rtmsg_info;
3237 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
3238 cfg->fc_src_len = rtmsg->rtmsg_src_len;
3239 cfg->fc_flags = rtmsg->rtmsg_flags;
3241 cfg->fc_nlinfo.nl_net = net;
3243 cfg->fc_dst = rtmsg->rtmsg_dst;
3244 cfg->fc_src = rtmsg->rtmsg_src;
3245 cfg->fc_gateway = rtmsg->rtmsg_gateway;
3248 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3250 struct fib6_config cfg;
3251 struct in6_rtmsg rtmsg;
3255 case SIOCADDRT: /* Add a route */
3256 case SIOCDELRT: /* Delete a route */
3257 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3259 err = copy_from_user(&rtmsg, arg,
3260 sizeof(struct in6_rtmsg));
3264 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
3269 err = ip6_route_add(&cfg, NULL);
3272 err = ip6_route_del(&cfg, NULL);
3286 * Drop the packet on the floor
3289 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
3292 struct dst_entry *dst = skb_dst(skb);
3293 switch (ipstats_mib_noroutes) {
3294 case IPSTATS_MIB_INNOROUTES:
3295 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
3296 if (type == IPV6_ADDR_ANY) {
3297 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3298 IPSTATS_MIB_INADDRERRORS);
3302 case IPSTATS_MIB_OUTNOROUTES:
3303 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3304 ipstats_mib_noroutes);
3307 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
3312 static int ip6_pkt_discard(struct sk_buff *skb)
3314 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
3317 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3319 skb->dev = skb_dst(skb)->dev;
3320 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
3323 static int ip6_pkt_prohibit(struct sk_buff *skb)
3325 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
3328 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3330 skb->dev = skb_dst(skb)->dev;
3331 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
3335 * Allocate a dst for local (unicast / anycast) address.
3338 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
3339 const struct in6_addr *addr,
3343 struct net *net = dev_net(idev->dev);
3344 struct net_device *dev = idev->dev;
3345 struct rt6_info *rt;
3347 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
3349 return ERR_PTR(-ENOMEM);
3353 rt->dst.flags |= DST_HOST;
3354 rt->dst.input = ip6_input;
3355 rt->dst.output = ip6_output;
3356 rt->rt6i_idev = idev;
3358 rt->rt6i_protocol = RTPROT_KERNEL;
3359 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
3361 rt->rt6i_flags |= RTF_ANYCAST;
3363 rt->rt6i_flags |= RTF_LOCAL;
3365 rt->rt6i_gateway = *addr;
3366 rt->rt6i_dst.addr = *addr;
3367 rt->rt6i_dst.plen = 128;
3368 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
3369 rt->rt6i_table = fib6_get_table(net, tb_id);
3374 /* remove deleted ip from prefsrc entries */
3375 struct arg_dev_net_ip {
3376 struct net_device *dev;
3378 struct in6_addr *addr;
3381 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
3383 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3384 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3385 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3387 if (((void *)rt->dst.dev == dev || !dev) &&
3388 rt != net->ipv6.ip6_null_entry &&
3389 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
3390 spin_lock_bh(&rt6_exception_lock);
3391 /* remove prefsrc entry */
3392 rt->rt6i_prefsrc.plen = 0;
3393 /* need to update cache as well */
3394 rt6_exceptions_remove_prefsrc(rt);
3395 spin_unlock_bh(&rt6_exception_lock);
3400 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
3402 struct net *net = dev_net(ifp->idev->dev);
3403 struct arg_dev_net_ip adni = {
3404 .dev = ifp->idev->dev,
3408 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
3411 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
3413 /* Remove routers and update dst entries when a gateway turns into a host. */
3414 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
3416 struct in6_addr *gateway = (struct in6_addr *)arg;
3418 if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
3419 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
3423 /* Further clean up cached routes in the exception table.
3424 * This is needed because a cached route may have a different
3425 * gateway than its 'parent' in the case of an IP redirect.
3427 rt6_exceptions_clean_tohost(rt, gateway);
3432 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
3434 fib6_clean_all(net, fib6_clean_tohost, gateway);
3437 struct arg_dev_net {
3438 struct net_device *dev;
3442 /* called with write lock held for table with rt */
3443 static int fib6_ifdown(struct rt6_info *rt, void *arg)
3445 const struct arg_dev_net *adn = arg;
3446 const struct net_device *dev = adn->dev;
3448 if ((rt->dst.dev == dev || !dev) &&
3449 rt != adn->net->ipv6.ip6_null_entry &&
3450 (rt->rt6i_nsiblings == 0 ||
3451 (dev && netdev_unregistering(dev)) ||
3452 !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
3458 void rt6_ifdown(struct net *net, struct net_device *dev)
3460 struct arg_dev_net adn = {
3465 fib6_clean_all(net, fib6_ifdown, &adn);
3467 rt6_uncached_list_flush_dev(net, dev);
3470 struct rt6_mtu_change_arg {
3471 struct net_device *dev;
3475 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
3477 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
3478 struct inet6_dev *idev;
3480 /* In IPv6, PMTU discovery is not optional,
3481 so the RTAX_MTU lock cannot disable it.
3482 We still use this lock to block changes
3483 caused by addrconf/ndisc.
3486 idev = __in6_dev_get(arg->dev);
3490 /* There is no way to discover an administrative increase of the
3491 IPv6 path MTU, so such an increase must be propagated here.
3492 RFC 1981 does not cover administrative MTU increases (e.g. jumbo
3493 frames), so updating the PMTU here on increase is a MUST.
3496 If the new MTU is less than the route PMTU, the new MTU is the
3497 lowest MTU in the path; update the route PMTU to reflect the
3498 decrease. If the new MTU is greater than the route PMTU and the
3499 old MTU was the lowest MTU in the path, update the route PMTU
3500 to reflect the increase. In that case, if another node's MTU is
3501 also the lowest in the path, a Packet Too Big message will lead to
3504 if (rt->dst.dev == arg->dev &&
3505 dst_metric_raw(&rt->dst, RTAX_MTU) &&
3506 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
3507 spin_lock_bh(&rt6_exception_lock);
3508 if (dst_mtu(&rt->dst) >= arg->mtu ||
3509 (dst_mtu(&rt->dst) < arg->mtu &&
3510 dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
3511 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
3513 rt6_exceptions_update_pmtu(rt, arg->mtu);
3514 spin_unlock_bh(&rt6_exception_lock);
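/* Propagate a device MTU change to every route (and its cached
 * exceptions) that uses @dev.
 */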
3519 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
3521 struct rt6_mtu_change_arg arg = {
3526 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
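/* Netlink attribute policy used when parsing RTM_NEWROUTE, RTM_DELROUTE
 * and RTM_GETROUTE requests.
 */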
3529 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3530 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
3531 [RTA_OIF] = { .type = NLA_U32 },
3532 [RTA_IIF] = { .type = NLA_U32 },
3533 [RTA_PRIORITY] = { .type = NLA_U32 },
3534 [RTA_METRICS] = { .type = NLA_NESTED },
3535 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
3536 [RTA_PREF] = { .type = NLA_U8 },
3537 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
3538 [RTA_ENCAP] = { .type = NLA_NESTED },
3539 [RTA_EXPIRES] = { .type = NLA_U32 },
3540 [RTA_UID] = { .type = NLA_U32 },
3541 [RTA_MARK] = { .type = NLA_U32 },
3544 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
3545 struct fib6_config *cfg,
3546 struct netlink_ext_ack *extack)
3549 struct nlattr *tb[RTA_MAX+1];
3553 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
3559 rtm = nlmsg_data(nlh);
3560 memset(cfg, 0, sizeof(*cfg));
3562 cfg->fc_table = rtm->rtm_table;
3563 cfg->fc_dst_len = rtm->rtm_dst_len;
3564 cfg->fc_src_len = rtm->rtm_src_len;
3565 cfg->fc_flags = RTF_UP;
3566 cfg->fc_protocol = rtm->rtm_protocol;
3567 cfg->fc_type = rtm->rtm_type;
3569 if (rtm->rtm_type == RTN_UNREACHABLE ||
3570 rtm->rtm_type == RTN_BLACKHOLE ||
3571 rtm->rtm_type == RTN_PROHIBIT ||
3572 rtm->rtm_type == RTN_THROW)
3573 cfg->fc_flags |= RTF_REJECT;
3575 if (rtm->rtm_type == RTN_LOCAL)
3576 cfg->fc_flags |= RTF_LOCAL;
3578 if (rtm->rtm_flags & RTM_F_CLONED)
3579 cfg->fc_flags |= RTF_CACHE;
3581 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
3582 cfg->fc_nlinfo.nlh = nlh;
3583 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
3585 if (tb[RTA_GATEWAY]) {
3586 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
3587 cfg->fc_flags |= RTF_GATEWAY;
3591 int plen = (rtm->rtm_dst_len + 7) >> 3;
3593 if (nla_len(tb[RTA_DST]) < plen)
3596 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
3600 int plen = (rtm->rtm_src_len + 7) >> 3;
3602 if (nla_len(tb[RTA_SRC]) < plen)
3605 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
3608 if (tb[RTA_PREFSRC])
3609 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
3612 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
3614 if (tb[RTA_PRIORITY])
3615 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
3617 if (tb[RTA_METRICS]) {
3618 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
3619 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
3623 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
3625 if (tb[RTA_MULTIPATH]) {
3626 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
3627 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
3629 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
3630 cfg->fc_mp_len, extack);
3636 pref = nla_get_u8(tb[RTA_PREF]);
3637 if (pref != ICMPV6_ROUTER_PREF_LOW &&
3638 pref != ICMPV6_ROUTER_PREF_HIGH)
3639 pref = ICMPV6_ROUTER_PREF_MEDIUM;
3640 cfg->fc_flags |= RTF_PREF(pref);
3644 cfg->fc_encap = tb[RTA_ENCAP];
3646 if (tb[RTA_ENCAP_TYPE]) {
3647 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
3649 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
3654 if (tb[RTA_EXPIRES]) {
3655 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
3657 if (addrconf_finite_timeout(timeout)) {
3658 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
3659 cfg->fc_flags |= RTF_EXPIRES;
3669 struct rt6_info *rt6_info;
3670 struct fib6_config r_cfg;
3671 struct mx6_config mxc;
3672 struct list_head next;
3675 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
3679 list_for_each_entry(nh, rt6_nh_list, next) {
3680 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
3681 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
3682 nh->r_cfg.fc_ifindex);
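/* Queue a nexthop on rt6_nh_list unless an equivalent route is already
 * present; metrics are converted separately for each nexthop.
 */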
3686 static int ip6_route_info_append(struct list_head *rt6_nh_list,
3687 struct rt6_info *rt, struct fib6_config *r_cfg)
3692 list_for_each_entry(nh, rt6_nh_list, next) {
3693 /* check if rt6_info already exists */
3694 if (rt6_duplicate_nexthop(nh->rt6_info, rt))
3698 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
3702 err = ip6_convert_metrics(&nh->mxc, r_cfg);
3707 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
3708 list_add_tail(&nh->next, rt6_nh_list);
3713 static void ip6_route_mpath_notify(struct rt6_info *rt,
3714 struct rt6_info *rt_last,
3715 struct nl_info *info,
3718 /* if this is an APPEND route, then rt points to the first route
3719 * inserted and rt_last points to the last route inserted. Userspace
3720 * wants a consistent dump of the route which starts at the first
3721 * nexthop. Since sibling routes are always added at the end of
3722 * the list, find the first sibling of the last route appended
3724 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
3725 rt = list_first_entry(&rt_last->rt6i_siblings,
3731 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
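/* Add a multipath route: build one rt6_info per rtnexthop, insert them
 * with per-route notifications suppressed, then send a single
 * notification for the whole route. On failure, the nexthops that were
 * already inserted are deleted again.
 */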
3734 static int ip6_route_multipath_add(struct fib6_config *cfg,
3735 struct netlink_ext_ack *extack)
3737 struct rt6_info *rt_notif = NULL, *rt_last = NULL;
3738 struct nl_info *info = &cfg->fc_nlinfo;
3739 struct fib6_config r_cfg;
3740 struct rtnexthop *rtnh;
3741 struct rt6_info *rt;
3742 struct rt6_nh *err_nh;
3743 struct rt6_nh *nh, *nh_safe;
3749 int replace = (cfg->fc_nlinfo.nlh &&
3750 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
3751 LIST_HEAD(rt6_nh_list);
3753 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
3754 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
3755 nlflags |= NLM_F_APPEND;
3757 remaining = cfg->fc_mp_len;
3758 rtnh = (struct rtnexthop *)cfg->fc_mp;
3760 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
3761 * rt6_info structs per nexthop
3763 while (rtnh_ok(rtnh, remaining)) {
3764 memcpy(&r_cfg, cfg, sizeof(*cfg));
3765 if (rtnh->rtnh_ifindex)
3766 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3768 attrlen = rtnh_attrlen(rtnh);
3770 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3772 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3774 r_cfg.fc_gateway = nla_get_in6_addr(nla);
3775 r_cfg.fc_flags |= RTF_GATEWAY;
3777 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
3778 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
3780 r_cfg.fc_encap_type = nla_get_u16(nla);
3783 rt = ip6_route_info_create(&r_cfg, extack);
3790 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
3792 dst_release_immediate(&rt->dst);
3796 rtnh = rtnh_next(rtnh, &remaining);
3799 /* for add and replace send one notification with all nexthops.
3800 * Skip the notification in fib6_add_rt2node and send one with
3801 * the full route when done
3803 info->skip_notify = 1;
3806 list_for_each_entry(nh, &rt6_nh_list, next) {
3807 rt_last = nh->rt6_info;
3808 err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
3809 /* save reference to first route for notification */
3810 if (!rt_notif && !err)
3811 rt_notif = nh->rt6_info;
3813 /* nh->rt6_info is used or freed at this point, reset to NULL */
3814 nh->rt6_info = NULL;
3817 ip6_print_replace_route_err(&rt6_nh_list);
3822 /* Because each route is added like a single route we remove
3823 * these flags after the first nexthop: if there is a collision,
3824 * we have already failed to add the first nexthop:
3825 * fib6_add_rt2node() has rejected it; when replacing, old
3826 * nexthops have been replaced by the first new one, the rest should
3829 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3834 /* success ... tell user about new route */
3835 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3839 /* send notification for routes that were added so that
3840 * the delete notifications sent by ip6_route_del are
3844 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
3846 /* Delete routes that were already added */
3847 list_for_each_entry(nh, &rt6_nh_list, next) {
3850 ip6_route_del(&nh->r_cfg, extack);
3854 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
3856 dst_release_immediate(&nh->rt6_info->dst);
3858 list_del(&nh->next);
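/* Delete each nexthop of a multipath route individually; the last error
 * seen is kept so one failed delete does not stop the walk.
 */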
3865 static int ip6_route_multipath_del(struct fib6_config *cfg,
3866 struct netlink_ext_ack *extack)
3868 struct fib6_config r_cfg;
3869 struct rtnexthop *rtnh;
3872 int err = 1, last_err = 0;
3874 remaining = cfg->fc_mp_len;
3875 rtnh = (struct rtnexthop *)cfg->fc_mp;
3877 /* Parse a Multipath Entry */
3878 while (rtnh_ok(rtnh, remaining)) {
3879 memcpy(&r_cfg, cfg, sizeof(*cfg));
3880 if (rtnh->rtnh_ifindex)
3881 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
3883 attrlen = rtnh_attrlen(rtnh);
3885 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
3887 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
3889 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
3890 r_cfg.fc_flags |= RTF_GATEWAY;
3893 err = ip6_route_del(&r_cfg, extack);
3897 rtnh = rtnh_next(rtnh, &remaining);
3903 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3904 struct netlink_ext_ack *extack)
3906 struct fib6_config cfg;
3909 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3914 return ip6_route_multipath_del(&cfg, extack);
3916 cfg.fc_delete_all_nh = 1;
3917 return ip6_route_del(&cfg, extack);
3921 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
3922 struct netlink_ext_ack *extack)
3924 struct fib6_config cfg;
3927 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
3932 return ip6_route_multipath_add(&cfg, extack);
3934 return ip6_route_add(&cfg, extack);
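/* Upper bound on the netlink message size needed to dump this route,
 * including RTA_MULTIPATH space for every sibling nexthop.
 */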
3937 static size_t rt6_nlmsg_size(struct rt6_info *rt)
3939 int nexthop_len = 0;
3941 if (rt->rt6i_nsiblings) {
3942 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
3943 + NLA_ALIGN(sizeof(struct rtnexthop))
3944 + nla_total_size(16) /* RTA_GATEWAY */
3945 + lwtunnel_get_encap_size(rt->dst.lwtstate);
3947 nexthop_len *= rt->rt6i_nsiblings;
3950 return NLMSG_ALIGN(sizeof(struct rtmsg))
3951 + nla_total_size(16) /* RTA_SRC */
3952 + nla_total_size(16) /* RTA_DST */
3953 + nla_total_size(16) /* RTA_GATEWAY */
3954 + nla_total_size(16) /* RTA_PREFSRC */
3955 + nla_total_size(4) /* RTA_TABLE */
3956 + nla_total_size(4) /* RTA_IIF */
3957 + nla_total_size(4) /* RTA_OIF */
3958 + nla_total_size(4) /* RTA_PRIORITY */
3959 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
3960 + nla_total_size(sizeof(struct rta_cacheinfo))
3961 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
3962 + nla_total_size(1) /* RTA_PREF */
3963 + lwtunnel_get_encap_size(rt->dst.lwtstate)
3967 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
3968 unsigned int *flags, bool skip_oif)
3970 if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
3971 *flags |= RTNH_F_LINKDOWN;
3972 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
3973 *flags |= RTNH_F_DEAD;
3976 if (rt->rt6i_flags & RTF_GATEWAY) {
3977 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
3978 goto nla_put_failure;
3981 if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD)
3982 *flags |= RTNH_F_OFFLOAD;
3984 /* not needed for multipath encoding because it has an rtnexthop struct */
3985 if (!skip_oif && rt->dst.dev &&
3986 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
3987 goto nla_put_failure;
3989 if (rt->dst.lwtstate &&
3990 lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
3991 goto nla_put_failure;
3999 /* add multipath next hop */
4000 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
4002 struct rtnexthop *rtnh;
4003 unsigned int flags = 0;
4005 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
4007 goto nla_put_failure;
4009 rtnh->rtnh_hops = 0;
4010 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
4012 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
4013 goto nla_put_failure;
4015 rtnh->rtnh_flags = flags;
4017 /* length of rtnetlink header + attributes */
4018 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
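/* Fill a netlink route message for @rt: rtmsg header, addresses,
 * metrics, nexthop(s) and cache info.
 */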
4026 static int rt6_fill_node(struct net *net,
4027 struct sk_buff *skb, struct rt6_info *rt,
4028 struct in6_addr *dst, struct in6_addr *src,
4029 int iif, int type, u32 portid, u32 seq,
4032 u32 metrics[RTAX_MAX];
4034 struct nlmsghdr *nlh;
4038 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4042 rtm = nlmsg_data(nlh);
4043 rtm->rtm_family = AF_INET6;
4044 rtm->rtm_dst_len = rt->rt6i_dst.plen;
4045 rtm->rtm_src_len = rt->rt6i_src.plen;
4048 table = rt->rt6i_table->tb6_id;
4050 table = RT6_TABLE_UNSPEC;
4051 rtm->rtm_table = table;
4052 if (nla_put_u32(skb, RTA_TABLE, table))
4053 goto nla_put_failure;
4054 if (rt->rt6i_flags & RTF_REJECT) {
4055 switch (rt->dst.error) {
4057 rtm->rtm_type = RTN_BLACKHOLE;
4060 rtm->rtm_type = RTN_PROHIBIT;
4063 rtm->rtm_type = RTN_THROW;
4066 rtm->rtm_type = RTN_UNREACHABLE;
4070 else if (rt->rt6i_flags & RTF_LOCAL)
4071 rtm->rtm_type = RTN_LOCAL;
4072 else if (rt->rt6i_flags & RTF_ANYCAST)
4073 rtm->rtm_type = RTN_ANYCAST;
4074 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
4075 rtm->rtm_type = RTN_LOCAL;
4077 rtm->rtm_type = RTN_UNICAST;
4079 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4080 rtm->rtm_protocol = rt->rt6i_protocol;
4082 if (rt->rt6i_flags & RTF_CACHE)
4083 rtm->rtm_flags |= RTM_F_CLONED;
4086 if (nla_put_in6_addr(skb, RTA_DST, dst))
4087 goto nla_put_failure;
4088 rtm->rtm_dst_len = 128;
4089 } else if (rtm->rtm_dst_len)
4090 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
4091 goto nla_put_failure;
4092 #ifdef CONFIG_IPV6_SUBTREES
4094 if (nla_put_in6_addr(skb, RTA_SRC, src))
4095 goto nla_put_failure;
4096 rtm->rtm_src_len = 128;
4097 } else if (rtm->rtm_src_len &&
4098 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
4099 goto nla_put_failure;
4102 #ifdef CONFIG_IPV6_MROUTE
4103 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
4104 int err = ip6mr_get_route(net, skb, rtm, portid);
4109 goto nla_put_failure;
4112 if (nla_put_u32(skb, RTA_IIF, iif))
4113 goto nla_put_failure;
4115 struct in6_addr saddr_buf;
4116 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
4117 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4118 goto nla_put_failure;
4121 if (rt->rt6i_prefsrc.plen) {
4122 struct in6_addr saddr_buf;
4123 saddr_buf = rt->rt6i_prefsrc.addr;
4124 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4125 goto nla_put_failure;
4128 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
4130 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
4131 if (rtnetlink_put_metrics(skb, metrics) < 0)
4132 goto nla_put_failure;
4134 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
4135 goto nla_put_failure;
4137 /* For multipath routes, walk the siblings list and add
4138 * each as a nexthop within RTA_MULTIPATH.
4140 if (rt->rt6i_nsiblings) {
4141 struct rt6_info *sibling, *next_sibling;
4144 mp = nla_nest_start(skb, RTA_MULTIPATH);
4146 goto nla_put_failure;
4148 if (rt6_add_nexthop(skb, rt) < 0)
4149 goto nla_put_failure;
4151 list_for_each_entry_safe(sibling, next_sibling,
4152 &rt->rt6i_siblings, rt6i_siblings) {
4153 if (rt6_add_nexthop(skb, sibling) < 0)
4154 goto nla_put_failure;
4157 nla_nest_end(skb, mp);
4159 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
4160 goto nla_put_failure;
4163 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
4165 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
4166 goto nla_put_failure;
4168 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
4169 goto nla_put_failure;
4172 nlmsg_end(skb, nlh);
4176 nlmsg_cancel(skb, nlh);
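/* fib6 dump callback: skip the null entry and, when the dump requested
 * prefix routes only, skip routes without RTF_PREFIX_RT.
 */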
4180 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
4182 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
4183 struct net *net = arg->net;
4185 if (rt == net->ipv6.ip6_null_entry)
4188 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
4189 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
4191 /* user wants prefix routes only */
4192 if (rtm->rtm_flags & RTM_F_PREFIX &&
4193 !(rt->rt6i_flags & RTF_PREFIX_RT)) {
4194 /* success since this is not a prefix route */
4199 return rt6_fill_node(net,
4200 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
4201 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
4205 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4206 struct netlink_ext_ack *extack)
4208 struct net *net = sock_net(in_skb->sk);
4209 struct nlattr *tb[RTA_MAX+1];
4210 int err, iif = 0, oif = 0;
4211 struct dst_entry *dst;
4212 struct rt6_info *rt;
4213 struct sk_buff *skb;
4218 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
4224 memset(&fl6, 0, sizeof(fl6));
4225 rtm = nlmsg_data(nlh);
4226 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
4227 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
4230 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
4233 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
4237 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
4240 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
4244 iif = nla_get_u32(tb[RTA_IIF]);
4247 oif = nla_get_u32(tb[RTA_OIF]);
4250 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
4253 fl6.flowi6_uid = make_kuid(current_user_ns(),
4254 nla_get_u32(tb[RTA_UID]));
4256 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
4259 struct net_device *dev;
4264 dev = dev_get_by_index_rcu(net, iif);
4271 fl6.flowi6_iif = iif;
4273 if (!ipv6_addr_any(&fl6.saddr))
4274 flags |= RT6_LOOKUP_F_HAS_SADDR;
4277 dst = ip6_route_input_lookup(net, dev, &fl6, flags);
4279 dst = ip6_route_lookup(net, &fl6, 0);
4283 fl6.flowi6_oif = oif;
4286 dst = ip6_route_output(net, NULL, &fl6);
4288 dst = ip6_route_lookup(net, &fl6, 0);
4292 rt = container_of(dst, struct rt6_info, dst);
4293 if (rt->dst.error) {
4294 err = rt->dst.error;
4299 if (rt == net->ipv6.ip6_null_entry) {
4300 err = rt->dst.error;
4305 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
4312 skb_dst_set(skb, &rt->dst);
4314 err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
4315 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4318 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
4319 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4326 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
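/* Send an rtnetlink notification for @rt to RTNLGRP_IPV6_ROUTE
 * listeners, reporting the error via rtnl_set_sk_err() on failure.
 */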
4331 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
4332 unsigned int nlm_flags)
4334 struct sk_buff *skb;
4335 struct net *net = info->nl_net;
4340 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4342 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4346 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
4347 event, info->portid, seq, nlm_flags);
4349 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
4350 WARN_ON(err == -EMSGSIZE);
4354 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4355 info->nlh, gfp_any());
4359 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
4362 static int ip6_route_dev_notify(struct notifier_block *this,
4363 unsigned long event, void *ptr)
4365 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4366 struct net *net = dev_net(dev);
4368 if (!(dev->flags & IFF_LOOPBACK))
4371 if (event == NETDEV_REGISTER) {
4372 net->ipv6.ip6_null_entry->dst.dev = dev;
4373 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
4374 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4375 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
4376 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
4377 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
4378 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4380 } else if (event == NETDEV_UNREGISTER &&
4381 dev->reg_state != NETREG_UNREGISTERED) {
4382 /* NETDEV_UNREGISTER could be fired multiple times by
4383 * netdev_wait_allrefs(). Make sure we only call this once.
4385 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
4386 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4387 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
4388 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
4399 #ifdef CONFIG_PROC_FS
4401 static const struct file_operations ipv6_route_proc_fops = {
4402 .owner = THIS_MODULE,
4403 .open = ipv6_route_open,
4405 .llseek = seq_lseek,
4406 .release = seq_release_net,
4409 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
4411 struct net *net = (struct net *)seq->private;
4412 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
4413 net->ipv6.rt6_stats->fib_nodes,
4414 net->ipv6.rt6_stats->fib_route_nodes,
4415 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
4416 net->ipv6.rt6_stats->fib_rt_entries,
4417 net->ipv6.rt6_stats->fib_rt_cache,
4418 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
4419 net->ipv6.rt6_stats->fib_discarded_routes);
4424 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
4426 return single_open_net(inode, file, rt6_stats_seq_show);
4429 static const struct file_operations rt6_stats_seq_fops = {
4430 .owner = THIS_MODULE,
4431 .open = rt6_stats_seq_open,
4433 .llseek = seq_lseek,
4434 .release = single_release_net,
4436 #endif /* CONFIG_PROC_FS */
4438 #ifdef CONFIG_SYSCTL
4441 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
4442 void __user *buffer, size_t *lenp, loff_t *ppos)
4449 net = (struct net *)ctl->extra1;
4450 delay = net->ipv6.sysctl.flush_delay;
4451 proc_dointvec(ctl, write, buffer, lenp, ppos);
4452 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
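/* Template for the per-netns IPv6 route sysctls; ipv6_route_sysctl_init()
 * duplicates it and points each entry at the netns-local value.
 */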
4456 struct ctl_table ipv6_route_table_template[] = {
4458 .procname = "flush",
4459 .data = &init_net.ipv6.sysctl.flush_delay,
4460 .maxlen = sizeof(int),
4462 .proc_handler = ipv6_sysctl_rtcache_flush
4465 .procname = "gc_thresh",
4466 .data = &ip6_dst_ops_template.gc_thresh,
4467 .maxlen = sizeof(int),
4469 .proc_handler = proc_dointvec,
4472 .procname = "max_size",
4473 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
4474 .maxlen = sizeof(int),
4476 .proc_handler = proc_dointvec,
4479 .procname = "gc_min_interval",
4480 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
4481 .maxlen = sizeof(int),
4483 .proc_handler = proc_dointvec_jiffies,
4486 .procname = "gc_timeout",
4487 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
4488 .maxlen = sizeof(int),
4490 .proc_handler = proc_dointvec_jiffies,
4493 .procname = "gc_interval",
4494 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
4495 .maxlen = sizeof(int),
4497 .proc_handler = proc_dointvec_jiffies,
4500 .procname = "gc_elasticity",
4501 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
4502 .maxlen = sizeof(int),
4504 .proc_handler = proc_dointvec,
4507 .procname = "mtu_expires",
4508 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
4509 .maxlen = sizeof(int),
4511 .proc_handler = proc_dointvec_jiffies,
4514 .procname = "min_adv_mss",
4515 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
4516 .maxlen = sizeof(int),
4518 .proc_handler = proc_dointvec,
4521 .procname = "gc_min_interval_ms",
4522 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
4523 .maxlen = sizeof(int),
4525 .proc_handler = proc_dointvec_ms_jiffies,
4530 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
4532 struct ctl_table *table;
4534 table = kmemdup(ipv6_route_table_template,
4535 sizeof(ipv6_route_table_template),
4539 table[0].data = &net->ipv6.sysctl.flush_delay;
4540 table[0].extra1 = net;
4541 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
4542 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
4543 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
4544 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
4545 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
4546 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
4547 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
4548 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
4549 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
4551 /* Don't export sysctls to unprivileged users */
4552 if (net->user_ns != &init_user_ns)
4553 table[0].procname = NULL;
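/* Per-netns setup: copy the dst_ops template, allocate the null (and,
 * with CONFIG_IPV6_MULTIPLE_TABLES, prohibit/blackhole) template routes,
 * and set the default GC and MTU sysctl values.
 */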
4560 static int __net_init ip6_route_net_init(struct net *net)
4564 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
4565 sizeof(net->ipv6.ip6_dst_ops));
4567 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
4568 goto out_ip6_dst_ops;
4570 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
4571 sizeof(*net->ipv6.ip6_null_entry),
4573 if (!net->ipv6.ip6_null_entry)
4574 goto out_ip6_dst_entries;
4575 net->ipv6.ip6_null_entry->dst.path =
4576 (struct dst_entry *)net->ipv6.ip6_null_entry;
4577 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4578 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
4579 ip6_template_metrics, true);
4581 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4582 net->ipv6.fib6_has_custom_rules = false;
4583 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
4584 sizeof(*net->ipv6.ip6_prohibit_entry),
4586 if (!net->ipv6.ip6_prohibit_entry)
4587 goto out_ip6_null_entry;
4588 net->ipv6.ip6_prohibit_entry->dst.path =
4589 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
4590 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4591 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
4592 ip6_template_metrics, true);
4594 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
4595 sizeof(*net->ipv6.ip6_blk_hole_entry),
4597 if (!net->ipv6.ip6_blk_hole_entry)
4598 goto out_ip6_prohibit_entry;
4599 net->ipv6.ip6_blk_hole_entry->dst.path =
4600 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
4601 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
4602 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
4603 ip6_template_metrics, true);
4606 net->ipv6.sysctl.flush_delay = 0;
4607 net->ipv6.sysctl.ip6_rt_max_size = 4096;
4608 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
4609 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
4610 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
4611 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
4612 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
4613 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
4615 net->ipv6.ip6_rt_gc_expire = 30*HZ;
4621 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4622 out_ip6_prohibit_entry:
4623 kfree(net->ipv6.ip6_prohibit_entry);
4625 kfree(net->ipv6.ip6_null_entry);
4627 out_ip6_dst_entries:
4628 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
4633 static void __net_exit ip6_route_net_exit(struct net *net)
4635 kfree(net->ipv6.ip6_null_entry);
4636 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4637 kfree(net->ipv6.ip6_prohibit_entry);
4638 kfree(net->ipv6.ip6_blk_hole_entry);
4640 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
4643 static int __net_init ip6_route_net_init_late(struct net *net)
4645 #ifdef CONFIG_PROC_FS
4646 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
4647 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
4652 static void __net_exit ip6_route_net_exit_late(struct net *net)
4654 #ifdef CONFIG_PROC_FS
4655 remove_proc_entry("ipv6_route", net->proc_net);
4656 remove_proc_entry("rt6_stats", net->proc_net);
4660 static struct pernet_operations ip6_route_net_ops = {
4661 .init = ip6_route_net_init,
4662 .exit = ip6_route_net_exit,
4665 static int __net_init ipv6_inetpeer_init(struct net *net)
4667 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
4671 inet_peer_base_init(bp);
4672 net->ipv6.peers = bp;
4676 static void __net_exit ipv6_inetpeer_exit(struct net *net)
4678 struct inet_peer_base *bp = net->ipv6.peers;
4680 net->ipv6.peers = NULL;
4681 inetpeer_invalidate_tree(bp);
4685 static struct pernet_operations ipv6_inetpeer_ops = {
4686 .init = ipv6_inetpeer_init,
4687 .exit = ipv6_inetpeer_exit,
4690 static struct pernet_operations ip6_route_net_late_ops = {
4691 .init = ip6_route_net_init_late,
4692 .exit = ip6_route_net_exit_late,
4695 static struct notifier_block ip6_route_dev_notifier = {
4696 .notifier_call = ip6_route_dev_notify,
4697 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
4700 void __init ip6_route_init_special_entries(void)
4702 /* The loopback device is registered before this portion of code runs,
4703 * so the loopback reference in rt6_info will not be taken; do it
4704 * manually for init_net */
4705 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
4706 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4707 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4708 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
4709 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4710 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
4711 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
4715 int __init ip6_route_init(void)
4721 ip6_dst_ops_template.kmem_cachep =
4722 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
4723 SLAB_HWCACHE_ALIGN, NULL);
4724 if (!ip6_dst_ops_template.kmem_cachep)
4727 ret = dst_entries_init(&ip6_dst_blackhole_ops);
4729 goto out_kmem_cache;
4731 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
4733 goto out_dst_entries;
4735 ret = register_pernet_subsys(&ip6_route_net_ops);
4737 goto out_register_inetpeer;
4739 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
4743 goto out_register_subsys;
4749 ret = fib6_rules_init();
4753 ret = register_pernet_subsys(&ip6_route_net_late_ops);
4755 goto fib6_rules_init;
4758 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
4759 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
4760 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
4761 RTNL_FLAG_DOIT_UNLOCKED))
4762 goto out_register_late_subsys;
4764 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
4766 goto out_register_late_subsys;
4768 for_each_possible_cpu(cpu) {
4769 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
4771 INIT_LIST_HEAD(&ul->head);
4772 spin_lock_init(&ul->lock);
4778 out_register_late_subsys:
4779 unregister_pernet_subsys(&ip6_route_net_late_ops);
4781 fib6_rules_cleanup();
4786 out_register_subsys:
4787 unregister_pernet_subsys(&ip6_route_net_ops);
4788 out_register_inetpeer:
4789 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4791 dst_entries_destroy(&ip6_dst_blackhole_ops);
4793 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
4797 void ip6_route_cleanup(void)
4799 unregister_netdevice_notifier(&ip6_route_dev_notifier);
4800 unregister_pernet_subsys(&ip6_route_net_late_ops);
4801 fib6_rules_cleanup();
4804 unregister_pernet_subsys(&ipv6_inetpeer_ops);
4805 unregister_pernet_subsys(&ip6_route_net_ops);
4806 dst_entries_destroy(&ip6_dst_blackhole_ops);
4807 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);