// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  Otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */
23 #define pr_fmt(fmt) "IPv6: " fmt
25 #include <linux/capability.h>
26 #include <linux/errno.h>
27 #include <linux/export.h>
28 #include <linux/types.h>
29 #include <linux/times.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/route.h>
34 #include <linux/netdevice.h>
35 #include <linux/in6.h>
36 #include <linux/mroute6.h>
37 #include <linux/init.h>
38 #include <linux/if_arp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/nsproxy.h>
42 #include <linux/slab.h>
43 #include <linux/jhash.h>
44 #include <net/net_namespace.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
52 #include <linux/rtnetlink.h>
54 #include <net/dst_metadata.h>
56 #include <net/netevent.h>
57 #include <net/netlink.h>
59 #include <net/lwtunnel.h>
60 #include <net/ip_tunnels.h>
61 #include <net/l3mdev.h>
63 #include <linux/uaccess.h>
66 #include <linux/sysctl.h>
69 static int ip6_rt_type_to_error(u8 fib6_type);
71 #define CREATE_TRACE_POINTS
72 #include <trace/events/fib6.h>
73 EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
74 #undef CREATE_TRACE_POINTS
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};
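/* Negative values are returned by rt6_check_neigh()/rt6_score_route() to tell
 * find_match() how to treat a nexthop whose neighbour is not known to be
 * reachable: FAIL_DO_RR still allows the route but triggers round-robin of the
 * sibling list, FAIL_PROBE only loses out to genuinely reachable routers, and
 * FAIL_HARD excludes the nexthop completely.
 */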
83 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
84 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
85 static unsigned int ip6_mtu(const struct dst_entry *dst);
86 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
87 static void ip6_dst_destroy(struct dst_entry *);
88 static void ip6_dst_ifdown(struct dst_entry *,
89 struct net_device *dev, int how);
90 static int ip6_dst_gc(struct dst_ops *ops);
92 static int ip6_pkt_discard(struct sk_buff *skb);
93 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
94 static int ip6_pkt_prohibit(struct sk_buff *skb);
95 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
96 static void ip6_link_failure(struct sk_buff *skb);
97 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
98 struct sk_buff *skb, u32 mtu);
99 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
100 struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *f6i);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
109 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
110 const struct in6_addr *daddr,
111 const struct in6_addr *saddr);
113 #ifdef CONFIG_IPV6_ROUTE_INFO
114 static struct fib6_info *rt6_add_route_info(struct net *net,
115 const struct in6_addr *prefix, int prefixlen,
116 const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref);
119 static struct fib6_info *rt6_get_route_info(struct net *net,
120 const struct in6_addr *prefix, int prefixlen,
121 const struct in6_addr *gwaddr,
122 struct net_device *dev);
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};
130 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
132 void rt6_uncached_list_add(struct rt6_info *rt)
134 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
136 rt->rt6i_uncached_list = ul;
138 spin_lock_bh(&ul->lock);
139 list_add_tail(&rt->rt6i_uncached, &ul->head);
140 spin_unlock_bh(&ul->lock);
143 void rt6_uncached_list_del(struct rt6_info *rt)
145 if (!list_empty(&rt->rt6i_uncached)) {
146 struct uncached_list *ul = rt->rt6i_uncached_list;
147 struct net *net = dev_net(rt->dst.dev);
149 spin_lock_bh(&ul->lock);
150 list_del(&rt->rt6i_uncached);
151 atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
152 spin_unlock_bh(&ul->lock);
156 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
158 struct net_device *loopback_dev = net->loopback_dev;
161 if (dev == loopback_dev)
164 for_each_possible_cpu(cpu) {
165 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
168 spin_lock_bh(&ul->lock);
169 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
170 struct inet6_dev *rt_idev = rt->rt6i_idev;
171 struct net_device *rt_dev = rt->dst.dev;
173 if (rt_idev->dev == dev) {
174 rt->rt6i_idev = in6_dev_get(loopback_dev);
175 in6_dev_put(rt_idev);
179 rt->dst.dev = blackhole_netdev;
180 dev_hold(rt->dst.dev);
184 spin_unlock_bh(&ul->lock);
188 static inline const void *choose_neigh_daddr(const struct in6_addr *p,
192 if (!ipv6_addr_any(p))
193 return (const void *) p;
195 return &ipv6_hdr(skb)->daddr;
199 struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
200 struct net_device *dev,
206 daddr = choose_neigh_daddr(gw, skb, daddr);
207 n = __ipv6_neigh_lookup(dev, daddr);
211 n = neigh_create(&nd_tbl, daddr, dev);
212 return IS_ERR(n) ? NULL : n;
215 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
219 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
221 return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
222 dst->dev, skb, daddr);
225 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
227 struct net_device *dev = dst->dev;
228 struct rt6_info *rt = (struct rt6_info *)dst;
230 daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
233 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
235 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
237 __ipv6_confirm_neigh(dev, daddr);
240 static struct dst_ops ip6_dst_ops_template = {
244 .check = ip6_dst_check,
245 .default_advmss = ip6_default_advmss,
247 .cow_metrics = dst_cow_metrics_generic,
248 .destroy = ip6_dst_destroy,
249 .ifdown = ip6_dst_ifdown,
250 .negative_advice = ip6_negative_advice,
251 .link_failure = ip6_link_failure,
252 .update_pmtu = ip6_rt_update_pmtu,
253 .redirect = rt6_do_redirect,
254 .local_out = __ip6_local_out,
255 .neigh_lookup = ip6_dst_neigh_lookup,
256 .confirm_neigh = ip6_confirm_neigh,
259 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
261 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
263 return mtu ? : dst->dev->mtu;
266 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
267 struct sk_buff *skb, u32 mtu)
271 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
276 static struct dst_ops ip6_dst_blackhole_ops = {
278 .destroy = ip6_dst_destroy,
279 .check = ip6_dst_check,
280 .mtu = ip6_blackhole_mtu,
281 .default_advmss = ip6_default_advmss,
282 .update_pmtu = ip6_rt_blackhole_update_pmtu,
283 .redirect = ip6_rt_blackhole_redirect,
284 .cow_metrics = dst_cow_metrics_generic,
285 .neigh_lookup = ip6_dst_neigh_lookup,
288 static const u32 ip6_template_metrics[RTAX_MAX] = {
289 [RTAX_HOPLIMIT - 1] = 0,
292 static const struct fib6_info fib6_null_entry_template = {
293 .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP),
294 .fib6_protocol = RTPROT_KERNEL,
295 .fib6_metric = ~(u32)0,
296 .fib6_ref = REFCOUNT_INIT(1),
297 .fib6_type = RTN_UNREACHABLE,
298 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
301 static const struct rt6_info ip6_null_entry_template = {
303 .__refcnt = ATOMIC_INIT(1),
305 .obsolete = DST_OBSOLETE_FORCE_CHK,
306 .error = -ENETUNREACH,
307 .input = ip6_pkt_discard,
308 .output = ip6_pkt_discard_out,
310 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
313 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
315 static const struct rt6_info ip6_prohibit_entry_template = {
317 .__refcnt = ATOMIC_INIT(1),
319 .obsolete = DST_OBSOLETE_FORCE_CHK,
321 .input = ip6_pkt_prohibit,
322 .output = ip6_pkt_prohibit_out,
324 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
327 static const struct rt6_info ip6_blk_hole_entry_template = {
329 .__refcnt = ATOMIC_INIT(1),
331 .obsolete = DST_OBSOLETE_FORCE_CHK,
333 .input = dst_discard,
334 .output = dst_discard_out,
336 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
341 static void rt6_info_init(struct rt6_info *rt)
343 struct dst_entry *dst = &rt->dst;
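	/* Zero every rt6_info field that follows the embedded dst_entry;
	 * dst_alloc() has already set up the dst itself.
	 */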
345 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
346 INIT_LIST_HEAD(&rt->rt6i_uncached);
349 /* allocate dst with ip6_dst_ops */
350 struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
353 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
354 1, DST_OBSOLETE_FORCE_CHK, flags);
358 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
363 EXPORT_SYMBOL(ip6_dst_alloc);
365 static void ip6_dst_destroy(struct dst_entry *dst)
367 struct rt6_info *rt = (struct rt6_info *)dst;
368 struct fib6_info *from;
369 struct inet6_dev *idev;
371 ip_dst_metrics_put(dst);
372 rt6_uncached_list_del(rt);
374 idev = rt->rt6i_idev;
376 rt->rt6i_idev = NULL;
380 from = xchg((__force struct fib6_info **)&rt->from, NULL);
381 fib6_info_release(from);
384 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
387 struct rt6_info *rt = (struct rt6_info *)dst;
388 struct inet6_dev *idev = rt->rt6i_idev;
389 struct net_device *loopback_dev =
390 dev_net(dev)->loopback_dev;
392 if (idev && idev->dev != loopback_dev) {
393 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
395 rt->rt6i_idev = loopback_idev;
401 static bool __rt6_check_expired(const struct rt6_info *rt)
403 if (rt->rt6i_flags & RTF_EXPIRES)
404 return time_after(jiffies, rt->dst.expires);
409 static bool rt6_check_expired(const struct rt6_info *rt)
411 struct fib6_info *from;
413 from = rcu_dereference(rt->from);
415 if (rt->rt6i_flags & RTF_EXPIRES) {
416 if (time_after(jiffies, rt->dst.expires))
419 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
420 fib6_check_expired(from);
425 void fib6_select_path(const struct net *net, struct fib6_result *res,
426 struct flowi6 *fl6, int oif, bool have_oif_match,
427 const struct sk_buff *skb, int strict)
429 struct fib6_info *sibling, *next_sibling;
430 struct fib6_info *match = res->f6i;
432 if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
435 /* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash &&
	    (!match->nh || nexthop_is_multipath(match->nh)))
440 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
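	/* A rough sketch of the selection below (not from the original source):
	 * each sibling's fib_nh_upper_bound marks the top of its share of the
	 * hash space in proportion to its weight, so with two nexthops of
	 * weight 1 and 3 a flow whose mp_hash lands in the lowest quarter uses
	 * the first nexthop and everything above it uses the second.
	 */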
442 if (unlikely(match->nh)) {
443 nexthop_path_fib6_result(res, fl6->mp_hash);
447 if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
450 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
452 const struct fib6_nh *nh = sibling->fib6_nh;
455 nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
456 if (fl6->mp_hash > nh_upper_bound)
458 if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
466 res->nh = match->fib6_nh;
/*
 *	Route lookup. rcu_read_lock() should be held.
 */
473 static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
474 const struct in6_addr *saddr, int oif, int flags)
476 const struct net_device *dev;
478 if (nh->fib_nh_flags & RTNH_F_DEAD)
481 dev = nh->fib_nh_dev;
483 if (dev->ifindex == oif)
486 if (ipv6_chk_addr(net, saddr, dev,
487 flags & RT6_LOOKUP_F_IFACE))
494 struct fib6_nh_dm_arg {
496 const struct in6_addr *saddr;
502 static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
504 struct fib6_nh_dm_arg *arg = _arg;
507 return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
511 /* returns fib6_nh from nexthop or NULL */
512 static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
513 struct fib6_result *res,
514 const struct in6_addr *saddr,
517 struct fib6_nh_dm_arg arg = {
524 if (nexthop_is_blackhole(nh))
527 if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
533 static void rt6_device_match(struct net *net, struct fib6_result *res,
534 const struct in6_addr *saddr, int oif, int flags)
536 struct fib6_info *f6i = res->f6i;
537 struct fib6_info *spf6i;
540 if (!oif && ipv6_addr_any(saddr)) {
541 if (unlikely(f6i->nh)) {
542 nh = nexthop_fib6_nh(f6i->nh);
543 if (nexthop_is_blackhole(f6i->nh))
548 if (!(nh->fib_nh_flags & RTNH_F_DEAD))
552 for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
553 bool matched = false;
555 if (unlikely(spf6i->nh)) {
556 nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
562 if (__rt6_device_match(net, nh, saddr, oif, flags))
571 if (oif && flags & RT6_LOOKUP_F_IFACE) {
572 res->f6i = net->ipv6.fib6_null_entry;
573 nh = res->f6i->fib6_nh;
577 if (unlikely(f6i->nh)) {
578 nh = nexthop_fib6_nh(f6i->nh);
579 if (nexthop_is_blackhole(f6i->nh))
585 if (nh->fib_nh_flags & RTNH_F_DEAD) {
586 res->f6i = net->ipv6.fib6_null_entry;
587 nh = res->f6i->fib6_nh;
591 res->fib6_type = res->f6i->fib6_type;
592 res->fib6_flags = res->f6i->fib6_flags;
596 res->fib6_flags |= RTF_REJECT;
597 res->fib6_type = RTN_BLACKHOLE;
601 #ifdef CONFIG_IPV6_ROUTER_PREF
602 struct __rt6_probe_work {
603 struct work_struct work;
604 struct in6_addr target;
605 struct net_device *dev;
608 static void rt6_probe_deferred(struct work_struct *w)
610 struct in6_addr mcaddr;
611 struct __rt6_probe_work *work =
612 container_of(w, struct __rt6_probe_work, work);
614 addrconf_addr_solict_mult(&work->target, &mcaddr);
615 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
620 static void rt6_probe(struct fib6_nh *fib6_nh)
622 struct __rt6_probe_work *work = NULL;
623 const struct in6_addr *nh_gw;
624 struct neighbour *neigh;
625 struct net_device *dev;
626 struct inet6_dev *idev;
	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!fib6_nh->fib_nh_gw_family)
		return;
639 nh_gw = &fib6_nh->fib_nh_gw6;
640 dev = fib6_nh->fib_nh_dev;
642 idev = __in6_dev_get(dev);
643 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
645 if (neigh->nud_state & NUD_VALID)
648 write_lock(&neigh->lock);
649 if (!(neigh->nud_state & NUD_VALID) &&
651 neigh->updated + idev->cnf.rtr_probe_interval)) {
652 work = kmalloc(sizeof(*work), GFP_ATOMIC);
654 __neigh_set_probe_once(neigh);
656 write_unlock(&neigh->lock);
657 } else if (time_after(jiffies, fib6_nh->last_probe +
658 idev->cnf.rtr_probe_interval)) {
659 work = kmalloc(sizeof(*work), GFP_ATOMIC);
663 fib6_nh->last_probe = jiffies;
664 INIT_WORK(&work->work, rt6_probe_deferred);
665 work->target = *nh_gw;
668 schedule_work(&work->work);
672 rcu_read_unlock_bh();
675 static inline void rt6_probe(struct fib6_nh *fib6_nh)
/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
683 static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
685 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
686 struct neighbour *neigh;
689 neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
690 &fib6_nh->fib_nh_gw6);
692 read_lock(&neigh->lock);
693 if (neigh->nud_state & NUD_VALID)
694 ret = RT6_NUD_SUCCEED;
695 #ifdef CONFIG_IPV6_ROUTER_PREF
696 else if (!(neigh->nud_state & NUD_FAILED))
697 ret = RT6_NUD_SUCCEED;
699 ret = RT6_NUD_FAIL_PROBE;
701 read_unlock(&neigh->lock);
703 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
704 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
706 rcu_read_unlock_bh();
711 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
716 if (!oif || nh->fib_nh_dev->ifindex == oif)
719 if (!m && (strict & RT6_LOOKUP_F_IFACE))
720 return RT6_NUD_FAIL_HARD;
721 #ifdef CONFIG_IPV6_ROUTER_PREF
722 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
724 if ((strict & RT6_LOOKUP_F_REACHABLE) &&
725 !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
726 int n = rt6_check_neigh(nh);
733 static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
734 int oif, int strict, int *mpri, bool *do_rr)
736 bool match_do_rr = false;
740 if (nh->fib_nh_flags & RTNH_F_DEAD)
743 if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
744 nh->fib_nh_flags & RTNH_F_LINKDOWN &&
745 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
748 m = rt6_score_route(nh, fib6_flags, oif, strict);
749 if (m == RT6_NUD_FAIL_DO_RR) {
751 m = 0; /* lowest valid score */
752 } else if (m == RT6_NUD_FAIL_HARD) {
756 if (strict & RT6_LOOKUP_F_REACHABLE)
759 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
761 *do_rr = match_do_rr;
769 struct fib6_nh_frl_arg {
778 static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
780 struct fib6_nh_frl_arg *arg = _arg;
783 return find_match(nh, arg->flags, arg->oif, arg->strict,
784 arg->mpri, arg->do_rr);
787 static void __find_rr_leaf(struct fib6_info *f6i_start,
788 struct fib6_info *nomatch, u32 metric,
789 struct fib6_result *res, struct fib6_info **cont,
790 int oif, int strict, bool *do_rr, int *mpri)
792 struct fib6_info *f6i;
794 for (f6i = f6i_start;
795 f6i && f6i != nomatch;
796 f6i = rcu_dereference(f6i->fib6_next)) {
797 bool matched = false;
800 if (cont && f6i->fib6_metric != metric) {
805 if (fib6_check_expired(f6i))
808 if (unlikely(f6i->nh)) {
809 struct fib6_nh_frl_arg arg = {
810 .flags = f6i->fib6_flags,
817 if (nexthop_is_blackhole(f6i->nh)) {
818 res->fib6_flags = RTF_REJECT;
819 res->fib6_type = RTN_BLACKHOLE;
821 res->nh = nexthop_fib6_nh(f6i->nh);
824 if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
831 if (find_match(nh, f6i->fib6_flags, oif, strict,
838 res->fib6_flags = f6i->fib6_flags;
839 res->fib6_type = f6i->fib6_type;
844 static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
845 struct fib6_info *rr_head, int oif, int strict,
846 bool *do_rr, struct fib6_result *res)
848 u32 metric = rr_head->fib6_metric;
849 struct fib6_info *cont = NULL;
852 __find_rr_leaf(rr_head, NULL, metric, res, &cont,
853 oif, strict, do_rr, &mpri);
855 __find_rr_leaf(leaf, rr_head, metric, res, &cont,
856 oif, strict, do_rr, &mpri);
858 if (res->f6i || !cont)
861 __find_rr_leaf(cont, NULL, metric, res, NULL,
862 oif, strict, do_rr, &mpri);
865 static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
866 struct fib6_result *res, int strict)
868 struct fib6_info *leaf = rcu_dereference(fn->leaf);
869 struct fib6_info *rt0;
873 /* make sure this function or its helpers sets f6i */
876 if (!leaf || leaf == net->ipv6.fib6_null_entry)
879 rt0 = rcu_dereference(fn->rr_ptr);
	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
888 key_plen = rt0->fib6_dst.plen;
889 #ifdef CONFIG_IPV6_SUBTREES
890 if (rt0->fib6_src.plen)
891 key_plen = rt0->fib6_src.plen;
893 if (fn->fn_bit != key_plen)
896 find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
898 struct fib6_info *next = rcu_dereference(rt0->fib6_next);
900 /* no entries matched; do round-robin */
901 if (!next || next->fib6_metric != rt0->fib6_metric)
905 spin_lock_bh(&leaf->fib6_table->tb6_lock);
906 /* make sure next is not being deleted from the tree */
908 rcu_assign_pointer(fn->rr_ptr, next);
909 spin_unlock_bh(&leaf->fib6_table->tb6_lock);
915 res->f6i = net->ipv6.fib6_null_entry;
916 res->nh = res->f6i->fib6_nh;
917 res->fib6_flags = res->f6i->fib6_flags;
918 res->fib6_type = res->f6i->fib6_type;
922 static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
924 return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
925 res->nh->fib_nh_gw_family;
928 #ifdef CONFIG_IPV6_ROUTE_INFO
929 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
930 const struct in6_addr *gwaddr)
932 struct net *net = dev_net(dev);
933 struct route_info *rinfo = (struct route_info *) opt;
934 struct in6_addr prefix_buf, *prefix;
936 unsigned long lifetime;
937 struct fib6_info *rt;
	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}
	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}
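	/* For reference (RFC 4191): the Route Information Option length field
	 * is in units of 8 octets, so length 1 carries no prefix bytes,
	 * length 2 carries the first 64 bits and length 3 a full 128-bit
	 * prefix; prefix_len must therefore fit the advertised length.
	 */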
958 pref = rinfo->route_pref;
959 if (pref == ICMPV6_ROUTER_PREF_INVALID)
962 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
964 if (rinfo->length == 3)
965 prefix = (struct in6_addr *)rinfo->prefix;
967 /* this function is safe */
968 ipv6_addr_prefix(&prefix_buf,
969 (struct in6_addr *)rinfo->prefix,
971 prefix = &prefix_buf;
974 if (rinfo->prefix_len == 0)
975 rt = rt6_get_dflt_router(net, gwaddr, dev);
977 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
980 if (rt && !lifetime) {
986 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
989 rt->fib6_flags = RTF_ROUTEINFO |
990 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
993 if (!addrconf_finite_timeout(lifetime))
994 fib6_clean_expires(rt);
996 fib6_set_expires(rt, jiffies + HZ * lifetime);
998 fib6_info_release(rt);
/*
 *	Misc support functions
 */
1008 /* called with rcu_lock held */
1009 static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1011 struct net_device *dev = res->nh->fib_nh_dev;
1013 if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
1014 /* for copies of local routes, dst->dev needs to be the
1015 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
1018 if (netif_is_l3_slave(dev) &&
1019 !rt6_need_strict(&res->f6i->fib6_dst.addr))
1020 dev = l3mdev_master_dev_rcu(dev);
1021 else if (!netif_is_l3_master(dev))
1022 dev = dev_net(dev)->loopback_dev;
		/* the last case is netif_is_l3_master(dev) being true, in
		 * which case we want to return dev itself
		 */
1031 static const int fib6_prop[RTN_MAX + 1] = {
1035 [RTN_BROADCAST] = 0,
1037 [RTN_MULTICAST] = 0,
1038 [RTN_BLACKHOLE] = -EINVAL,
1039 [RTN_UNREACHABLE] = -EHOSTUNREACH,
1040 [RTN_PROHIBIT] = -EACCES,
1041 [RTN_THROW] = -EAGAIN,
1042 [RTN_NAT] = -EINVAL,
1043 [RTN_XRESOLVE] = -EINVAL,
1046 static int ip6_rt_type_to_error(u8 fib6_type)
1048 return fib6_prop[fib6_type];
1051 static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
1053 unsigned short flags = 0;
1055 if (rt->dst_nocount)
1056 flags |= DST_NOCOUNT;
1057 if (rt->dst_nopolicy)
1058 flags |= DST_NOPOLICY;
1065 static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1067 rt->dst.error = ip6_rt_type_to_error(fib6_type);
1069 switch (fib6_type) {
1071 rt->dst.output = dst_discard_out;
1072 rt->dst.input = dst_discard;
1075 rt->dst.output = ip6_pkt_prohibit_out;
1076 rt->dst.input = ip6_pkt_prohibit;
1079 case RTN_UNREACHABLE:
1081 rt->dst.output = ip6_pkt_discard_out;
1082 rt->dst.input = ip6_pkt_discard;
1087 static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1089 struct fib6_info *f6i = res->f6i;
1091 if (res->fib6_flags & RTF_REJECT) {
1092 ip6_rt_init_dst_reject(rt, res->fib6_type);
1097 rt->dst.output = ip6_output;
1099 if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1100 rt->dst.input = ip6_input;
1101 } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1102 rt->dst.input = ip6_mc_input;
1104 rt->dst.input = ip6_forward;
1107 if (res->nh->fib_nh_lws) {
1108 rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1109 lwtunnel_set_redirect(&rt->dst);
1112 rt->dst.lastuse = jiffies;
1115 /* Caller must already hold reference to @from */
1116 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1118 rt->rt6i_flags &= ~RTF_EXPIRES;
1119 rcu_assign_pointer(rt->from, from);
1120 ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1123 /* Caller must already hold reference to f6i in result */
1124 static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1126 const struct fib6_nh *nh = res->nh;
1127 const struct net_device *dev = nh->fib_nh_dev;
1128 struct fib6_info *f6i = res->f6i;
1130 ip6_rt_init_dst(rt, res);
1132 rt->rt6i_dst = f6i->fib6_dst;
1133 rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1134 rt->rt6i_flags = res->fib6_flags;
1135 if (nh->fib_nh_gw_family) {
1136 rt->rt6i_gateway = nh->fib_nh_gw6;
1137 rt->rt6i_flags |= RTF_GATEWAY;
1139 rt6_set_from(rt, f6i);
1140 #ifdef CONFIG_IPV6_SUBTREES
1141 rt->rt6i_src = f6i->fib6_src;
1145 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1146 struct in6_addr *saddr)
1148 struct fib6_node *pn, *sn;
1150 if (fn->fn_flags & RTN_TL_ROOT)
1152 pn = rcu_dereference(fn->parent);
1153 sn = FIB6_SUBTREE(pn);
1155 fn = fib6_node_lookup(sn, NULL, saddr);
1158 if (fn->fn_flags & RTN_RTINFO)
1163 static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1165 struct rt6_info *rt = *prt;
1167 if (dst_hold_safe(&rt->dst))
1170 rt = net->ipv6.ip6_null_entry;
1179 /* called with rcu_lock held */
1180 static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1182 struct net_device *dev = res->nh->fib_nh_dev;
1183 struct fib6_info *f6i = res->f6i;
1184 unsigned short flags;
1185 struct rt6_info *nrt;
1187 if (!fib6_info_hold_safe(f6i))
1190 flags = fib6_info_dst_flags(f6i);
1191 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1193 fib6_info_release(f6i);
1197 ip6_rt_copy_init(nrt, res);
1201 nrt = dev_net(dev)->ipv6.ip6_null_entry;
1202 dst_hold(&nrt->dst);
1206 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
1207 struct fib6_table *table,
1209 const struct sk_buff *skb,
1212 struct fib6_result res = {};
1213 struct fib6_node *fn;
1214 struct rt6_info *rt;
1216 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1217 flags &= ~RT6_LOOKUP_F_IFACE;
1220 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1222 res.f6i = rcu_dereference(fn->leaf);
1224 res.f6i = net->ipv6.fib6_null_entry;
1226 rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1229 if (res.f6i == net->ipv6.fib6_null_entry) {
1230 fn = fib6_backtrack(fn, &fl6->saddr);
1234 rt = net->ipv6.ip6_null_entry;
1237 } else if (res.fib6_flags & RTF_REJECT) {
1241 fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1242 fl6->flowi6_oif != 0, skb, flags);
1244 /* Search through exception table */
1245 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1247 if (ip6_hold_safe(net, &rt))
1248 dst_use_noref(&rt->dst, jiffies);
1251 rt = ip6_create_rt_rcu(&res);
1255 trace_fib6_table_lookup(net, &res, table, fl6);
1262 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
1263 const struct sk_buff *skb, int flags)
1265 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
1267 EXPORT_SYMBOL_GPL(ip6_route_lookup);
1269 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1270 const struct in6_addr *saddr, int oif,
1271 const struct sk_buff *skb, int strict)
1273 struct flowi6 fl6 = {
1277 struct dst_entry *dst;
1278 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1281 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1282 flags |= RT6_LOOKUP_F_HAS_SADDR;
1285 dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1286 if (dst->error == 0)
1287 return (struct rt6_info *) dst;
1293 EXPORT_SYMBOL(rt6_lookup);
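/* Unlike ip6_route_lookup(), rt6_lookup() hides lookup failures from the
 * caller: when the resulting dst carries an error it is released and NULL is
 * returned instead.
 */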
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * Caller must hold dst before calling it.
 */
1301 static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1302 struct netlink_ext_ack *extack)
1305 struct fib6_table *table;
1307 table = rt->fib6_table;
1308 spin_lock_bh(&table->tb6_lock);
1309 err = fib6_add(&table->tb6_root, rt, info, extack);
1310 spin_unlock_bh(&table->tb6_lock);
1315 int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1317 struct nl_info info = { .nl_net = net, };
1319 return __ip6_ins_rt(rt, &info, NULL);
1322 static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1323 const struct in6_addr *daddr,
1324 const struct in6_addr *saddr)
1326 struct fib6_info *f6i = res->f6i;
1327 struct net_device *dev;
1328 struct rt6_info *rt;
1334 if (!fib6_info_hold_safe(f6i))
1337 dev = ip6_rt_get_dev_rcu(res);
1338 rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1340 fib6_info_release(f6i);
1344 ip6_rt_copy_init(rt, res);
1345 rt->rt6i_flags |= RTF_CACHE;
1346 rt->dst.flags |= DST_HOST;
1347 rt->rt6i_dst.addr = *daddr;
1348 rt->rt6i_dst.plen = 128;
1350 if (!rt6_is_gw_or_nonexthop(res)) {
1351 if (f6i->fib6_dst.plen != 128 &&
1352 ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1353 rt->rt6i_flags |= RTF_ANYCAST;
1354 #ifdef CONFIG_IPV6_SUBTREES
1355 if (rt->rt6i_src.plen && saddr) {
1356 rt->rt6i_src.addr = *saddr;
1357 rt->rt6i_src.plen = 128;
1365 static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1367 struct fib6_info *f6i = res->f6i;
1368 unsigned short flags = fib6_info_dst_flags(f6i);
1369 struct net_device *dev;
1370 struct rt6_info *pcpu_rt;
1372 if (!fib6_info_hold_safe(f6i))
1376 dev = ip6_rt_get_dev_rcu(res);
1377 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
1380 fib6_info_release(f6i);
1383 ip6_rt_copy_init(pcpu_rt, res);
1384 pcpu_rt->rt6i_flags |= RTF_PCPU;
1388 /* It should be called with rcu_read_lock() acquired */
1389 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1391 struct rt6_info *pcpu_rt;
1393 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1398 static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1399 const struct fib6_result *res)
1401 struct rt6_info *pcpu_rt, *prev, **p;
1403 pcpu_rt = ip6_rt_pcpu_alloc(res);
1407 p = this_cpu_ptr(res->nh->rt6i_pcpu);
1408 prev = cmpxchg(p, NULL, pcpu_rt);
1411 if (res->f6i->fib6_destroying) {
1412 struct fib6_info *from;
1414 from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
1415 fib6_info_release(from);
/* exception hash table implementation
 */
1423 static DEFINE_SPINLOCK(rt6_exception_lock);
1425 /* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
1428 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1429 struct rt6_exception *rt6_ex)
1431 struct fib6_info *from;
1434 if (!bucket || !rt6_ex)
1437 net = dev_net(rt6_ex->rt6i->dst.dev);
1438 net->ipv6.rt6_stats->fib_rt_cache--;
1440 /* purge completely the exception to allow releasing the held resources:
	 * some [sk] cache may keep the dst around for unlimited time
	 */
1443 from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
1444 fib6_info_release(from);
1445 dst_dev_put(&rt6_ex->rt6i->dst);
1447 hlist_del_rcu(&rt6_ex->hlist);
1448 dst_release(&rt6_ex->rt6i->dst);
1449 kfree_rcu(rt6_ex, rcu);
1450 WARN_ON_ONCE(!bucket->depth);
1454 /* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
1457 static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1459 struct rt6_exception *rt6_ex, *oldest = NULL;
1464 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1465 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1468 rt6_remove_exception(bucket, oldest);
1471 static u32 rt6_exception_hash(const struct in6_addr *dst,
1472 const struct in6_addr *src)
1474 static u32 seed __read_mostly;
1477 net_get_random_once(&seed, sizeof(seed));
1478 val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);
1480 #ifdef CONFIG_IPV6_SUBTREES
1482 val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
1484 return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
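/* The hash above selects one of FIB6_EXCEPTION_BUCKET_SIZE per-nexthop
 * buckets (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); each bucket is a plain
 * hlist whose depth is capped at FIB6_MAX_DEPTH by evicting the oldest entry.
 */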
1487 /* Helper function to find the cached rt in the hash table
1488 * and update bucket pointer to point to the bucket for this
1489 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
1492 static struct rt6_exception *
1493 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1494 const struct in6_addr *daddr,
1495 const struct in6_addr *saddr)
1497 struct rt6_exception *rt6_ex;
1500 if (!(*bucket) || !daddr)
1503 hval = rt6_exception_hash(daddr, saddr);
1506 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1507 struct rt6_info *rt6 = rt6_ex->rt6i;
1508 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1510 #ifdef CONFIG_IPV6_SUBTREES
1511 if (matched && saddr)
1512 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1520 /* Helper function to find the cached rt in the hash table
1521 * and update bucket pointer to point to the bucket for this
1522 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
1525 static struct rt6_exception *
1526 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1527 const struct in6_addr *daddr,
1528 const struct in6_addr *saddr)
1530 struct rt6_exception *rt6_ex;
1533 WARN_ON_ONCE(!rcu_read_lock_held());
1535 if (!(*bucket) || !daddr)
1538 hval = rt6_exception_hash(daddr, saddr);
1541 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1542 struct rt6_info *rt6 = rt6_ex->rt6i;
1543 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1545 #ifdef CONFIG_IPV6_SUBTREES
1546 if (matched && saddr)
1547 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1555 static unsigned int fib6_mtu(const struct fib6_result *res)
1557 const struct fib6_nh *nh = res->nh;
1560 if (res->f6i->fib6_pmtu) {
1561 mtu = res->f6i->fib6_pmtu;
1563 struct net_device *dev = nh->fib_nh_dev;
1564 struct inet6_dev *idev;
1567 idev = __in6_dev_get(dev);
1568 mtu = idev->cnf.mtu6;
1572 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1574 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1577 #define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL
/* used when the flushed bit is not relevant, only access to the bucket
 * (i.e., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
1585 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1588 struct rt6_exception_bucket *bucket;
1591 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1592 lockdep_is_held(lock));
1594 bucket = rcu_dereference(nh->rt6i_exception_bucket);
1596 /* remove bucket flushed bit if set */
1598 unsigned long p = (unsigned long)bucket;
1600 p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1601 bucket = (struct rt6_exception_bucket *)p;
1607 static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1609 unsigned long p = (unsigned long)bucket;
1611 return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
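/* The "flushed" marker is carried in the low bit of the bucket pointer
 * itself; the kcalloc()'ed bucket array is at least pointer aligned, so that
 * bit is otherwise always zero and can be masked off by readers above.
 */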
1614 /* called with rt6_exception_lock held */
1615 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1618 struct rt6_exception_bucket *bucket;
1621 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1622 lockdep_is_held(lock));
1624 p = (unsigned long)bucket;
1625 p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1626 bucket = (struct rt6_exception_bucket *)p;
1627 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1630 static int rt6_insert_exception(struct rt6_info *nrt,
1631 const struct fib6_result *res)
1633 struct net *net = dev_net(nrt->dst.dev);
1634 struct rt6_exception_bucket *bucket;
1635 struct fib6_info *f6i = res->f6i;
1636 struct in6_addr *src_key = NULL;
1637 struct rt6_exception *rt6_ex;
1638 struct fib6_nh *nh = res->nh;
1641 spin_lock_bh(&rt6_exception_lock);
1643 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1644 lockdep_is_held(&rt6_exception_lock));
1646 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1652 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1653 } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1658 #ifdef CONFIG_IPV6_SUBTREES
1659 /* fib6_src.plen != 0 indicates f6i is in subtree
1660 * and exception table is indexed by a hash of
1661 * both fib6_dst and fib6_src.
1662 * Otherwise, the exception table is indexed by
	 * a hash of only fib6_dst.
	 */
1665 if (f6i->fib6_src.plen)
1666 src_key = &nrt->rt6i_src.addr;
1668 /* rt6_mtu_change() might lower mtu on f6i.
1669 * Only insert this exception route if its mtu
	 * is less than f6i's mtu value.
	 */
1672 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1677 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1680 rt6_remove_exception(bucket, rt6_ex);
1682 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1688 rt6_ex->stamp = jiffies;
1689 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1691 net->ipv6.rt6_stats->fib_rt_cache++;
1693 if (bucket->depth > FIB6_MAX_DEPTH)
1694 rt6_exception_remove_oldest(bucket);
1697 spin_unlock_bh(&rt6_exception_lock);
1699 /* Update fn->fn_sernum to invalidate all cached dst */
1701 spin_lock_bh(&f6i->fib6_table->tb6_lock);
1702 fib6_update_sernum(net, f6i);
1703 spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1704 fib6_force_start_gc(net);
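	/* The sernum is what ip6_dst_check() compares against the dst cookie,
	 * so bumping it here forces every cached copy of this route to be
	 * revalidated on its next use.
	 */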
1710 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1712 struct rt6_exception_bucket *bucket;
1713 struct rt6_exception *rt6_ex;
1714 struct hlist_node *tmp;
1717 spin_lock_bh(&rt6_exception_lock);
1719 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	/* Prevent rt6_insert_exception() from recreating the bucket list */
1725 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1727 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1728 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1730 rcu_access_pointer(rt6_ex->rt6i->from) == from)
1731 rt6_remove_exception(bucket, rt6_ex);
1733 WARN_ON_ONCE(!from && bucket->depth);
1737 spin_unlock_bh(&rt6_exception_lock);
1740 static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1742 struct fib6_info *f6i = arg;
1744 fib6_nh_flush_exceptions(nh, f6i);
1749 void rt6_flush_exceptions(struct fib6_info *f6i)
1752 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1755 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1758 /* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
1761 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1762 const struct in6_addr *daddr,
1763 const struct in6_addr *saddr)
1765 const struct in6_addr *src_key = NULL;
1766 struct rt6_exception_bucket *bucket;
1767 struct rt6_exception *rt6_ex;
1768 struct rt6_info *ret = NULL;
#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * However, the src addr used to create the hash
	 * might not be exactly the passed in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo lookup
	 * if the passed in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
	 */
	if (res->f6i->fib6_src.plen)
		src_key = saddr;
find_ex:
#endif
1786 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1787 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1789 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1792 #ifdef CONFIG_IPV6_SUBTREES
1793 /* Use fib6_src as src_key and redo lookup */
1794 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1795 src_key = &res->f6i->fib6_src.addr;
1803 /* Remove the passed in cached rt from the hash table that contains it */
1804 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1805 const struct rt6_info *rt)
1807 const struct in6_addr *src_key = NULL;
1808 struct rt6_exception_bucket *bucket;
1809 struct rt6_exception *rt6_ex;
1812 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1815 spin_lock_bh(&rt6_exception_lock);
1816 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1818 #ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
1828 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1832 rt6_remove_exception(bucket, rt6_ex);
1838 spin_unlock_bh(&rt6_exception_lock);
1842 struct fib6_nh_excptn_arg {
1843 struct rt6_info *rt;
1847 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1849 struct fib6_nh_excptn_arg *arg = _arg;
1852 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1859 static int rt6_remove_exception_rt(struct rt6_info *rt)
1861 struct fib6_info *from;
1863 from = rcu_dereference(rt->from);
1864 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1868 struct fib6_nh_excptn_arg arg = {
1870 .plen = from->fib6_src.plen
1874 /* rc = 1 means an entry was found */
1875 rc = nexthop_for_each_fib6_nh(from->nh,
1876 rt6_nh_remove_exception_rt,
1878 return rc ? 0 : -ENOENT;
1881 return fib6_nh_remove_exception(from->fib6_nh,
1882 from->fib6_src.plen, rt);
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
1888 static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1889 const struct rt6_info *rt)
1891 const struct in6_addr *src_key = NULL;
1892 struct rt6_exception_bucket *bucket;
1893 struct rt6_exception *rt6_ex;
1895 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1896 #ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
1906 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1908 rt6_ex->stamp = jiffies;
1911 struct fib6_nh_match_arg {
1912 const struct net_device *dev;
1913 const struct in6_addr *gw;
1914 struct fib6_nh *match;
1917 /* determine if fib6_nh has given device and gateway */
1918 static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1920 struct fib6_nh_match_arg *arg = _arg;
1922 if (arg->dev != nh->fib_nh_dev ||
1923 (arg->gw && !nh->fib_nh_gw_family) ||
1924 (!arg->gw && nh->fib_nh_gw_family) ||
1925 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
1930 /* found a match, break the loop */
1934 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1936 struct fib6_info *from;
1937 struct fib6_nh *fib6_nh;
1941 from = rcu_dereference(rt->from);
1942 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1946 struct fib6_nh_match_arg arg = {
1948 .gw = &rt->rt6i_gateway,
1951 nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
1955 fib6_nh = arg.match;
1957 fib6_nh = from->fib6_nh;
1959 fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
1964 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1965 struct rt6_info *rt, int mtu)
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */
	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
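/* Worked example (illustrative only): a device MTU raised from 1500 to 9000
 * may lift a route PMTU that still equals 1500, since the local link was the
 * path minimum; a route PMTU of 1280 learned from a remote link is left
 * alone, because raising it would have to come from PMTU discovery instead.
 */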
1986 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1987 const struct fib6_nh *nh, int mtu)
1989 struct rt6_exception_bucket *bucket;
1990 struct rt6_exception *rt6_ex;
1993 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1997 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1998 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1999 struct rt6_info *entry = rt6_ex->rt6i;
2001 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2006 rt6_mtu_change_route_allowed(idev, entry, mtu))
2007 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2013 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
2015 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2016 const struct in6_addr *gateway)
2018 struct rt6_exception_bucket *bucket;
2019 struct rt6_exception *rt6_ex;
2020 struct hlist_node *tmp;
2023 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2026 spin_lock_bh(&rt6_exception_lock);
2027 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2029 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2030 hlist_for_each_entry_safe(rt6_ex, tmp,
2031 &bucket->chain, hlist) {
2032 struct rt6_info *entry = rt6_ex->rt6i;
2034 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2035 RTF_CACHE_GATEWAY &&
2036 ipv6_addr_equal(gateway,
2037 &entry->rt6i_gateway)) {
2038 rt6_remove_exception(bucket, rt6_ex);
2045 spin_unlock_bh(&rt6_exception_lock);
2048 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2049 struct rt6_exception *rt6_ex,
2050 struct fib6_gc_args *gc_args,
2053 struct rt6_info *rt = rt6_ex->rt6i;
	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others still hold references to them, so that on the next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
2061 if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2062 if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
2063 RT6_TRACE("aging clone %p\n", rt);
2064 rt6_remove_exception(bucket, rt6_ex);
2067 } else if (time_after(jiffies, rt->dst.expires)) {
2068 RT6_TRACE("purging expired route %p\n", rt);
2069 rt6_remove_exception(bucket, rt6_ex);
2073 if (rt->rt6i_flags & RTF_GATEWAY) {
2074 struct neighbour *neigh;
2075 __u8 neigh_flags = 0;
2077 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2079 neigh_flags = neigh->flags;
2081 if (!(neigh_flags & NTF_ROUTER)) {
2082 RT6_TRACE("purging route %p via non-router but gateway\n",
2084 rt6_remove_exception(bucket, rt6_ex);
2092 static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2093 struct fib6_gc_args *gc_args,
2096 struct rt6_exception_bucket *bucket;
2097 struct rt6_exception *rt6_ex;
2098 struct hlist_node *tmp;
2101 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2105 spin_lock(&rt6_exception_lock);
2106 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2108 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2109 hlist_for_each_entry_safe(rt6_ex, tmp,
2110 &bucket->chain, hlist) {
2111 rt6_age_examine_exception(bucket, rt6_ex,
2117 spin_unlock(&rt6_exception_lock);
2118 rcu_read_unlock_bh();
2121 struct fib6_nh_age_excptn_arg {
2122 struct fib6_gc_args *gc_args;
2126 static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2128 struct fib6_nh_age_excptn_arg *arg = _arg;
2130 fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2134 void rt6_age_exceptions(struct fib6_info *f6i,
2135 struct fib6_gc_args *gc_args,
2139 struct fib6_nh_age_excptn_arg arg = {
2144 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2147 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2151 /* must be called with rcu lock held */
2152 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2153 struct flowi6 *fl6, struct fib6_result *res, int strict)
2155 struct fib6_node *fn, *saved_fn;
2157 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2160 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2164 rt6_select(net, fn, oif, res, strict);
2165 if (res->f6i == net->ipv6.fib6_null_entry) {
2166 fn = fib6_backtrack(fn, &fl6->saddr);
2168 goto redo_rt6_select;
2169 else if (strict & RT6_LOOKUP_F_REACHABLE) {
2170 /* also consider unreachable route */
2171 strict &= ~RT6_LOOKUP_F_REACHABLE;
2173 goto redo_rt6_select;
2177 trace_fib6_table_lookup(net, res, table, fl6);
2182 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2183 int oif, struct flowi6 *fl6,
2184 const struct sk_buff *skb, int flags)
2186 struct fib6_result res = {};
2187 struct rt6_info *rt = NULL;
2190 WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2191 !rcu_read_lock_held());
2193 strict |= flags & RT6_LOOKUP_F_IFACE;
2194 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2195 if (net->ipv6.devconf_all->forwarding == 0)
2196 strict |= RT6_LOOKUP_F_REACHABLE;
2200 fib6_table_lookup(net, table, oif, fl6, &res, strict);
2201 if (res.f6i == net->ipv6.fib6_null_entry)
2204 fib6_select_path(net, &res, fl6, oif, false, skb, strict);
		/* Search through exception table */
2207 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2210 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2211 !res.nh->fib_nh_gw_family)) {
2212 /* Create a RTF_CACHE clone which will not be
2213 * owned by the fib6 tree. It is for the special case where
2214 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
2217 rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2220 /* 1 refcnt is taken during ip6_rt_cache_alloc().
2221 * As rt6_uncached_list_add() does not consume refcnt,
2222 * this refcnt is always returned to the caller even
			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
			 */
2225 rt6_uncached_list_add(rt);
2226 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2232 /* Get a percpu copy */
2234 rt = rt6_get_pcpu_route(&res);
2237 rt = rt6_make_pcpu_route(net, &res);
2243 rt = net->ipv6.ip6_null_entry;
2244 if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2245 ip6_hold_safe(net, &rt);
2250 EXPORT_SYMBOL_GPL(ip6_pol_route);
2252 static struct rt6_info *ip6_pol_route_input(struct net *net,
2253 struct fib6_table *table,
2255 const struct sk_buff *skb,
2258 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2261 struct dst_entry *ip6_route_input_lookup(struct net *net,
2262 struct net_device *dev,
2264 const struct sk_buff *skb,
2267 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2268 flags |= RT6_LOOKUP_F_IFACE;
2270 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2272 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2274 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2275 struct flow_keys *keys,
2276 struct flow_keys *flkeys)
2278 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2279 const struct ipv6hdr *key_iph = outer_iph;
2280 struct flow_keys *_flkeys = flkeys;
2281 const struct ipv6hdr *inner_iph;
2282 const struct icmp6hdr *icmph;
2283 struct ipv6hdr _inner_iph;
2284 struct icmp6hdr _icmph;
2286 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2289 icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2290 sizeof(_icmph), &_icmph);
2294 if (!icmpv6_is_err(icmph->icmp6_type))
2297 inner_iph = skb_header_pointer(skb,
2298 skb_transport_offset(skb) + sizeof(*icmph),
2299 sizeof(_inner_iph), &_inner_iph);
2303 key_iph = inner_iph;
2307 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2308 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2309 keys->tags.flow_label = _flkeys->tags.flow_label;
2310 keys->basic.ip_proto = _flkeys->basic.ip_proto;
2312 keys->addrs.v6addrs.src = key_iph->saddr;
2313 keys->addrs.v6addrs.dst = key_iph->daddr;
2314 keys->tags.flow_label = ip6_flowlabel(key_iph);
2315 keys->basic.ip_proto = key_iph->nexthdr;
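	/* For ICMPv6 errors the keys are taken from the embedded (offending)
	 * header above, so error messages hash onto the same multipath nexthop
	 * as the flow they refer to.
	 */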
2319 /* if skb is set it will be used and fl6 can be NULL */
2320 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2321 const struct sk_buff *skb, struct flow_keys *flkeys)
2323 struct flow_keys hash_keys;
2326 switch (ip6_multipath_hash_policy(net)) {
2328 memset(&hash_keys, 0, sizeof(hash_keys));
2329 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2331 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2333 hash_keys.addrs.v6addrs.src = fl6->saddr;
2334 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2335 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2336 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2341 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2342 struct flow_keys keys;
2344 /* short-circuit if we already have L4 hash present */
2346 return skb_get_hash_raw(skb) >> 1;
2348 memset(&hash_keys, 0, sizeof(hash_keys));
2351 skb_flow_dissect_flow_keys(skb, &keys, flag);
2354 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2355 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2356 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2357 hash_keys.ports.src = flkeys->ports.src;
2358 hash_keys.ports.dst = flkeys->ports.dst;
2359 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2361 memset(&hash_keys, 0, sizeof(hash_keys));
2362 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2363 hash_keys.addrs.v6addrs.src = fl6->saddr;
2364 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2365 hash_keys.ports.src = fl6->fl6_sport;
2366 hash_keys.ports.dst = fl6->fl6_dport;
2367 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2371 memset(&hash_keys, 0, sizeof(hash_keys));
2372 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2374 struct flow_keys keys;
2377 skb_flow_dissect_flow_keys(skb, &keys, 0);
2381 /* Inner can be v4 or v6 */
2382 if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2383 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2384 hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2385 hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2386 } else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2387 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2388 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2389 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2390 hash_keys.tags.flow_label = flkeys->tags.flow_label;
2391 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2393 /* Same as case 0 */
2394 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2395 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2398 /* Same as case 0 */
2399 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2400 hash_keys.addrs.v6addrs.src = fl6->saddr;
2401 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2402 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2403 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2407 mhash = flow_hash_from_keys(&hash_keys);
2412 /* Called with rcu held */
2413 void ip6_route_input(struct sk_buff *skb)
2415 const struct ipv6hdr *iph = ipv6_hdr(skb);
2416 struct net *net = dev_net(skb->dev);
2417 int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2418 struct ip_tunnel_info *tun_info;
2419 struct flowi6 fl6 = {
2420 .flowi6_iif = skb->dev->ifindex,
2421 .daddr = iph->daddr,
2422 .saddr = iph->saddr,
2423 .flowlabel = ip6_flowinfo(iph),
2424 .flowi6_mark = skb->mark,
2425 .flowi6_proto = iph->nexthdr,
2427 struct flow_keys *flkeys = NULL, _flkeys;
2429 tun_info = skb_tunnel_info(skb);
2430 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2431 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2433 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2436 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2437 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2439 skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2443 static struct rt6_info *ip6_pol_route_output(struct net *net,
2444 struct fib6_table *table,
2446 const struct sk_buff *skb,
2449 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2452 struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2453 const struct sock *sk,
2454 struct flowi6 *fl6, int flags)
2458 if (ipv6_addr_type(&fl6->daddr) &
2459 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2460 struct dst_entry *dst;
2462 /* This function does not take refcnt on the dst */
2463 dst = l3mdev_link_scope_lookup(net, fl6);
2468 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2470 flags |= RT6_LOOKUP_F_DST_NOREF;
2471 any_src = ipv6_addr_any(&fl6->saddr);
2472 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2473 (fl6->flowi6_oif && any_src))
2474 flags |= RT6_LOOKUP_F_IFACE;
2477 flags |= RT6_LOOKUP_F_HAS_SADDR;
2479 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2481 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2483 EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
2485 struct dst_entry *ip6_route_output_flags(struct net *net,
2486 const struct sock *sk,
2490 struct dst_entry *dst;
2491 struct rt6_info *rt6;
2494 dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2495 rt6 = (struct rt6_info *)dst;
2496 /* For dst cached in uncached_list, refcnt is already taken. */
2497 if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
2498 dst = &net->ipv6.ip6_null_entry->dst;
2505 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
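/* Typical use (a sketch, not taken from this file): fill a struct flowi6 with
 * at least daddr and flowi6_oif, call ip6_route_output_flags(net, sk, &fl6, 0),
 * check dst->error, and drop the reference with dst_release() when done.
 */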
2507 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2509 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2510 struct net_device *loopback_dev = net->loopback_dev;
2511 struct dst_entry *new = NULL;
2513 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2514 DST_OBSOLETE_DEAD, 0);
2517 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2521 new->input = dst_discard;
2522 new->output = dst_discard_out;
2524 dst_copy_metrics(new, &ort->dst);
2526 rt->rt6i_idev = in6_dev_get(loopback_dev);
2527 rt->rt6i_gateway = ort->rt6i_gateway;
2528 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2530 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2531 #ifdef CONFIG_IPV6_SUBTREES
2532 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2536 dst_release(dst_orig);
2537 return new ? new : ERR_PTR(-ENOMEM);
2541 * Destination cache support functions
2544 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2548 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2551 if (fib6_check_expired(f6i))
2557 static struct dst_entry *rt6_check(struct rt6_info *rt,
2558 struct fib6_info *from,
2563 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2564 rt_cookie != cookie)
2567 if (rt6_check_expired(rt))
2573 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2574 struct fib6_info *from,
2577 if (!__rt6_check_expired(rt) &&
2578 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2579 fib6_check(from, cookie))
2585 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2587 struct dst_entry *dst_ret;
2588 struct fib6_info *from;
2589 struct rt6_info *rt;
2591 rt = container_of(dst, struct rt6_info, dst);
2595 /* All IPV6 dsts are created with ->obsolete set to the value
2596 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
2597 * into this function always.
2600 from = rcu_dereference(rt->from);
2602 if (from && (rt->rt6i_flags & RTF_PCPU ||
2603 unlikely(!list_empty(&rt->rt6i_uncached))))
2604 dst_ret = rt6_dst_from_check(rt, from, cookie);
2606 dst_ret = rt6_check(rt, from, cookie);
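/* Caller sketch (hypothetical helper name): since every IPv6 dst is born
 * with DST_OBSOLETE_FORCE_CHK, socket code revalidates a cached route by
 * coming back through this ->check() handler with the cookie saved by
 * ip6_dst_store(); a NULL result means the route is stale and must be
 * looked up again.
 */
static struct dst_entry *example_revalidate_dst(struct sock *sk)
{
	/* returns NULL when ip6_dst_check() rejects the cached entry */
	return __sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
}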
2613 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2615 struct rt6_info *rt = (struct rt6_info *) dst;
2618 if (rt->rt6i_flags & RTF_CACHE) {
2620 if (rt6_check_expired(rt)) {
2621 rt6_remove_exception_rt(rt);
2633 static void ip6_link_failure(struct sk_buff *skb)
2635 struct rt6_info *rt;
2637 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2639 rt = (struct rt6_info *) skb_dst(skb);
2642 if (rt->rt6i_flags & RTF_CACHE) {
2643 rt6_remove_exception_rt(rt);
2645 struct fib6_info *from;
2646 struct fib6_node *fn;
2648 from = rcu_dereference(rt->from);
2650 fn = rcu_dereference(from->fib6_node);
2651 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2659 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2661 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2662 struct fib6_info *from;
2665 from = rcu_dereference(rt0->from);
2667 rt0->dst.expires = from->expires;
2671 dst_set_expires(&rt0->dst, timeout);
2672 rt0->rt6i_flags |= RTF_EXPIRES;
2675 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2677 struct net *net = dev_net(rt->dst.dev);
2679 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2680 rt->rt6i_flags |= RTF_MODIFIED;
2681 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2684 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2686 return !(rt->rt6i_flags & RTF_CACHE) &&
2687 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2690 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2691 const struct ipv6hdr *iph, u32 mtu)
2693 const struct in6_addr *daddr, *saddr;
2694 struct rt6_info *rt6 = (struct rt6_info *)dst;
2696 if (dst_metric_locked(dst, RTAX_MTU))
2700 daddr = &iph->daddr;
2701 saddr = &iph->saddr;
2703 daddr = &sk->sk_v6_daddr;
2704 saddr = &inet6_sk(sk)->saddr;
2709 dst_confirm_neigh(dst, daddr);
2710 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2711 if (mtu >= dst_mtu(dst))
2712 return;
2714 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2715 rt6_do_update_pmtu(rt6, mtu);
2716 /* update rt6_ex->stamp for cache */
2717 if (rt6->rt6i_flags & RTF_CACHE)
2718 rt6_update_exception_stamp_rt(rt6);
2720 struct fib6_result res = {};
2721 struct rt6_info *nrt6;
2724 res.f6i = rcu_dereference(rt6->from);
2728 res.fib6_flags = res.f6i->fib6_flags;
2729 res.fib6_type = res.f6i->fib6_type;
2732 struct fib6_nh_match_arg arg = {
2734 .gw = &rt6->rt6i_gateway,
2737 nexthop_for_each_fib6_nh(res.f6i->nh,
2738 fib6_nh_find_match, &arg);
2740 /* fib6_info uses a nexthop that does not have fib6_nh
2741 * using the dst->dev + gw. Should be impossible.
2748 res.nh = res.f6i->fib6_nh;
2751 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2753 rt6_do_update_pmtu(nrt6, mtu);
2754 if (rt6_insert_exception(nrt6, &res))
2755 dst_release_immediate(&nrt6->dst);
2762 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2763 struct sk_buff *skb, u32 mtu)
2765 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2768 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2769 int oif, u32 mark, kuid_t uid)
2771 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2772 struct dst_entry *dst;
2773 struct flowi6 fl6 = {
2775 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2776 .daddr = iph->daddr,
2777 .saddr = iph->saddr,
2778 .flowlabel = ip6_flowinfo(iph),
2782 dst = ip6_route_output(net, NULL, &fl6);
2784 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
2787 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
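/* Caller sketch (illustrative): tunnel drivers and ICMPv6 error handlers
 * feed a "Packet Too Big" report back into the FIB with this helper. The
 * skb is assumed to carry the offending inner IPv6 header at skb->data,
 * as it does in an ICMPv6 error payload; the function name is made up.
 */
static void example_handle_pkt_too_big(struct net *net, struct sk_buff *skb,
				       __be32 mtu, int oif)
{
	/* a zero mark falls back to the reply mark derived from the skb */
	ip6_update_pmtu(skb, net, mtu, oif, 0, sock_net_uid(net, NULL));
}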
2789 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2791 int oif = sk->sk_bound_dev_if;
2792 struct dst_entry *dst;
2794 if (!oif && skb->dev)
2795 oif = l3mdev_master_ifindex(skb->dev);
2797 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2799 dst = __sk_dst_get(sk);
2800 if (!dst || !dst->obsolete ||
2801 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2805 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2806 ip6_datagram_dst_update(sk, false);
2809 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2811 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2812 const struct flowi6 *fl6)
2814 #ifdef CONFIG_IPV6_SUBTREES
2815 struct ipv6_pinfo *np = inet6_sk(sk);
2818 ip6_dst_store(sk, dst,
2819 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2820 &sk->sk_v6_daddr : NULL,
2821 #ifdef CONFIG_IPV6_SUBTREES
2822 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2828 static bool ip6_redirect_nh_match(const struct fib6_result *res,
2830 const struct in6_addr *gw,
2831 struct rt6_info **ret)
2833 const struct fib6_nh *nh = res->nh;
2835 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
2836 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
2839 /* rt_cache's gateway might be different from its 'parent'
2840 * in the case of an ip redirect.
2841 * So we keep searching in the exception table if the gateway
2842 * is different.
2843 */
2844 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
2845 struct rt6_info *rt_cache;
2847 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
2848 if (rt_cache &&
2849 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
2858 struct fib6_nh_rd_arg {
2859 struct fib6_result *res;
2861 const struct in6_addr *gw;
2862 struct rt6_info **ret;
2865 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
2867 struct fib6_nh_rd_arg *arg = _arg;
2870 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
2873 /* Handle redirects */
2874 struct ip6rd_flowi {
2876 struct in6_addr gateway;
2879 static struct rt6_info *__ip6_route_redirect(struct net *net,
2880 struct fib6_table *table,
2882 const struct sk_buff *skb,
2885 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2886 struct rt6_info *ret = NULL;
2887 struct fib6_result res = {};
2888 struct fib6_nh_rd_arg arg = {
2891 .gw = &rdfl->gateway,
2894 struct fib6_info *rt;
2895 struct fib6_node *fn;
2897 /* l3mdev_update_flow overrides oif if the device is enslaved; in
2898 * this case we must match on the real ingress device, so reset it
2900 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2901 fl6->flowi6_oif = skb->dev->ifindex;
2903 /* Get the "current" route for this destination and
2904 * check if the redirect has come from appropriate router.
2906 * RFC 4861 specifies that redirects should only be
2907 * accepted if they come from the nexthop to the target.
2908 * Due to the way the routes are chosen, this notion
2909 * is a bit fuzzy and one might need to check all possible
2910 * routes.
2911 */
2914 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2916 for_each_fib6_node_rt_rcu(fn) {
2918 if (fib6_check_expired(rt))
2920 if (rt->fib6_flags & RTF_REJECT)
2922 if (unlikely(rt->nh)) {
2923 if (nexthop_is_blackhole(rt->nh))
2925 /* on match, res->nh is filled in and potentially ret */
2926 if (nexthop_for_each_fib6_nh(rt->nh,
2927 fib6_nh_redirect_match,
2931 res.nh = rt->fib6_nh;
2932 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
2933 &ret))
2934 goto out;
2938 if (!rt)
2939 rt = net->ipv6.fib6_null_entry;
2940 else if (rt->fib6_flags & RTF_REJECT) {
2941 ret = net->ipv6.ip6_null_entry;
2945 if (rt == net->ipv6.fib6_null_entry) {
2946 fn = fib6_backtrack(fn, &fl6->saddr);
2952 res.nh = rt->fib6_nh;
2953 out:
2954 if (ret)
2955 ip6_hold_safe(net, &ret);
2957 res.fib6_flags = res.f6i->fib6_flags;
2958 res.fib6_type = res.f6i->fib6_type;
2959 ret = ip6_create_rt_rcu(&res);
2964 trace_fib6_table_lookup(net, &res, table, fl6);
2968 static struct dst_entry *ip6_route_redirect(struct net *net,
2969 const struct flowi6 *fl6,
2970 const struct sk_buff *skb,
2971 const struct in6_addr *gateway)
2973 int flags = RT6_LOOKUP_F_HAS_SADDR;
2974 struct ip6rd_flowi rdfl;
2977 rdfl.gateway = *gateway;
2979 return fib6_rule_lookup(net, &rdfl.fl6, skb,
2980 flags, __ip6_route_redirect);
2983 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2986 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2987 struct dst_entry *dst;
2988 struct flowi6 fl6 = {
2989 .flowi6_iif = LOOPBACK_IFINDEX,
2991 .flowi6_mark = mark,
2992 .daddr = iph->daddr,
2993 .saddr = iph->saddr,
2994 .flowlabel = ip6_flowinfo(iph),
2998 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
2999 rt6_do_redirect(dst, NULL, skb);
3002 EXPORT_SYMBOL_GPL(ip6_redirect);
3004 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3006 const struct ipv6hdr *iph = ipv6_hdr(skb);
3007 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3008 struct dst_entry *dst;
3009 struct flowi6 fl6 = {
3010 .flowi6_iif = LOOPBACK_IFINDEX,
3013 .saddr = iph->daddr,
3014 .flowi6_uid = sock_net_uid(net, NULL),
3017 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3018 rt6_do_redirect(dst, NULL, skb);
3022 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3024 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
3027 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
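/* Caller sketch (illustrative): protocol error handlers typically invoke
 * these when an NDISC redirect arrives for one of their flows, e.g.
 *
 *	if (type == NDISC_REDIRECT) {
 *		ip6_sk_redirect(skb, sk);
 *		return;
 *	}
 */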
3029 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3031 struct net_device *dev = dst->dev;
3032 unsigned int mtu = dst_mtu(dst);
3033 struct net *net = dev_net(dev);
3035 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3037 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
3038 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
3041 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
3042 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3043 * IPV6_MAXPLEN is also valid and means: "any MSS,
3044 * rely only on pmtu discovery"
3046 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3047 mtu = IPV6_MAXPLEN;
3048 return mtu;
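/* Worked example: for a 1500 byte link MTU the advertised MSS becomes
 * 1500 - sizeof(struct ipv6hdr) - sizeof(struct tcphdr) = 1500 - 40 - 20
 * = 1440 bytes, subject to the net.ipv6.route.min_adv_mss floor applied
 * above.
 */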
3051 static unsigned int ip6_mtu(const struct dst_entry *dst)
3053 struct inet6_dev *idev;
3056 mtu = dst_metric_raw(dst, RTAX_MTU);
3063 idev = __in6_dev_get(dst->dev);
3064 if (idev)
3065 mtu = idev->cnf.mtu6;
3069 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3071 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
3074 /* MTU selection:
3075 * 1. mtu on route is locked - use it
3076 * 2. mtu from nexthop exception
3077 * 3. mtu from egress device
3079 * based on ip6_dst_mtu_forward and exception logic of
3080 * rt6_find_cached_rt; called with rcu_read_lock
3082 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3083 const struct in6_addr *daddr,
3084 const struct in6_addr *saddr)
3086 const struct fib6_nh *nh = res->nh;
3087 struct fib6_info *f6i = res->f6i;
3088 struct inet6_dev *idev;
3089 struct rt6_info *rt;
3092 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3093 mtu = f6i->fib6_pmtu;
3098 rt = rt6_find_cached_rt(res, daddr, saddr);
3099 if (unlikely(rt)) {
3100 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3101 } else {
3102 struct net_device *dev = nh->fib_nh_dev;
3104 mtu = IPV6_MIN_MTU;
3105 idev = __in6_dev_get(dev);
3106 if (idev && idev->cnf.mtu6 > mtu)
3107 mtu = idev->cnf.mtu6;
3110 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3112 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3115 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3118 struct dst_entry *dst;
3119 struct rt6_info *rt;
3120 struct inet6_dev *idev = in6_dev_get(dev);
3121 struct net *net = dev_net(dev);
3123 if (unlikely(!idev))
3124 return ERR_PTR(-ENODEV);
3126 rt = ip6_dst_alloc(net, dev, 0);
3127 if (unlikely(!rt)) {
3129 dst = ERR_PTR(-ENOMEM);
3133 rt->dst.flags |= DST_HOST;
3134 rt->dst.input = ip6_input;
3135 rt->dst.output = ip6_output;
3136 rt->rt6i_gateway = fl6->daddr;
3137 rt->rt6i_dst.addr = fl6->daddr;
3138 rt->rt6i_dst.plen = 128;
3139 rt->rt6i_idev = idev;
3140 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3142 /* Add this dst into uncached_list so that rt6_disable_ip() can
3143 * do proper release of the net_device
3145 rt6_uncached_list_add(rt);
3146 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
3148 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3154 static int ip6_dst_gc(struct dst_ops *ops)
3156 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3157 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
3158 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
3159 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
3160 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
3161 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
3164 entries = dst_entries_get_fast(ops);
3165 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
3166 entries <= rt_max_size)
3167 goto out;
3169 net->ipv6.ip6_rt_gc_expire++;
3170 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
3171 entries = dst_entries_get_slow(ops);
3172 if (entries < ops->gc_thresh)
3173 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
3174 out:
3175 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
3176 return entries > rt_max_size;
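/* Tuning sketch: the values consumed above map to the sysctls under
 * /proc/sys/net/ipv6/route/ - gc_min_interval, max_size, gc_elasticity,
 * gc_timeout and the dst_ops gc_thresh. For example, to raise the ceiling
 * before garbage collection is forced:
 *
 *   sysctl -w net.ipv6.route.max_size=16384
 */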
3179 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3180 const struct in6_addr *gw_addr, u32 tbid,
3181 int flags, struct fib6_result *res)
3183 struct flowi6 fl6 = {
3184 .flowi6_oif = cfg->fc_ifindex,
3186 .saddr = cfg->fc_prefsrc,
3188 struct fib6_table *table;
3191 table = fib6_get_table(net, tbid);
3195 if (!ipv6_addr_any(&cfg->fc_prefsrc))
3196 flags |= RT6_LOOKUP_F_HAS_SADDR;
3198 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3200 err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3201 if (!err && res->f6i != net->ipv6.fib6_null_entry)
3202 fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3203 cfg->fc_ifindex != 0, NULL, flags);
3208 static int ip6_route_check_nh_onlink(struct net *net,
3209 struct fib6_config *cfg,
3210 const struct net_device *dev,
3211 struct netlink_ext_ack *extack)
3213 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3214 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3215 struct fib6_result res = {};
3218 err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3219 if (!err && !(res.fib6_flags & RTF_REJECT) &&
3220 /* ignore match if it is the default route */
3221 !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
3222 (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
3223 NL_SET_ERR_MSG(extack,
3224 "Nexthop has invalid gateway or device mismatch");
3231 static int ip6_route_check_nh(struct net *net,
3232 struct fib6_config *cfg,
3233 struct net_device **_dev,
3234 struct inet6_dev **idev)
3236 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3237 struct net_device *dev = _dev ? *_dev : NULL;
3238 int flags = RT6_LOOKUP_F_IFACE;
3239 struct fib6_result res = {};
3240 int err = -EHOSTUNREACH;
3242 if (cfg->fc_table) {
3243 err = ip6_nh_lookup_table(net, cfg, gw_addr,
3244 cfg->fc_table, flags, &res);
3245 /* gw_addr can not require a gateway or resolve to a reject
3246 * route. If a device is given, it must match the result.
3248 if (err || res.fib6_flags & RTF_REJECT ||
3249 res.nh->fib_nh_gw_family ||
3250 (dev && dev != res.nh->fib_nh_dev))
3251 err = -EHOSTUNREACH;
3255 struct flowi6 fl6 = {
3256 .flowi6_oif = cfg->fc_ifindex,
3260 err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3261 if (err || res.fib6_flags & RTF_REJECT ||
3262 res.nh->fib_nh_gw_family)
3263 err = -EHOSTUNREACH;
3268 fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3269 cfg->fc_ifindex != 0, NULL, flags);
3274 if (dev != res.nh->fib_nh_dev)
3275 err = -EHOSTUNREACH;
3277 *_dev = dev = res.nh->fib_nh_dev;
3279 *idev = in6_dev_get(dev);
3285 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3286 struct net_device **_dev, struct inet6_dev **idev,
3287 struct netlink_ext_ack *extack)
3289 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3290 int gwa_type = ipv6_addr_type(gw_addr);
3291 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3292 const struct net_device *dev = *_dev;
3293 bool need_addr_check = !dev;
3296 /* if gw_addr is local we will fail to detect this in case
3297 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3298 * will return already-added prefix route via interface that
3299 * prefix route was assigned to, which might be non-loopback.
3300 */
3301 if (dev &&
3302 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3303 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3307 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3308 /* IPv6 strictly prohibits using non-link-local
3309 * addresses as nexthop addresses.
3310 * Otherwise, a router will not be able to send redirects.
3311 * It is very good, but in some (rare!) circumstances
3312 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3313 * some exceptions. --ANK
3314 * We allow IPv4-mapped nexthops to support RFC4798-type
3315 * addressing.
3316 */
3317 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3318 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3324 if (cfg->fc_flags & RTNH_F_ONLINK)
3325 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3327 err = ip6_route_check_nh(net, cfg, _dev, idev);
3335 /* reload in case device was changed */
3339 if (!dev) {
3340 NL_SET_ERR_MSG(extack, "Egress device not specified");
3342 } else if (dev->flags & IFF_LOOPBACK) {
3343 NL_SET_ERR_MSG(extack,
3344 "Egress device can not be loopback device for this route");
3348 /* if we did not check gw_addr above, do so now that the
3349 * egress device has been resolved.
3351 if (need_addr_check &&
3352 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3353 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3362 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3364 if ((flags & RTF_REJECT) ||
3365 (dev && (dev->flags & IFF_LOOPBACK) &&
3366 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3367 !(flags & RTF_LOCAL)))
3373 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3374 struct fib6_config *cfg, gfp_t gfp_flags,
3375 struct netlink_ext_ack *extack)
3377 struct net_device *dev = NULL;
3378 struct inet6_dev *idev = NULL;
3382 fib6_nh->fib_nh_family = AF_INET6;
3385 if (cfg->fc_ifindex) {
3386 dev = dev_get_by_index(net, cfg->fc_ifindex);
3387 if (!dev)
3388 goto out;
3389 idev = in6_dev_get(dev);
3394 if (cfg->fc_flags & RTNH_F_ONLINK) {
3395 if (!dev) {
3396 NL_SET_ERR_MSG(extack,
3397 "Nexthop device required for onlink");
3401 if (!(dev->flags & IFF_UP)) {
3402 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3407 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3410 fib6_nh->fib_nh_weight = 1;
3412 /* We cannot add true routes via loopback here,
3413 * they would result in kernel looping; promote them to reject routes
3415 addr_type = ipv6_addr_type(&cfg->fc_dst);
3416 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3417 /* hold loopback dev/idev if we haven't done so. */
3418 if (dev != net->loopback_dev) {
3423 dev = net->loopback_dev;
3425 idev = in6_dev_get(dev);
3434 if (cfg->fc_flags & RTF_GATEWAY) {
3435 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3439 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3440 fib6_nh->fib_nh_gw_family = AF_INET6;
3447 if (idev->cnf.disable_ipv6) {
3448 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3453 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3454 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3459 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3460 !netif_carrier_ok(dev))
3461 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3463 err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
3464 cfg->fc_encap_type, cfg, gfp_flags, extack);
3469 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3470 if (!fib6_nh->rt6i_pcpu) {
3475 fib6_nh->fib_nh_dev = dev;
3476 fib6_nh->fib_nh_oif = dev->ifindex;
3483 lwtstate_put(fib6_nh->fib_nh_lws);
3484 fib6_nh->fib_nh_lws = NULL;
3492 void fib6_nh_release(struct fib6_nh *fib6_nh)
3494 struct rt6_exception_bucket *bucket;
3498 fib6_nh_flush_exceptions(fib6_nh, NULL);
3499 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3501 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3507 if (fib6_nh->rt6i_pcpu) {
3510 for_each_possible_cpu(cpu) {
3511 struct rt6_info **ppcpu_rt;
3512 struct rt6_info *pcpu_rt;
3514 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3515 pcpu_rt = *ppcpu_rt;
3516 if (pcpu_rt) {
3517 dst_dev_put(&pcpu_rt->dst);
3518 dst_release(&pcpu_rt->dst);
3523 free_percpu(fib6_nh->rt6i_pcpu);
3526 fib_nh_common_release(&fib6_nh->nh_common);
3529 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3531 struct netlink_ext_ack *extack)
3533 struct net *net = cfg->fc_nlinfo.nl_net;
3534 struct fib6_info *rt = NULL;
3535 struct nexthop *nh = NULL;
3536 struct fib6_table *table;
3537 struct fib6_nh *fib6_nh;
3541 /* RTF_PCPU is an internal flag; can not be set by userspace */
3542 if (cfg->fc_flags & RTF_PCPU) {
3543 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3547 /* RTF_CACHE is an internal flag; can not be set by userspace */
3548 if (cfg->fc_flags & RTF_CACHE) {
3549 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3553 if (cfg->fc_type > RTN_MAX) {
3554 NL_SET_ERR_MSG(extack, "Invalid route type");
3558 if (cfg->fc_dst_len > 128) {
3559 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3562 if (cfg->fc_src_len > 128) {
3563 NL_SET_ERR_MSG(extack, "Invalid source address length");
3566 #ifndef CONFIG_IPV6_SUBTREES
3567 if (cfg->fc_src_len) {
3568 NL_SET_ERR_MSG(extack,
3569 "Specifying source address requires IPV6_SUBTREES to be enabled");
3573 if (cfg->fc_nh_id) {
3574 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3575 if (!nh) {
3576 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3579 err = fib6_check_nexthop(nh, cfg, extack);
3585 if (cfg->fc_nlinfo.nlh &&
3586 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3587 table = fib6_get_table(net, cfg->fc_table);
3589 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3590 table = fib6_new_table(net, cfg->fc_table);
3593 table = fib6_new_table(net, cfg->fc_table);
3600 rt = fib6_info_alloc(gfp_flags, !nh);
3604 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3606 if (IS_ERR(rt->fib6_metrics)) {
3607 err = PTR_ERR(rt->fib6_metrics);
3608 /* Do not leave garbage there. */
3609 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3613 if (cfg->fc_flags & RTF_ADDRCONF)
3614 rt->dst_nocount = true;
3616 if (cfg->fc_flags & RTF_EXPIRES)
3617 fib6_set_expires(rt, jiffies +
3618 clock_t_to_jiffies(cfg->fc_expires));
3620 fib6_clean_expires(rt);
3622 if (cfg->fc_protocol == RTPROT_UNSPEC)
3623 cfg->fc_protocol = RTPROT_BOOT;
3624 rt->fib6_protocol = cfg->fc_protocol;
3626 rt->fib6_table = table;
3627 rt->fib6_metric = cfg->fc_metric;
3628 rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3629 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3631 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3632 rt->fib6_dst.plen = cfg->fc_dst_len;
3633 if (rt->fib6_dst.plen == 128)
3634 rt->dst_host = true;
3636 #ifdef CONFIG_IPV6_SUBTREES
3637 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3638 rt->fib6_src.plen = cfg->fc_src_len;
3641 if (!nexthop_get(nh)) {
3642 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3645 if (rt->fib6_src.plen) {
3646 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3650 fib6_nh = nexthop_fib6_nh(rt->nh);
3652 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3656 fib6_nh = rt->fib6_nh;
3658 /* We cannot add true routes via loopback here, they would
3659 * result in kernel looping; promote them to reject routes
3661 addr_type = ipv6_addr_type(&cfg->fc_dst);
3662 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3664 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3667 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3668 struct net_device *dev = fib6_nh->fib_nh_dev;
3670 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3671 NL_SET_ERR_MSG(extack, "Invalid source address");
3675 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3676 rt->fib6_prefsrc.plen = 128;
3678 rt->fib6_prefsrc.plen = 0;
3682 fib6_info_release(rt);
3683 return ERR_PTR(err);
3686 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3687 struct netlink_ext_ack *extack)
3689 struct fib6_info *rt;
3692 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3696 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3697 fib6_info_release(rt);
3702 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3704 struct net *net = info->nl_net;
3705 struct fib6_table *table;
3708 if (rt == net->ipv6.fib6_null_entry) {
3713 table = rt->fib6_table;
3714 spin_lock_bh(&table->tb6_lock);
3715 err = fib6_del(rt, info);
3716 spin_unlock_bh(&table->tb6_lock);
3719 fib6_info_release(rt);
3723 int ip6_del_rt(struct net *net, struct fib6_info *rt)
3725 struct nl_info info = { .nl_net = net };
3727 return __ip6_del_rt(rt, &info);
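/* Caller sketch: __ip6_del_rt() drops the reference it is handed, so the
 * usual pattern (see __rt6_purge_dflt_routers() below) takes a reference
 * first and lets the deletion consume it:
 *
 *	if (fib6_info_hold_safe(rt))
 *		ip6_del_rt(net, rt);
 */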
3730 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3732 struct nl_info *info = &cfg->fc_nlinfo;
3733 struct net *net = info->nl_net;
3734 struct sk_buff *skb = NULL;
3735 struct fib6_table *table;
3738 if (rt == net->ipv6.fib6_null_entry)
3740 table = rt->fib6_table;
3741 spin_lock_bh(&table->tb6_lock);
3743 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3744 struct fib6_info *sibling, *next_sibling;
3746 /* prefer to send a single notification with all hops */
3747 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3749 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3751 if (rt6_fill_node(net, skb, rt, NULL,
3752 NULL, NULL, 0, RTM_DELROUTE,
3753 info->portid, seq, 0) < 0) {
3757 info->skip_notify = 1;
3760 info->skip_notify_kernel = 1;
3761 call_fib6_multipath_entry_notifiers(net,
3762 FIB_EVENT_ENTRY_DEL,
3766 list_for_each_entry_safe(sibling, next_sibling,
3769 err = fib6_del(sibling, info);
3775 err = fib6_del(rt, info);
3777 spin_unlock_bh(&table->tb6_lock);
3779 fib6_info_release(rt);
3782 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3783 info->nlh, gfp_any());
3788 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3792 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3795 if (cfg->fc_flags & RTF_GATEWAY &&
3796 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3799 rc = rt6_remove_exception_rt(rt);
3804 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3807 struct fib6_result res = {
3811 struct rt6_info *rt_cache;
3813 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3815 return __ip6_del_cached_rt(rt_cache, cfg);
3820 struct fib6_nh_del_cached_rt_arg {
3821 struct fib6_config *cfg;
3822 struct fib6_info *f6i;
3825 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
3827 struct fib6_nh_del_cached_rt_arg *arg = _arg;
3830 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
3831 return rc != -ESRCH ? rc : 0;
3834 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
3836 struct fib6_nh_del_cached_rt_arg arg = {
3841 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
3844 static int ip6_route_del(struct fib6_config *cfg,
3845 struct netlink_ext_ack *extack)
3847 struct fib6_table *table;
3848 struct fib6_info *rt;
3849 struct fib6_node *fn;
3852 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3854 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3860 fn = fib6_locate(&table->tb6_root,
3861 &cfg->fc_dst, cfg->fc_dst_len,
3862 &cfg->fc_src, cfg->fc_src_len,
3863 !(cfg->fc_flags & RTF_CACHE));
3866 for_each_fib6_node_rt_rcu(fn) {
3869 if (rt->nh && cfg->fc_nh_id &&
3870 rt->nh->id != cfg->fc_nh_id)
3873 if (cfg->fc_flags & RTF_CACHE) {
3877 rc = ip6_del_cached_rt_nh(cfg, rt);
3878 } else if (cfg->fc_nh_id) {
3882 rc = ip6_del_cached_rt(cfg, rt, nh);
3891 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3893 if (cfg->fc_protocol &&
3894 cfg->fc_protocol != rt->fib6_protocol)
3898 if (!fib6_info_hold_safe(rt))
3902 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3908 if (cfg->fc_ifindex &&
3910 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3912 if (cfg->fc_flags & RTF_GATEWAY &&
3913 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3915 if (!fib6_info_hold_safe(rt))
3919 /* if gateway was specified only delete the one hop */
3920 if (cfg->fc_flags & RTF_GATEWAY)
3921 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3923 return __ip6_del_rt_siblings(rt, cfg);
3931 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3933 struct netevent_redirect netevent;
3934 struct rt6_info *rt, *nrt = NULL;
3935 struct fib6_result res = {};
3936 struct ndisc_options ndopts;
3937 struct inet6_dev *in6_dev;
3938 struct neighbour *neigh;
3940 int optlen, on_link;
3943 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
3944 optlen -= sizeof(*msg);
3947 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
3951 msg = (struct rd_msg *)icmp6_hdr(skb);
3953 if (ipv6_addr_is_multicast(&msg->dest)) {
3954 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
3959 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3961 } else if (ipv6_addr_type(&msg->target) !=
3962 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3963 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3967 in6_dev = __in6_dev_get(skb->dev);
3970 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
3971 return;
3973 /* RFC2461 8.1:
3974 * The IP source address of the Redirect MUST be the same as the current
3975 * first-hop router for the specified ICMP Destination Address.
3976 */
3978 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3979 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3984 if (ndopts.nd_opts_tgt_lladdr) {
3985 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3988 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3993 rt = (struct rt6_info *) dst;
3994 if (rt->rt6i_flags & RTF_REJECT) {
3995 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
3999 /* Redirect received -> path was valid.
4000 * Look, redirects are sent only in response to data packets,
4001 * so that this nexthop apparently is reachable. --ANK
4003 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4005 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4010 * We have finally decided to accept it.
4013 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4014 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4015 NEIGH_UPDATE_F_OVERRIDE|
4016 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4017 NEIGH_UPDATE_F_ISROUTER)),
4018 NDISC_REDIRECT, &ndopts);
4021 res.f6i = rcu_dereference(rt->from);
4026 struct fib6_nh_match_arg arg = {
4028 .gw = &rt->rt6i_gateway,
4031 nexthop_for_each_fib6_nh(res.f6i->nh,
4032 fib6_nh_find_match, &arg);
4034 /* fib6_info uses a nexthop that does not have fib6_nh
4035 * using the dst->dev. Should be impossible
4041 res.nh = res.f6i->fib6_nh;
4044 res.fib6_flags = res.f6i->fib6_flags;
4045 res.fib6_type = res.f6i->fib6_type;
4046 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4050 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4052 nrt->rt6i_flags &= ~RTF_GATEWAY;
4054 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4056 /* rt6_insert_exception() will take care of duplicated exceptions */
4057 if (rt6_insert_exception(nrt, &res)) {
4058 dst_release_immediate(&nrt->dst);
4062 netevent.old = &rt->dst;
4063 netevent.new = &nrt->dst;
4064 netevent.daddr = &msg->dest;
4065 netevent.neigh = neigh;
4066 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4070 neigh_release(neigh);
4073 #ifdef CONFIG_IPV6_ROUTE_INFO
4074 static struct fib6_info *rt6_get_route_info(struct net *net,
4075 const struct in6_addr *prefix, int prefixlen,
4076 const struct in6_addr *gwaddr,
4077 struct net_device *dev)
4079 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4080 int ifindex = dev->ifindex;
4081 struct fib6_node *fn;
4082 struct fib6_info *rt = NULL;
4083 struct fib6_table *table;
4085 table = fib6_get_table(net, tb_id);
4090 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4094 for_each_fib6_node_rt_rcu(fn) {
4095 /* these routes do not use nexthops */
4098 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4100 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4101 !rt->fib6_nh->fib_nh_gw_family)
4103 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4105 if (!fib6_info_hold_safe(rt))
4114 static struct fib6_info *rt6_add_route_info(struct net *net,
4115 const struct in6_addr *prefix, int prefixlen,
4116 const struct in6_addr *gwaddr,
4117 struct net_device *dev,
4120 struct fib6_config cfg = {
4121 .fc_metric = IP6_RT_PRIO_USER,
4122 .fc_ifindex = dev->ifindex,
4123 .fc_dst_len = prefixlen,
4124 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4125 RTF_UP | RTF_PREF(pref),
4126 .fc_protocol = RTPROT_RA,
4127 .fc_type = RTN_UNICAST,
4128 .fc_nlinfo.portid = 0,
4129 .fc_nlinfo.nlh = NULL,
4130 .fc_nlinfo.nl_net = net,
4133 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4134 cfg.fc_dst = *prefix;
4135 cfg.fc_gateway = *gwaddr;
4137 /* We should treat it as a default route if prefix length is 0. */
4139 cfg.fc_flags |= RTF_DEFAULT;
4141 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4143 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4147 struct fib6_info *rt6_get_dflt_router(struct net *net,
4148 const struct in6_addr *addr,
4149 struct net_device *dev)
4151 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4152 struct fib6_info *rt;
4153 struct fib6_table *table;
4155 table = fib6_get_table(net, tb_id);
4160 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4163 /* RA routes do not use nexthops */
4168 if (dev == nh->fib_nh_dev &&
4169 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4170 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4173 if (rt && !fib6_info_hold_safe(rt))
4179 struct fib6_info *rt6_add_dflt_router(struct net *net,
4180 const struct in6_addr *gwaddr,
4181 struct net_device *dev,
4184 struct fib6_config cfg = {
4185 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4186 .fc_metric = IP6_RT_PRIO_USER,
4187 .fc_ifindex = dev->ifindex,
4188 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4189 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4190 .fc_protocol = RTPROT_RA,
4191 .fc_type = RTN_UNICAST,
4192 .fc_nlinfo.portid = 0,
4193 .fc_nlinfo.nlh = NULL,
4194 .fc_nlinfo.nl_net = net,
4197 cfg.fc_gateway = *gwaddr;
4199 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4200 struct fib6_table *table;
4202 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4204 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4207 return rt6_get_dflt_router(net, gwaddr, dev);
4210 static void __rt6_purge_dflt_routers(struct net *net,
4211 struct fib6_table *table)
4213 struct fib6_info *rt;
4217 for_each_fib6_node_rt_rcu(&table->tb6_root) {
4218 struct net_device *dev = fib6_info_nh_dev(rt);
4219 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4221 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4222 (!idev || idev->cnf.accept_ra != 2) &&
4223 fib6_info_hold_safe(rt)) {
4225 ip6_del_rt(net, rt);
4231 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4234 void rt6_purge_dflt_routers(struct net *net)
4236 struct fib6_table *table;
4237 struct hlist_head *head;
4242 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4243 head = &net->ipv6.fib_table_hash[h];
4244 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4245 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4246 __rt6_purge_dflt_routers(net, table);
4253 static void rtmsg_to_fib6_config(struct net *net,
4254 struct in6_rtmsg *rtmsg,
4255 struct fib6_config *cfg)
4257 *cfg = (struct fib6_config){
4258 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4260 .fc_ifindex = rtmsg->rtmsg_ifindex,
4261 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
4262 .fc_expires = rtmsg->rtmsg_info,
4263 .fc_dst_len = rtmsg->rtmsg_dst_len,
4264 .fc_src_len = rtmsg->rtmsg_src_len,
4265 .fc_flags = rtmsg->rtmsg_flags,
4266 .fc_type = rtmsg->rtmsg_type,
4268 .fc_nlinfo.nl_net = net,
4270 .fc_dst = rtmsg->rtmsg_dst,
4271 .fc_src = rtmsg->rtmsg_src,
4272 .fc_gateway = rtmsg->rtmsg_gateway,
4276 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4278 struct fib6_config cfg;
4279 struct in6_rtmsg rtmsg;
4283 case SIOCADDRT: /* Add a route */
4284 case SIOCDELRT: /* Delete a route */
4285 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4287 err = copy_from_user(&rtmsg, arg,
4288 sizeof(struct in6_rtmsg));
4292 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
4297 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4300 err = ip6_route_del(&cfg, NULL);
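/* Userspace sketch (assumptions: fd is an AF_INET6 socket and the caller
 * has CAP_NET_ADMIN): the legacy ioctl path above is what route(8)-style
 * tools still use.
 *
 *	struct in6_rtmsg rtmsg = {
 *		.rtmsg_dst	= dst_prefix,
 *		.rtmsg_dst_len	= 64,
 *		.rtmsg_gateway	= gw_addr,
 *		.rtmsg_ifindex	= ifindex,
 *		.rtmsg_metric	= 1,
 *		.rtmsg_flags	= RTF_UP | RTF_GATEWAY,
 *	};
 *	err = ioctl(fd, SIOCADDRT, &rtmsg);
 */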
4314 * Drop the packet on the floor
4317 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4319 struct dst_entry *dst = skb_dst(skb);
4320 struct net *net = dev_net(dst->dev);
4321 struct inet6_dev *idev;
4324 if (netif_is_l3_master(skb->dev) &&
4325 dst->dev == net->loopback_dev)
4326 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4328 idev = ip6_dst_idev(dst);
4330 switch (ipstats_mib_noroutes) {
4331 case IPSTATS_MIB_INNOROUTES:
4332 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4333 if (type == IPV6_ADDR_ANY) {
4334 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4338 case IPSTATS_MIB_OUTNOROUTES:
4339 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4343 /* Start over by dropping the dst for l3mdev case */
4344 if (netif_is_l3_master(skb->dev))
4347 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4352 static int ip6_pkt_discard(struct sk_buff *skb)
4354 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4357 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4359 skb->dev = skb_dst(skb)->dev;
4360 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4363 static int ip6_pkt_prohibit(struct sk_buff *skb)
4365 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4368 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4370 skb->dev = skb_dst(skb)->dev;
4371 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4375 * Allocate a dst for local (unicast / anycast) address.
4378 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4379 struct inet6_dev *idev,
4380 const struct in6_addr *addr,
4381 bool anycast, gfp_t gfp_flags)
4383 struct fib6_config cfg = {
4384 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4385 .fc_ifindex = idev->dev->ifindex,
4386 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4389 .fc_protocol = RTPROT_KERNEL,
4390 .fc_nlinfo.nl_net = net,
4391 .fc_ignore_dev_down = true,
4393 struct fib6_info *f6i;
4396 cfg.fc_type = RTN_ANYCAST;
4397 cfg.fc_flags |= RTF_ANYCAST;
4399 cfg.fc_type = RTN_LOCAL;
4400 cfg.fc_flags |= RTF_LOCAL;
4403 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4405 f6i->dst_nocount = true;
4409 /* remove deleted ip from prefsrc entries */
4410 struct arg_dev_net_ip {
4411 struct net_device *dev;
4413 struct in6_addr *addr;
4416 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4418 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4419 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4420 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4423 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4424 rt != net->ipv6.fib6_null_entry &&
4425 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4426 spin_lock_bh(&rt6_exception_lock);
4427 /* remove prefsrc entry */
4428 rt->fib6_prefsrc.plen = 0;
4429 spin_unlock_bh(&rt6_exception_lock);
4434 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4436 struct net *net = dev_net(ifp->idev->dev);
4437 struct arg_dev_net_ip adni = {
4438 .dev = ifp->idev->dev,
4442 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4445 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4447 /* Remove routers and update dst entries when a gateway turns into a host. */
4448 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4450 struct in6_addr *gateway = (struct in6_addr *)arg;
4453 /* RA routes do not use nexthops */
4458 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4459 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4462 /* Further clean up cached routes in exception table.
4463 * This is needed because cached route may have a different
4464 * gateway than its 'parent' in the case of an ip redirect.
4466 fib6_nh_exceptions_clean_tohost(nh, gateway);
4471 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4473 fib6_clean_all(net, fib6_clean_tohost, gateway);
4476 struct arg_netdev_event {
4477 const struct net_device *dev;
4479 unsigned char nh_flags;
4480 unsigned long event;
4484 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4486 struct fib6_info *iter;
4487 struct fib6_node *fn;
4489 fn = rcu_dereference_protected(rt->fib6_node,
4490 lockdep_is_held(&rt->fib6_table->tb6_lock));
4491 iter = rcu_dereference_protected(fn->leaf,
4492 lockdep_is_held(&rt->fib6_table->tb6_lock));
4494 if (iter->fib6_metric == rt->fib6_metric &&
4495 rt6_qualify_for_ecmp(iter))
4497 iter = rcu_dereference_protected(iter->fib6_next,
4498 lockdep_is_held(&rt->fib6_table->tb6_lock));
4504 /* only called for fib entries with builtin fib6_nh */
4505 static bool rt6_is_dead(const struct fib6_info *rt)
4507 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4508 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4509 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4515 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4517 struct fib6_info *iter;
4520 if (!rt6_is_dead(rt))
4521 total += rt->fib6_nh->fib_nh_weight;
4523 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4524 if (!rt6_is_dead(iter))
4525 total += iter->fib6_nh->fib_nh_weight;
4531 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4533 int upper_bound = -1;
4535 if (!rt6_is_dead(rt)) {
4536 *weight += rt->fib6_nh->fib_nh_weight;
4537 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4540 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4543 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4545 struct fib6_info *iter;
4548 rt6_upper_bound_set(rt, &weight, total);
4550 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4551 rt6_upper_bound_set(iter, &weight, total);
4554 void rt6_multipath_rebalance(struct fib6_info *rt)
4556 struct fib6_info *first;
4559 /* In case the entire multipath route was marked for flushing,
4560 * then there is no need to rebalance upon the removal of every
4561 * sibling route.
4562 */
4563 if (!rt->fib6_nsiblings || rt->should_flush)
4566 /* During lookup routes are evaluated in order, so we need to
4567 * make sure upper bounds are assigned from the first sibling
4568 * onwards.
4569 */
4570 first = rt6_multipath_first_sibling(rt);
4571 if (WARN_ON_ONCE(!first))
4574 total = rt6_multipath_total_weight(first);
4575 rt6_multipath_upper_bound_set(first, total);
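/* Worked example: with two siblings whose weights are 1 and 3, total is 4,
 * so the bounds computed above are (1 << 31) * 1 / 4 - 1 = 0x1fffffff and
 * (1 << 31) * 4 / 4 - 1 = 0x7fffffff; a flow hash at or below the first
 * bound selects the first nexthop, anything larger selects the second.
 */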
4578 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4580 const struct arg_netdev_event *arg = p_arg;
4581 struct net *net = dev_net(arg->dev);
4583 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4584 rt->fib6_nh->fib_nh_dev == arg->dev) {
4585 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4586 fib6_update_sernum_upto_root(net, rt);
4587 rt6_multipath_rebalance(rt);
4593 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4595 struct arg_netdev_event arg = {
4598 .nh_flags = nh_flags,
4602 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4603 arg.nh_flags |= RTNH_F_LINKDOWN;
4605 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4608 /* only called for fib entries with inline fib6_nh */
4609 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4610 const struct net_device *dev)
4612 struct fib6_info *iter;
4614 if (rt->fib6_nh->fib_nh_dev == dev)
4616 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4617 if (iter->fib6_nh->fib_nh_dev == dev)
4623 static void rt6_multipath_flush(struct fib6_info *rt)
4625 struct fib6_info *iter;
4627 rt->should_flush = 1;
4628 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4629 iter->should_flush = 1;
4632 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4633 const struct net_device *down_dev)
4635 struct fib6_info *iter;
4636 unsigned int dead = 0;
4638 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4639 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4641 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4642 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4643 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4649 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4650 const struct net_device *dev,
4651 unsigned char nh_flags)
4653 struct fib6_info *iter;
4655 if (rt->fib6_nh->fib_nh_dev == dev)
4656 rt->fib6_nh->fib_nh_flags |= nh_flags;
4657 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4658 if (iter->fib6_nh->fib_nh_dev == dev)
4659 iter->fib6_nh->fib_nh_flags |= nh_flags;
4662 /* called with write lock held for table with rt */
4663 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4665 const struct arg_netdev_event *arg = p_arg;
4666 const struct net_device *dev = arg->dev;
4667 struct net *net = dev_net(dev);
4669 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4672 switch (arg->event) {
4673 case NETDEV_UNREGISTER:
4674 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4676 if (rt->should_flush)
4678 if (!rt->fib6_nsiblings)
4679 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4680 if (rt6_multipath_uses_dev(rt, dev)) {
4683 count = rt6_multipath_dead_count(rt, dev);
4684 if (rt->fib6_nsiblings + 1 == count) {
4685 rt6_multipath_flush(rt);
4688 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4690 fib6_update_sernum(net, rt);
4691 rt6_multipath_rebalance(rt);
4695 if (rt->fib6_nh->fib_nh_dev != dev ||
4696 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4698 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4699 rt6_multipath_rebalance(rt);
4706 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4708 struct arg_netdev_event arg = {
4714 struct net *net = dev_net(dev);
4716 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4717 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4719 fib6_clean_all(net, fib6_ifdown, &arg);
4722 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4724 rt6_sync_down_dev(dev, event);
4725 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4726 neigh_ifdown(&nd_tbl, dev);
4729 struct rt6_mtu_change_arg {
4730 struct net_device *dev;
4732 struct fib6_info *f6i;
4735 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4737 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4738 struct fib6_info *f6i = arg->f6i;
4740 /* For an administrative MTU increase, there is no way to discover
4741 * a corresponding IPv6 PMTU increase, so the PMTU should be updated here.
4742 * RFC 1981 does not cover administrative MTU increases, so updating
4743 * the PMTU on increase is a MUST (e.g. for jumbo frames).
4744 */
4745 if (nh->fib_nh_dev == arg->dev) {
4746 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4747 u32 mtu = f6i->fib6_pmtu;
4749 if (mtu >= arg->mtu ||
4750 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4751 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4753 spin_lock_bh(&rt6_exception_lock);
4754 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4755 spin_unlock_bh(&rt6_exception_lock);
4761 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4763 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4764 struct inet6_dev *idev;
4766 /* In IPv6, PMTU discovery is not optional,
4767 so the RTAX_MTU lock cannot disable it.
4768 We still use this lock to block changes
4769 caused by addrconf/ndisc.
4770 */
4772 idev = __in6_dev_get(arg->dev);
4776 if (fib6_metric_locked(f6i, RTAX_MTU))
4781 /* fib6_nh_mtu_change only returns 0, so this is safe */
4782 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
4786 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4789 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4791 struct rt6_mtu_change_arg arg = {
4796 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
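/* Caller sketch: this is reached from the addrconf NETDEV_CHANGEMTU
 * notifier path, so an administrative change such as
 *
 *   ip link set dev eth0 mtu 1400
 *
 * walks every FIB entry using eth0 and updates stored route MTUs as
 * decided by rt6_mtu_change_route() above.
 */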
4799 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4800 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4801 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4802 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4803 [RTA_OIF] = { .type = NLA_U32 },
4804 [RTA_IIF] = { .type = NLA_U32 },
4805 [RTA_PRIORITY] = { .type = NLA_U32 },
4806 [RTA_METRICS] = { .type = NLA_NESTED },
4807 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4808 [RTA_PREF] = { .type = NLA_U8 },
4809 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4810 [RTA_ENCAP] = { .type = NLA_NESTED },
4811 [RTA_EXPIRES] = { .type = NLA_U32 },
4812 [RTA_UID] = { .type = NLA_U32 },
4813 [RTA_MARK] = { .type = NLA_U32 },
4814 [RTA_TABLE] = { .type = NLA_U32 },
4815 [RTA_IP_PROTO] = { .type = NLA_U8 },
4816 [RTA_SPORT] = { .type = NLA_U16 },
4817 [RTA_DPORT] = { .type = NLA_U16 },
4818 [RTA_NH_ID] = { .type = NLA_U32 },
4821 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4822 struct fib6_config *cfg,
4823 struct netlink_ext_ack *extack)
4826 struct nlattr *tb[RTA_MAX+1];
4830 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4831 rtm_ipv6_policy, extack);
4836 rtm = nlmsg_data(nlh);
4838 *cfg = (struct fib6_config){
4839 .fc_table = rtm->rtm_table,
4840 .fc_dst_len = rtm->rtm_dst_len,
4841 .fc_src_len = rtm->rtm_src_len,
4843 .fc_protocol = rtm->rtm_protocol,
4844 .fc_type = rtm->rtm_type,
4846 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4847 .fc_nlinfo.nlh = nlh,
4848 .fc_nlinfo.nl_net = sock_net(skb->sk),
4851 if (rtm->rtm_type == RTN_UNREACHABLE ||
4852 rtm->rtm_type == RTN_BLACKHOLE ||
4853 rtm->rtm_type == RTN_PROHIBIT ||
4854 rtm->rtm_type == RTN_THROW)
4855 cfg->fc_flags |= RTF_REJECT;
4857 if (rtm->rtm_type == RTN_LOCAL)
4858 cfg->fc_flags |= RTF_LOCAL;
4860 if (rtm->rtm_flags & RTM_F_CLONED)
4861 cfg->fc_flags |= RTF_CACHE;
4863 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4865 if (tb[RTA_NH_ID]) {
4866 if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
4867 tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
4868 NL_SET_ERR_MSG(extack,
4869 "Nexthop specification and nexthop id are mutually exclusive");
4872 cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
4875 if (tb[RTA_GATEWAY]) {
4876 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4877 cfg->fc_flags |= RTF_GATEWAY;
4880 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4885 int plen = (rtm->rtm_dst_len + 7) >> 3;
4887 if (nla_len(tb[RTA_DST]) < plen)
4888 goto errout;
4890 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4894 int plen = (rtm->rtm_src_len + 7) >> 3;
4896 if (nla_len(tb[RTA_SRC]) < plen)
4897 goto errout;
4899 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4902 if (tb[RTA_PREFSRC])
4903 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4906 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4908 if (tb[RTA_PRIORITY])
4909 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4911 if (tb[RTA_METRICS]) {
4912 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4913 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4917 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4919 if (tb[RTA_MULTIPATH]) {
4920 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4921 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4923 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4924 cfg->fc_mp_len, extack);
4930 pref = nla_get_u8(tb[RTA_PREF]);
4931 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4932 pref != ICMPV6_ROUTER_PREF_HIGH)
4933 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4934 cfg->fc_flags |= RTF_PREF(pref);
4938 cfg->fc_encap = tb[RTA_ENCAP];
4940 if (tb[RTA_ENCAP_TYPE]) {
4941 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4943 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
4948 if (tb[RTA_EXPIRES]) {
4949 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4951 if (addrconf_finite_timeout(timeout)) {
4952 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4953 cfg->fc_flags |= RTF_EXPIRES;
4963 struct fib6_info *fib6_info;
4964 struct fib6_config r_cfg;
4965 struct list_head next;
4968 static int ip6_route_info_append(struct net *net,
4969 struct list_head *rt6_nh_list,
4970 struct fib6_info *rt,
4971 struct fib6_config *r_cfg)
4976 list_for_each_entry(nh, rt6_nh_list, next) {
4977 /* check if fib6_info already exists */
4978 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
4982 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4986 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4987 list_add_tail(&nh->next, rt6_nh_list);
4992 static void ip6_route_mpath_notify(struct fib6_info *rt,
4993 struct fib6_info *rt_last,
4994 struct nl_info *info,
4997 /* if this is an APPEND route, then rt points to the first route
4998 * inserted and rt_last points to last route inserted. Userspace
4999 * wants a consistent dump of the route which starts at the first
5000 * nexthop. Since sibling routes are always added at the end of
5001 * the list, find the first sibling of the last route appended
5003 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5004 rt = list_first_entry(&rt_last->fib6_siblings,
5010 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5013 static int ip6_route_multipath_add(struct fib6_config *cfg,
5014 struct netlink_ext_ack *extack)
5016 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5017 struct nl_info *info = &cfg->fc_nlinfo;
5018 enum fib_event_type event_type;
5019 struct fib6_config r_cfg;
5020 struct rtnexthop *rtnh;
5021 struct fib6_info *rt;
5022 struct rt6_nh *err_nh;
5023 struct rt6_nh *nh, *nh_safe;
5029 int replace = (cfg->fc_nlinfo.nlh &&
5030 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5031 LIST_HEAD(rt6_nh_list);
5033 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5034 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5035 nlflags |= NLM_F_APPEND;
5037 remaining = cfg->fc_mp_len;
5038 rtnh = (struct rtnexthop *)cfg->fc_mp;
5040 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
5041 * fib6_info structs per nexthop
5043 while (rtnh_ok(rtnh, remaining)) {
5044 memcpy(&r_cfg, cfg, sizeof(*cfg));
5045 if (rtnh->rtnh_ifindex)
5046 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5048 attrlen = rtnh_attrlen(rtnh);
5050 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5052 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5054 r_cfg.fc_gateway = nla_get_in6_addr(nla);
5055 r_cfg.fc_flags |= RTF_GATEWAY;
5057 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5058 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5060 r_cfg.fc_encap_type = nla_get_u16(nla);
5063 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5064 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5070 if (!rt6_qualify_for_ecmp(rt)) {
5072 NL_SET_ERR_MSG(extack,
5073 "Device only routes can not be added for IPv6 using the multipath API.");
5074 fib6_info_release(rt);
5078 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5080 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
5083 fib6_info_release(rt);
5087 rtnh = rtnh_next(rtnh, &remaining);
5090 if (list_empty(&rt6_nh_list)) {
5091 NL_SET_ERR_MSG(extack,
5092 "Invalid nexthop configuration - no valid nexthops");
5096 /* for add and replace send one notification with all nexthops.
5097 * Skip the notification in fib6_add_rt2node and send one with
5098 * the full route when done
5100 info->skip_notify = 1;
5102 /* For add and replace, send one notification with all nexthops. For
5103 * append, send one notification with all appended nexthops.
5105 info->skip_notify_kernel = 1;
5108 list_for_each_entry(nh, &rt6_nh_list, next) {
5109 err = __ip6_ins_rt(nh->fib6_info, info, extack);
5110 fib6_info_release(nh->fib6_info);
5113 /* save reference to last route successfully inserted */
5114 rt_last = nh->fib6_info;
5116 /* save reference to first route for notification */
5118 rt_notif = nh->fib6_info;
5121 /* nh->fib6_info is used or freed at this point, reset to NULL */
5122 nh->fib6_info = NULL;
5125 NL_SET_ERR_MSG_MOD(extack,
5126 "multipath route replace failed (check consistency of installed routes)");
5131 /* Because each route is added like a single route, we remove
5132 * these flags after the first nexthop: if there is a collision,
5133 * we have already failed to add the first nexthop:
5134 * fib6_add_rt2node() has rejected it; when replacing, the old
5135 * nexthops have been replaced by the first new one, and the rest
5136 * should be added to it.
5137 */
5138 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5143 event_type = replace ? FIB_EVENT_ENTRY_REPLACE : FIB_EVENT_ENTRY_ADD;
5144 err = call_fib6_multipath_entry_notifiers(info->nl_net, event_type,
5145 rt_notif, nhn - 1, extack);
5147 /* Delete all the siblings that were just added */
5152 /* success ... tell user about new route */
5153 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5157 /* send notification for routes that were added so that
5158 * the delete notifications sent by ip6_route_del are
5159 * coherent
5160 */
5162 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5164 /* Delete routes that were already added */
5165 list_for_each_entry(nh, &rt6_nh_list, next) {
5168 ip6_route_del(&nh->r_cfg, extack);
5172 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
5174 fib6_info_release(nh->fib6_info);
5175 list_del(&nh->next);
5182 static int ip6_route_multipath_del(struct fib6_config *cfg,
5183 struct netlink_ext_ack *extack)
5185 struct fib6_config r_cfg;
5186 struct rtnexthop *rtnh;
5189 int err = 1, last_err = 0;
5191 remaining = cfg->fc_mp_len;
5192 rtnh = (struct rtnexthop *)cfg->fc_mp;
5194 /* Parse a Multipath Entry */
5195 while (rtnh_ok(rtnh, remaining)) {
5196 memcpy(&r_cfg, cfg, sizeof(*cfg));
5197 if (rtnh->rtnh_ifindex)
5198 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5200 attrlen = rtnh_attrlen(rtnh);
5202 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5204 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5206 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
5207 r_cfg.fc_flags |= RTF_GATEWAY;
5210 err = ip6_route_del(&r_cfg, extack);
5211 if (err)
5212 last_err = err;
5214 rtnh = rtnh_next(rtnh, &remaining);
5215 }
5217 return last_err;
5218 }
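/* Illustration only (hypothetical addresses): deleting an individual
 * nexthop of an ECMP route goes through the same rtnexthop walk, e.g.
 *
 *   ip -6 route del 2001:db8::/64 nexthop via fe80::1 dev eth0
 *
 * Each nexthop is deleted as an independent route and last_err carries
 * the last failure, if any, once the walk completes.
 */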
5220 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5221 struct netlink_ext_ack *extack)
5223 struct fib6_config cfg;
5226 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5231 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5232 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5237 return ip6_route_multipath_del(&cfg, extack);
5239 cfg.fc_delete_all_nh = 1;
5240 return ip6_route_del(&cfg, extack);
5244 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5245 struct netlink_ext_ack *extack)
5247 struct fib6_config cfg;
5250 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5254 if (cfg.fc_metric == 0)
5255 cfg.fc_metric = IP6_RT_PRIO_USER;
5258 return ip6_route_multipath_add(&cfg, extack);
5260 return ip6_route_add(&cfg, GFP_KERNEL, extack);
5263 /* add the overhead of this fib6_nh to nexthop_len */
5264 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5266 int *nexthop_len = arg;
5268 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5269 + NLA_ALIGN(sizeof(struct rtnexthop))
5270 + nla_total_size(16); /* RTA_GATEWAY */
5272 if (nh->fib_nh_lws) {
5273 /* RTA_ENCAP_TYPE */
5274 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5276 *nexthop_len += nla_total_size(2);
5282 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5287 nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5288 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5291 struct fib6_nh *nh = f6i->fib6_nh;
5294 if (f6i->fib6_nsiblings) {
5295 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
5296 + NLA_ALIGN(sizeof(struct rtnexthop))
5297 + nla_total_size(16) /* RTA_GATEWAY */
5298 + lwtunnel_get_encap_size(nh->fib_nh_lws);
5300 nexthop_len *= f6i->fib6_nsiblings;
5302 nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5305 return NLMSG_ALIGN(sizeof(struct rtmsg))
5306 + nla_total_size(16) /* RTA_SRC */
5307 + nla_total_size(16) /* RTA_DST */
5308 + nla_total_size(16) /* RTA_GATEWAY */
5309 + nla_total_size(16) /* RTA_PREFSRC */
5310 + nla_total_size(4) /* RTA_TABLE */
5311 + nla_total_size(4) /* RTA_IIF */
5312 + nla_total_size(4) /* RTA_OIF */
5313 + nla_total_size(4) /* RTA_PRIORITY */
5314 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5315 + nla_total_size(sizeof(struct rta_cacheinfo))
5316 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5317 + nla_total_size(1) /* RTA_PREF */
5318 + nexthop_len;
5319 }
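/* Sizing note: nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload),
 * i.e. the payload plus the 4-byte attribute header, rounded up to a
 * 4-byte boundary. So nla_total_size(16) for an IPv6 address attribute
 * evaluates to 20 bytes and nla_total_size(1) for RTA_PREF to 8 bytes;
 * the sum above is an upper bound used to size the notification skb,
 * not an exact message length.
 */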
5321 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5322 unsigned char *flags)
5324 if (nexthop_is_multipath(nh)) {
5327 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5329 goto nla_put_failure;
5331 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5332 goto nla_put_failure;
5334 nla_nest_end(skb, mp);
5336 struct fib6_nh *fib6_nh;
5338 fib6_nh = nexthop_fib6_nh(nh);
5339 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5341 goto nla_put_failure;
5350 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5351 struct fib6_info *rt, struct dst_entry *dst,
5352 struct in6_addr *dest, struct in6_addr *src,
5353 int iif, int type, u32 portid, u32 seq,
5356 struct rt6_info *rt6 = (struct rt6_info *)dst;
5357 struct rt6key *rt6_dst, *rt6_src;
5358 u32 *pmetrics, table, rt6_flags;
5359 unsigned char nh_flags = 0;
5360 struct nlmsghdr *nlh;
5364 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5369 rt6_dst = &rt6->rt6i_dst;
5370 rt6_src = &rt6->rt6i_src;
5371 rt6_flags = rt6->rt6i_flags;
5373 rt6_dst = &rt->fib6_dst;
5374 rt6_src = &rt->fib6_src;
5375 rt6_flags = rt->fib6_flags;
5378 rtm = nlmsg_data(nlh);
5379 rtm->rtm_family = AF_INET6;
5380 rtm->rtm_dst_len = rt6_dst->plen;
5381 rtm->rtm_src_len = rt6_src->plen;
5384 table = rt->fib6_table->tb6_id;
5386 table = RT6_TABLE_UNSPEC;
5387 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5388 if (nla_put_u32(skb, RTA_TABLE, table))
5389 goto nla_put_failure;
5391 rtm->rtm_type = rt->fib6_type;
5393 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5394 rtm->rtm_protocol = rt->fib6_protocol;
5396 if (rt6_flags & RTF_CACHE)
5397 rtm->rtm_flags |= RTM_F_CLONED;
5400 if (nla_put_in6_addr(skb, RTA_DST, dest))
5401 goto nla_put_failure;
5402 rtm->rtm_dst_len = 128;
5403 } else if (rtm->rtm_dst_len)
5404 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5405 goto nla_put_failure;
5406 #ifdef CONFIG_IPV6_SUBTREES
5408 if (nla_put_in6_addr(skb, RTA_SRC, src))
5409 goto nla_put_failure;
5410 rtm->rtm_src_len = 128;
5411 } else if (rtm->rtm_src_len &&
5412 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5413 goto nla_put_failure;
5416 #ifdef CONFIG_IPV6_MROUTE
5417 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5418 int err = ip6mr_get_route(net, skb, rtm, portid);
5423 goto nla_put_failure;
5426 if (nla_put_u32(skb, RTA_IIF, iif))
5427 goto nla_put_failure;
5429 struct in6_addr saddr_buf;
5430 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
5431 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5432 goto nla_put_failure;
5435 if (rt->fib6_prefsrc.plen) {
5436 struct in6_addr saddr_buf;
5437 saddr_buf = rt->fib6_prefsrc.addr;
5438 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5439 goto nla_put_failure;
5442 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5443 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5444 goto nla_put_failure;
5446 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5447 goto nla_put_failure;
5449 /* For multipath routes, walk the siblings list and add
5450 * each as a nexthop within RTA_MULTIPATH.
5453 if (rt6_flags & RTF_GATEWAY &&
5454 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5455 goto nla_put_failure;
5457 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5458 goto nla_put_failure;
5459 } else if (rt->fib6_nsiblings) {
5460 struct fib6_info *sibling, *next_sibling;
5463 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5465 goto nla_put_failure;
5467 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5468 rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
5469 goto nla_put_failure;
5471 list_for_each_entry_safe(sibling, next_sibling,
5472 &rt->fib6_siblings, fib6_siblings) {
5473 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5474 sibling->fib6_nh->fib_nh_weight,
5476 goto nla_put_failure;
5479 nla_nest_end(skb, mp);
5480 } else if (rt->nh) {
5481 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5482 goto nla_put_failure;
5484 if (nexthop_is_blackhole(rt->nh))
5485 rtm->rtm_type = RTN_BLACKHOLE;
5487 if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5488 goto nla_put_failure;
5490 rtm->rtm_flags |= nh_flags;
5492 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5493 &nh_flags, false) < 0)
5494 goto nla_put_failure;
5496 rtm->rtm_flags |= nh_flags;
5499 if (rt6_flags & RTF_EXPIRES) {
5500 expires = dst ? dst->expires : rt->expires;
5504 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5505 goto nla_put_failure;
5507 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5508 goto nla_put_failure;
5511 nlmsg_end(skb, nlh);
5512 return 0;
5514 nla_put_failure:
5515 nlmsg_cancel(skb, nlh);
5516 return -EMSGSIZE;
5517 }
5519 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5521 const struct net_device *dev = arg;
5523 if (nh->fib_nh_dev == dev)
5524 return 1;
5526 return 0;
5527 }
5529 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5530 const struct net_device *dev)
5533 struct net_device *_dev = (struct net_device *)dev;
5535 return !!nexthop_for_each_fib6_nh(f6i->nh,
5536 fib6_info_nh_uses_dev,
5540 if (f6i->fib6_nh->fib_nh_dev == dev)
5543 if (f6i->fib6_nsiblings) {
5544 struct fib6_info *sibling, *next_sibling;
5546 list_for_each_entry_safe(sibling, next_sibling,
5547 &f6i->fib6_siblings, fib6_siblings) {
5548 if (sibling->fib6_nh->fib_nh_dev == dev)
5556 struct fib6_nh_exception_dump_walker {
5557 struct rt6_rtnl_dump_arg *dump;
5558 struct fib6_info *rt;
5559 unsigned int flags;
5560 unsigned int skip;
5561 unsigned int count;
5562 };
5564 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
5566 struct fib6_nh_exception_dump_walker *w = arg;
5567 struct rt6_rtnl_dump_arg *dump = w->dump;
5568 struct rt6_exception_bucket *bucket;
5569 struct rt6_exception *rt6_ex;
5572 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
5576 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
5577 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
5583 /* Expiration of entries doesn't bump sernum, insertion
5584 * does. Removal is triggered by insertion, so we can
5585 * rely on the fact that if entries change between two
5586 * partial dumps, this node is scanned again completely,
5587 * see rt6_insert_exception() and fib6_dump_table().
5589 * Count expired entries we go through as handled
5590 * entries that we'll skip next time, in case of partial
5591 * node dump. Otherwise, if entries expire meanwhile,
5592 * we'll skip the wrong amount.
5594 if (rt6_check_expired(rt6_ex->rt6i)) {
5599 err = rt6_fill_node(dump->net, dump->skb, w->rt,
5600 &rt6_ex->rt6i->dst, NULL, NULL, 0,
5602 NETLINK_CB(dump->cb->skb).portid,
5603 dump->cb->nlh->nlmsg_seq, w->flags);
5615 /* Return -1 if done with node, number of handled routes on partial dump */
5616 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
5618 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
5619 struct fib_dump_filter *filter = &arg->filter;
5620 unsigned int flags = NLM_F_MULTI;
5621 struct net *net = arg->net;
5624 if (rt == net->ipv6.fib6_null_entry)
5627 if ((filter->flags & RTM_F_PREFIX) &&
5628 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5629 /* success since this is not a prefix route */
5632 if (filter->filter_set &&
5633 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5634 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5635 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5639 if (filter->filter_set ||
5640 !filter->dump_routes || !filter->dump_exceptions) {
5641 flags |= NLM_F_DUMP_FILTERED;
5644 if (filter->dump_routes) {
5648 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5650 NETLINK_CB(arg->cb->skb).portid,
5651 arg->cb->nlh->nlmsg_seq, flags)) {
5658 if (filter->dump_exceptions) {
5659 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5668 err = nexthop_for_each_fib6_nh(rt->nh,
5669 rt6_nh_dump_exceptions,
5672 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5677 return count += w.count;
5683 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5684 const struct nlmsghdr *nlh,
5686 struct netlink_ext_ack *extack)
5691 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5692 NL_SET_ERR_MSG_MOD(extack,
5693 "Invalid header for get route request");
5697 if (!netlink_strict_get_check(skb))
5698 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5699 rtm_ipv6_policy, extack);
5701 rtm = nlmsg_data(nlh);
5702 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5703 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5704 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5706 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5709 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5710 NL_SET_ERR_MSG_MOD(extack,
5711 "Invalid flags for get route request");
5715 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5716 rtm_ipv6_policy, extack);
5720 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5721 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5722 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5726 for (i = 0; i <= RTA_MAX; i++) {
5742 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
5750 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5751 struct netlink_ext_ack *extack)
5753 struct net *net = sock_net(in_skb->sk);
5754 struct nlattr *tb[RTA_MAX+1];
5755 int err, iif = 0, oif = 0;
5756 struct fib6_info *from;
5757 struct dst_entry *dst;
5758 struct rt6_info *rt;
5759 struct sk_buff *skb;
5761 struct flowi6 fl6 = {};
5764 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
5769 rtm = nlmsg_data(nlh);
5770 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
5771 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
5774 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
5777 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
5781 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
5784 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
5788 iif = nla_get_u32(tb[RTA_IIF]);
5791 oif = nla_get_u32(tb[RTA_OIF]);
5794 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
5797 fl6.flowi6_uid = make_kuid(current_user_ns(),
5798 nla_get_u32(tb[RTA_UID]));
5800 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
5803 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
5806 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
5808 if (tb[RTA_IP_PROTO]) {
5809 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
5810 &fl6.flowi6_proto, AF_INET6,
5817 struct net_device *dev;
5822 dev = dev_get_by_index_rcu(net, iif);
5829 fl6.flowi6_iif = iif;
5831 if (!ipv6_addr_any(&fl6.saddr))
5832 flags |= RT6_LOOKUP_F_HAS_SADDR;
5834 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
5838 fl6.flowi6_oif = oif;
5840 dst = ip6_route_output(net, NULL, &fl6);
5844 rt = container_of(dst, struct rt6_info, dst);
5845 if (rt->dst.error) {
5846 err = rt->dst.error;
5851 if (rt == net->ipv6.ip6_null_entry) {
5852 err = rt->dst.error;
5857 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
5864 skb_dst_set(skb, &rt->dst);
5867 from = rcu_dereference(rt->from);
5870 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
5872 NETLINK_CB(in_skb).portid,
5875 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
5876 &fl6.saddr, iif, RTM_NEWROUTE,
5877 NETLINK_CB(in_skb).portid,
5889 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
5890 errout:
5891 return err;
5892 }
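/* Userspace view (illustrative, hypothetical address): this handler
 * backs "ip -6 route get", e.g.
 *
 *   ip -6 route get 2001:db8::1
 *   ip -6 route get fibmatch 2001:db8::1
 *
 * The fibmatch variant sets RTM_F_FIB_MATCH, so the reply describes the
 * matching FIB entry rather than the resolved dst.
 */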
5894 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
5895 unsigned int nlm_flags)
5897 struct sk_buff *skb;
5898 struct net *net = info->nl_net;
5903 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
5905 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
5909 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
5910 event, info->portid, seq, nlm_flags);
5912 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
5913 WARN_ON(err == -EMSGSIZE);
5917 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
5918 info->nlh, gfp_any());
5922 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
5925 void fib6_rt_update(struct net *net, struct fib6_info *rt,
5926 struct nl_info *info)
5928 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
5929 struct sk_buff *skb;
5932 /* call_fib6_entry_notifiers will be removed when in-kernel notifier
5933 * is implemented and supported for nexthop objects
5935 call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
5937 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
5941 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
5942 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
5944 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
5945 WARN_ON(err == -EMSGSIZE);
5949 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
5950 info->nlh, gfp_any());
5954 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
5957 static int ip6_route_dev_notify(struct notifier_block *this,
5958 unsigned long event, void *ptr)
5960 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5961 struct net *net = dev_net(dev);
5963 if (!(dev->flags & IFF_LOOPBACK))
5966 if (event == NETDEV_REGISTER) {
5967 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
5968 net->ipv6.ip6_null_entry->dst.dev = dev;
5969 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
5970 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5971 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
5972 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
5973 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
5974 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
5976 } else if (event == NETDEV_UNREGISTER &&
5977 dev->reg_state != NETREG_UNREGISTERED) {
5978 /* NETDEV_UNREGISTER can be fired multiple times by
5979 * netdev_wait_allrefs(); make sure this cleanup runs only once.
5980 */
5981 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
5982 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5983 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
5984 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
5995 #ifdef CONFIG_PROC_FS
5996 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
5998 struct net *net = (struct net *)seq->private;
5999 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6000 net->ipv6.rt6_stats->fib_nodes,
6001 net->ipv6.rt6_stats->fib_route_nodes,
6002 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6003 net->ipv6.rt6_stats->fib_rt_entries,
6004 net->ipv6.rt6_stats->fib_rt_cache,
6005 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6006 net->ipv6.rt6_stats->fib_discarded_routes);
6010 #endif /* CONFIG_PROC_FS */
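/* The seq_show above backs /proc/net/rt6_stats: a single line of seven
 * hex fields in the order printed -- fib_nodes, fib_route_nodes,
 * fib_rt_alloc, fib_rt_entries, fib_rt_cache, currently allocated dst
 * entries and fib_discarded_routes. It can be read with e.g.
 * "cat /proc/net/rt6_stats".
 */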
6012 #ifdef CONFIG_SYSCTL
6015 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6016 void __user *buffer, size_t *lenp, loff_t *ppos)
6024 net = (struct net *)ctl->extra1;
6025 delay = net->ipv6.sysctl.flush_delay;
6026 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6030 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6034 static struct ctl_table ipv6_route_table_template[] = {
6036 .procname = "flush",
6037 .data = &init_net.ipv6.sysctl.flush_delay,
6038 .maxlen = sizeof(int),
6040 .proc_handler = ipv6_sysctl_rtcache_flush
6043 .procname = "gc_thresh",
6044 .data = &ip6_dst_ops_template.gc_thresh,
6045 .maxlen = sizeof(int),
6047 .proc_handler = proc_dointvec,
6050 .procname = "max_size",
6051 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
6052 .maxlen = sizeof(int),
6054 .proc_handler = proc_dointvec,
6057 .procname = "gc_min_interval",
6058 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6059 .maxlen = sizeof(int),
6061 .proc_handler = proc_dointvec_jiffies,
6064 .procname = "gc_timeout",
6065 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6066 .maxlen = sizeof(int),
6068 .proc_handler = proc_dointvec_jiffies,
6071 .procname = "gc_interval",
6072 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
6073 .maxlen = sizeof(int),
6075 .proc_handler = proc_dointvec_jiffies,
6078 .procname = "gc_elasticity",
6079 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6080 .maxlen = sizeof(int),
6082 .proc_handler = proc_dointvec,
6085 .procname = "mtu_expires",
6086 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6087 .maxlen = sizeof(int),
6089 .proc_handler = proc_dointvec_jiffies,
6092 .procname = "min_adv_mss",
6093 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
6094 .maxlen = sizeof(int),
6096 .proc_handler = proc_dointvec,
6099 .procname = "gc_min_interval_ms",
6100 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6101 .maxlen = sizeof(int),
6103 .proc_handler = proc_dointvec_ms_jiffies,
6106 .procname = "skip_notify_on_dev_down",
6107 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6108 .maxlen = sizeof(int),
6110 .proc_handler = proc_dointvec_minmax,
6111 .extra1 = SYSCTL_ZERO,
6112 .extra2 = SYSCTL_ONE,
6117 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6119 struct ctl_table *table;
6121 table = kmemdup(ipv6_route_table_template,
6122 sizeof(ipv6_route_table_template),
6126 table[0].data = &net->ipv6.sysctl.flush_delay;
6127 table[0].extra1 = net;
6128 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6129 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
6130 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6131 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6132 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6133 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6134 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6135 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6136 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6137 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6139 /* Don't export sysctls to unprivileged users */
6140 if (net->user_ns != &init_user_ns)
6141 table[0].procname = NULL;
6142 }
6144 return table;
6145 }
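/* These entries surface under /proc/sys/net/ipv6/route/ (illustrative
 * usage, values are examples only):
 *
 *   sysctl net.ipv6.route.gc_thresh
 *   sysctl -w net.ipv6.route.max_size=16384
 *   echo 1 > /proc/sys/net/ipv6/route/flush
 *
 * As set up above, "flush" is hidden (procname cleared) in network
 * namespaces not owned by the initial user namespace.
 */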
6148 static int __net_init ip6_route_net_init(struct net *net)
6152 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6153 sizeof(net->ipv6.ip6_dst_ops));
6155 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6156 goto out_ip6_dst_ops;
6158 net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6159 if (!net->ipv6.fib6_null_entry)
6160 goto out_ip6_dst_entries;
6161 memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6162 sizeof(*net->ipv6.fib6_null_entry));
6164 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6165 sizeof(*net->ipv6.ip6_null_entry),
6167 if (!net->ipv6.ip6_null_entry)
6168 goto out_fib6_null_entry;
6169 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6170 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6171 ip6_template_metrics, true);
6172 INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);
6174 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6175 net->ipv6.fib6_has_custom_rules = false;
6176 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6177 sizeof(*net->ipv6.ip6_prohibit_entry),
6179 if (!net->ipv6.ip6_prohibit_entry)
6180 goto out_ip6_null_entry;
6181 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6182 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6183 ip6_template_metrics, true);
6184 INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
6186 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6187 sizeof(*net->ipv6.ip6_blk_hole_entry),
6189 if (!net->ipv6.ip6_blk_hole_entry)
6190 goto out_ip6_prohibit_entry;
6191 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6192 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6193 ip6_template_metrics, true);
6194 INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
6197 net->ipv6.sysctl.flush_delay = 0;
6198 net->ipv6.sysctl.ip6_rt_max_size = 4096;
6199 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6200 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6201 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6202 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6203 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6204 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6205 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6207 net->ipv6.ip6_rt_gc_expire = 30*HZ;
6213 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6214 out_ip6_prohibit_entry:
6215 kfree(net->ipv6.ip6_prohibit_entry);
6217 kfree(net->ipv6.ip6_null_entry);
6219 out_fib6_null_entry:
6220 kfree(net->ipv6.fib6_null_entry);
6221 out_ip6_dst_entries:
6222 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6227 static void __net_exit ip6_route_net_exit(struct net *net)
6229 kfree(net->ipv6.fib6_null_entry);
6230 kfree(net->ipv6.ip6_null_entry);
6231 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6232 kfree(net->ipv6.ip6_prohibit_entry);
6233 kfree(net->ipv6.ip6_blk_hole_entry);
6235 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6238 static int __net_init ip6_route_net_init_late(struct net *net)
6240 #ifdef CONFIG_PROC_FS
6241 proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
6242 sizeof(struct ipv6_route_iter));
6243 proc_create_net_single("rt6_stats", 0444, net->proc_net,
6244 rt6_stats_seq_show, NULL);
6249 static void __net_exit ip6_route_net_exit_late(struct net *net)
6251 #ifdef CONFIG_PROC_FS
6252 remove_proc_entry("ipv6_route", net->proc_net);
6253 remove_proc_entry("rt6_stats", net->proc_net);
6257 static struct pernet_operations ip6_route_net_ops = {
6258 .init = ip6_route_net_init,
6259 .exit = ip6_route_net_exit,
6262 static int __net_init ipv6_inetpeer_init(struct net *net)
6264 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6268 inet_peer_base_init(bp);
6269 net->ipv6.peers = bp;
6273 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6275 struct inet_peer_base *bp = net->ipv6.peers;
6277 net->ipv6.peers = NULL;
6278 inetpeer_invalidate_tree(bp);
6282 static struct pernet_operations ipv6_inetpeer_ops = {
6283 .init = ipv6_inetpeer_init,
6284 .exit = ipv6_inetpeer_exit,
6287 static struct pernet_operations ip6_route_net_late_ops = {
6288 .init = ip6_route_net_init_late,
6289 .exit = ip6_route_net_exit_late,
6292 static struct notifier_block ip6_route_dev_notifier = {
6293 .notifier_call = ip6_route_dev_notify,
6294 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6297 void __init ip6_route_init_special_entries(void)
6299 /* Registration of the loopback device happens before this code runs,
6300 * so the loopback reference in rt6_info is not taken automatically;
6301 * take it manually for init_net. */
6302 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6303 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6304 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6305 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6306 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6307 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6308 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6309 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6313 int __init ip6_route_init(void)
6319 ip6_dst_ops_template.kmem_cachep =
6320 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6321 SLAB_HWCACHE_ALIGN, NULL);
6322 if (!ip6_dst_ops_template.kmem_cachep)
6325 ret = dst_entries_init(&ip6_dst_blackhole_ops);
6327 goto out_kmem_cache;
6329 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6331 goto out_dst_entries;
6333 ret = register_pernet_subsys(&ip6_route_net_ops);
6335 goto out_register_inetpeer;
6337 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6341 goto out_register_subsys;
6347 ret = fib6_rules_init();
6351 ret = register_pernet_subsys(&ip6_route_net_late_ops);
6353 goto fib6_rules_init;
6355 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
6356 inet6_rtm_newroute, NULL, 0);
6358 goto out_register_late_subsys;
6360 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
6361 inet6_rtm_delroute, NULL, 0);
6363 goto out_register_late_subsys;
6365 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
6366 inet6_rtm_getroute, NULL,
6367 RTNL_FLAG_DOIT_UNLOCKED);
6369 goto out_register_late_subsys;
6371 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6373 goto out_register_late_subsys;
6375 for_each_possible_cpu(cpu) {
6376 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6378 INIT_LIST_HEAD(&ul->head);
6379 spin_lock_init(&ul->lock);
6385 out_register_late_subsys:
6386 rtnl_unregister_all(PF_INET6);
6387 unregister_pernet_subsys(&ip6_route_net_late_ops);
6389 fib6_rules_cleanup();
6394 out_register_subsys:
6395 unregister_pernet_subsys(&ip6_route_net_ops);
6396 out_register_inetpeer:
6397 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6399 dst_entries_destroy(&ip6_dst_blackhole_ops);
6401 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6405 void ip6_route_cleanup(void)
6407 unregister_netdevice_notifier(&ip6_route_dev_notifier);
6408 unregister_pernet_subsys(&ip6_route_net_late_ops);
6409 fib6_rules_cleanup();
6412 unregister_pernet_subsys(&ipv6_inetpeer_ops);
6413 unregister_pernet_subsys(&ip6_route_net_ops);
6414 dst_entries_destroy(&ip6_dst_blackhole_ops);
6415 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);