/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/netlink.h>

#include <net/protocol.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
#ifdef CONFIG_IP_ROUTE_MULTIPATH

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh;				\
	for (nhsel = 0, nh = (fi)->fib_nh;				\
	     nhsel < (fi)->fib_nhs;					\
	     nh++, nhsel++)

#define change_nexthops(fi) {						\
	int nhsel; struct fib_nh *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	     nhsel < (fi)->fib_nhs;					\
	     nexthop_nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope that gcc will optimize it to get rid of the dummy loop */

#define for_nexthops(fi) {						\
	int nhsel; const struct fib_nh *nh = (fi)->fib_nh;		\
	for (nhsel = 0; nhsel < 1; nhsel++)

#define change_nexthops(fi) {						\
	int nhsel;							\
	struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh);	\
	for (nhsel = 0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

#define endfor_nexthops(fi) }
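
/* Illustrative use of the iterators above (a sketch mirroring how they are
 * used later in this file, e.g. in fib_rebalance()):
 *
 *	for_nexthops(fi) {
 *		if (nh->fib_nh_flags & RTNH_F_DEAD)
 *			continue;
 *		total += nh->fib_nh_weight;
 *	} endfor_nexthops(fi);
 */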
const struct fib_prop fib_props[RTN_MAX + 1] = {
		.scope	= RT_SCOPE_NOWHERE,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_HOST,
		.scope	= RT_SCOPE_LINK,
		.scope	= RT_SCOPE_LINK,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_UNIVERSE,
	[RTN_UNREACHABLE] = {
		.error	= -EHOSTUNREACH,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_UNIVERSE,
		.scope	= RT_SCOPE_NOWHERE,
		.scope	= RT_SCOPE_NOWHERE,
static void rt_fibinfo_free(struct rtable __rcu **rtp)
	struct rtable *rt = rcu_dereference_protected(*rtp, 1);

	/* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
	 * because we waited an RCU grace period before calling
	 * free_fib_info_rcu()
	 */
	dst_dev_put(&rt->dst);
	dst_release_immediate(&rt->dst);

static void free_nh_exceptions(struct fib_nh *nh)
	struct fnhe_hash_bucket *hash;

	hash = rcu_dereference_protected(nh->nh_exceptions, 1);

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		fnhe = rcu_dereference_protected(hash[i].chain, 1);
			struct fib_nh_exception *next;

			next = rcu_dereference_protected(fnhe->fnhe_next, 1);
			rt_fibinfo_free(&fnhe->fnhe_rth_input);
			rt_fibinfo_free(&fnhe->fnhe_rth_output);

static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
	for_each_possible_cpu(cpu) {
		rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
		dst_dev_put(&rt->dst);
		dst_release_immediate(&rt->dst);
void fib_nh_common_release(struct fib_nh_common *nhc)
	dev_put(nhc->nhc_dev);
	lwtstate_put(nhc->nhc_lwtstate);
EXPORT_SYMBOL_GPL(fib_nh_common_release);

void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (fib_nh->nh_tclassid)
		net->ipv4.fib_num_tclassid_users--;
	fib_nh_common_release(&fib_nh->nh_common);
	free_nh_exceptions(fib_nh);
	rt_fibinfo_free_cpus(fib_nh->nh_pcpu_rth_output);
	rt_fibinfo_free(&fib_nh->nh_rth_input);

/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
	struct fib_info *fi = container_of(head, struct fib_info, rcu);

	change_nexthops(fi) {
		fib_nh_release(fi->fib_net, nexthop_nh);
	} endfor_nexthops(fi);

	ip_fib_metrics_put(fi->fib_metrics);

void free_fib_info(struct fib_info *fi)
	if (fi->fib_dead == 0) {
		pr_warn("Freeing alive fib_info %p\n", fi);
	call_rcu(&fi->rcu, free_fib_info_rcu);
EXPORT_SYMBOL_GPL(free_fib_info);
void fib_release_info(struct fib_info *fi)
	spin_lock_bh(&fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		hlist_del(&fi->fib_hash);
		hlist_del(&fi->fib_lhash);
		change_nexthops(fi) {
			if (!nexthop_nh->fib_nh_dev)
			hlist_del(&nexthop_nh->nh_hash);
		} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
	const struct fib_nh *onh = ofi->fib_nh;

		if (nh->fib_nh_oif != onh->fib_nh_oif ||
		    nh->fib_nh_gw4 != onh->fib_nh_gw4 ||
		    nh->fib_nh_scope != onh->fib_nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		    nh->fib_nh_weight != onh->fib_nh_weight ||
#ifdef CONFIG_IP_ROUTE_CLASSID
		    nh->nh_tclassid != onh->nh_tclassid ||
		    lwtunnel_cmp_encap(nh->fib_nh_lws, onh->fib_nh_lws) ||
		    ((nh->fib_nh_flags ^ onh->fib_nh_flags) & ~RTNH_COMPARE_MASK))
	} endfor_nexthops(fi);

static inline unsigned int fib_devindex_hashfn(unsigned int val)
	unsigned int mask = DEVINDEX_HASHSIZE - 1;

		(val >> DEVINDEX_HASHBITS) ^
		(val >> (DEVINDEX_HASHBITS * 2))) & mask;

static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
	unsigned int mask = (fib_info_hash_size - 1);
	unsigned int val = fi->fib_nhs;

	val ^= (fi->fib_protocol << 8) | fi->fib_scope;
	val ^= (__force u32)fi->fib_prefsrc;
	val ^= fi->fib_priority;
		val ^= fib_devindex_hashfn(nh->fib_nh_oif);
	} endfor_nexthops(fi)

	return (val ^ (val >> 7) ^ (val >> 12)) & mask;
static struct fib_info *fib_find_info(const struct fib_info *nfi)
	struct hlist_head *head;

	hash = fib_info_hashfn(nfi);
	head = &fib_info_hash[hash];

	hlist_for_each_entry(fi, head, fib_hash) {
		if (!net_eq(fi->fib_net, nfi->fib_net))
		if (fi->fib_nhs != nfi->fib_nhs)
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_scope == fi->fib_scope &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    nfi->fib_type == fi->fib_type &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics,
			   sizeof(u32) * RTAX_MAX) == 0 &&
		    !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
		    (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
/* Check that the gateway is already configured.
 * Used only by the redirect accept routine.
 */
int ip_fib_check_default(__be32 gw, struct net_device *dev)
	struct hlist_head *head;

	spin_lock(&fib_info_lock);

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];
	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->fib_nh_dev == dev &&
		    nh->fib_nh_gw4 == gw &&
		    !(nh->fib_nh_flags & RTNH_F_DEAD)) {
			spin_unlock(&fib_info_lock);

	spin_unlock(&fib_info_lock);
static inline size_t fib_nlmsg_size(struct fib_info *fi)
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(4) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(4) /* RTA_PREFSRC */
			 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

		size_t nh_encapsize = 0;
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain flow and gateway attribute */
		nhsize += 2 * nla_total_size(4);

		/* grab encap info */
			if (nh->fib_nh_lws) {
				nh_encapsize += lwtunnel_get_encap_size(
				nh_encapsize += nla_total_size(2);
		} endfor_nexthops(fi);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size((fi->fib_nhs * nhsize) +
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
	       int dst_len, u32 tb_id, const struct nl_info *info,
	       unsigned int nlm_flags)
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);

	err = fib_dump_info(skb, info->portid, seq, event, tb_id,
			    fa->fa_type, key, dst_len,
			    fa->fa_tos, fa->fa_info, nlm_flags);
	/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
	WARN_ON(err == -EMSGSIZE);

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
		    info->nlh, GFP_KERNEL);

	rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
static int fib_detect_death(struct fib_info *fi, int order,
			    struct fib_info **last_resort, int *last_idx,
	int state = NUD_NONE;

	n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].fib_nh_gw4, fi->fib_dev);
		state = n->nud_state;

	if (state == NUD_REACHABLE)
	if ((state & NUD_VALID) && order != dflt)
	if ((state & NUD_VALID) ||
	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *encap,
		       u16 encap_type, void *cfg, gfp_t gfp_flags,
		       struct netlink_ext_ack *extack)
	struct lwtunnel_state *lwtstate;

		if (encap_type == LWTUNNEL_ENCAP_NONE) {
			NL_SET_ERR_MSG(extack, "LWT encap type not specified");
		err = lwtunnel_build_state(encap_type, encap, nhc->nhc_family,
					   cfg, &lwtstate, extack);

		nhc->nhc_lwtstate = lwtstate_get(lwtstate);
EXPORT_SYMBOL_GPL(fib_nh_common_init);

int fib_nh_init(struct net *net, struct fib_nh *nh,
		struct fib_config *cfg, int nh_weight,
		struct netlink_ext_ack *extack)
	nh->fib_nh_family = AF_INET;

	nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
	if (!nh->nh_pcpu_rth_output)

	err = fib_nh_common_init(&nh->nh_common, cfg->fc_encap,
				 cfg->fc_encap_type, cfg, GFP_KERNEL, extack);

	nh->fib_nh_oif = cfg->fc_oif;
		nh->fib_nh_gw4 = cfg->fc_gw;
		nh->fib_nh_has_gw = 1;
	nh->fib_nh_flags = cfg->fc_flags;

#ifdef CONFIG_IP_ROUTE_CLASSID
	nh->nh_tclassid = cfg->fc_flow;
		net->ipv4.fib_num_tclassid_users++;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	nh->fib_nh_weight = nh_weight;

	rt_fibinfo_free_cpus(nh->nh_pcpu_rth_output);
	nh->nh_pcpu_rth_output = NULL;
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
			      struct netlink_ext_ack *extack)
	while (rtnh_ok(rtnh, remaining)) {
		rtnh = rtnh_next(rtnh, &remaining);

	/* leftover implies invalid nexthop configuration, discard it */
		NL_SET_ERR_MSG(extack,
			       "Invalid nexthop configuration - extra data after nexthops");
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg,
		       struct netlink_ext_ack *extack)
	struct net *net = fi->fib_net;
	struct fib_config fib_cfg;

	change_nexthops(fi) {
		memset(&fib_cfg, 0, sizeof(fib_cfg));

		if (!rtnh_ok(rtnh, remaining)) {
			NL_SET_ERR_MSG(extack,
				       "Invalid nexthop configuration - extra data after nexthop");

		if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
			NL_SET_ERR_MSG(extack,
				       "Invalid flags for nexthop - can not contain DEAD or LINKDOWN");

		fib_cfg.fc_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
		fib_cfg.fc_oif = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
				fib_cfg.fc_gw = nla_get_in_addr(nla);

			nla = nla_find(attrs, attrlen, RTA_FLOW);
				fib_cfg.fc_flow = nla_get_u32(nla);

			fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
				fib_cfg.fc_encap_type = nla_get_u16(nla);

		ret = fib_nh_init(net, nexthop_nh, &fib_cfg,
				  rtnh->rtnh_hops + 1, extack);

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);

	if (cfg->fc_oif && fi->fib_nh->fib_nh_oif != cfg->fc_oif) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop device index does not match RTA_OIF");
	if (cfg->fc_gw && fi->fib_nh->fib_nh_gw4 != cfg->fc_gw) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop gateway does not match RTA_GATEWAY");
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop class id does not match RTA_FLOW");
static void fib_rebalance(struct fib_info *fi)
		if (nh->fib_nh_flags & RTNH_F_DEAD)

		if (ip_ignore_linkdown(nh->fib_nh_dev) &&
		    nh->fib_nh_flags & RTNH_F_LINKDOWN)

		total += nh->fib_nh_weight;
	} endfor_nexthops(fi);

	change_nexthops(fi) {
		if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD) {
		} else if (ip_ignore_linkdown(nexthop_nh->fib_nh_dev) &&
			   nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
			w += nexthop_nh->fib_nh_weight;
			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,

		atomic_set(&nexthop_nh->fib_nh_upper_bound, upper_bound);
	} endfor_nexthops(fi);
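
/* Illustrative arithmetic for the bounds computed above (assumed example,
 * not taken from this file): with two nexthops of weights 1 and 2, the
 * cumulative weights 1 and 3 place the first upper bound near (1 << 31) / 3
 * and the last one at the top of the 31-bit range, so roughly one third of
 * flow hashes select the first nexthop and two thirds select the second.
 */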
#else /* CONFIG_IP_ROUTE_MULTIPATH */

static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
		       int remaining, struct fib_config *cfg,
		       struct netlink_ext_ack *extack)
	NL_SET_ERR_MSG(extack, "Multipath support not enabled in kernel");

#define fib_rebalance(fi) do { } while (0)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int fib_encap_match(u16 encap_type,
			   struct nlattr *encap,
			   const struct fib_nh *nh,
			   const struct fib_config *cfg,
			   struct netlink_ext_ack *extack)
	struct lwtunnel_state *lwtstate;

	if (encap_type == LWTUNNEL_ENCAP_NONE)

	ret = lwtunnel_build_state(encap_type, encap, AF_INET,
				   cfg, &lwtstate, extack);
		result = lwtunnel_cmp_encap(lwtstate, nh->fib_nh_lws);
		lwtstate_free(lwtstate);
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
		 struct netlink_ext_ack *extack)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	struct rtnexthop *rtnh;

	if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)

	if (cfg->fc_oif || cfg->fc_gw) {
		if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap,
				    fi->fib_nh, cfg, extack))
#ifdef CONFIG_IP_ROUTE_CLASSID
		    cfg->fc_flow != fi->fib_nh->nh_tclassid)
		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->fib_nh_oif) &&
		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->fib_nh_gw4))

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	remaining = cfg->fc_mp_len;

		if (!rtnh_ok(rtnh, remaining))

		if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->fib_nh_oif)

		attrlen = rtnh_attrlen(rtnh);
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla && nla_get_in_addr(nla) != nh->fib_nh_gw4)
#ifdef CONFIG_IP_ROUTE_CLASSID
			nla = nla_find(attrs, attrlen, RTA_FLOW);
			if (nla && nla_get_u32(nla) != nh->nh_tclassid)

		rtnh = rtnh_next(rtnh, &remaining);
	} endfor_nexthops(fi);
bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
			if (nla_len(nla) != sizeof(u32))
			val = nla_get_u32(nla);

		fi_val = fi->fib_metrics->metrics[type - 1];
		if (type == RTAX_FEATURES)
			fi_val &= ~DST_FEATURE_ECN_CA;
/*
 * The semantics of nexthops are messy, for historical reasons.
 * We have to take into account that:
 * a) the gateway can actually be a local interface address,
 *    so that a gatewayed route is direct;
 * b) the gateway must be an on-link address, possibly
 *    described not by an ifaddr but by a direct route;
 * c) if both gateway and interface are specified, they should not
 *    contradict each other;
 * d) if we use tunnel routes, the gateway may not be on-link.
 *
 * Attempting to reconcile all of these (alas, self-contradictory) conditions
 * results in pretty ugly and hairy code with obscure logic.
 *
 * I chose to generalize it instead, so that the size
 * of the code does not increase practically, but it becomes
 * significantly more flexible and cleaner.
 *
 * Every prefix is assigned a "scope" value: "host" is a local address,
 * "link" is a direct route,
 * [ ... "site" ... "interior" ... ]
 * and "universe" is a true gateway route with global meaning.
 *
 * Every prefix refers to a set of "nexthop"s (gw, oif),
 * where gw must have narrower scope. This recursion stops
 * when gw has LOCAL scope or when the "nexthop" is declared ONLINK,
 * which means that gw is forced to be on link.
 *
 * The code is still hairy, but now it is apparently logically
 * consistent and very flexible. E.g. as a by-product it allows
 * independent exterior and interior routing protocols to coexist in peace.
 *
 * Normally it looks like the following:
 *
 *	{universe prefix}  -> (gw, oif) [scope link]
 *			       |
 *			       |-> {link prefix} -> (gw, oif) [scope local]
 *						      |
 *						      |-> {local prefix} (terminal node)
 */
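
/* Illustrative configuration for the scope rules above (assumed example,
 * not taken from this file):
 *
 *	ip route add 10.0.0.0/8 via 192.0.2.1 dev eth0
 *
 * resolves its gateway through the link-scope prefix covering 192.0.2.1
 * (e.g. 192.0.2.0/24 dev eth0), which in turn terminates at the host-scope
 * local address on eth0.  With the "onlink" flag the recursive lookup is
 * skipped and the gateway is simply trusted to be reachable on the device.
 */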
static int fib_check_nh(struct fib_config *cfg, struct fib_nh *nh,
			struct netlink_ext_ack *extack)
	struct net_device *dev;

	net = cfg->fc_nlinfo.nl_net;
	if (nh->fib_nh_gw4) {
		struct fib_result res;

		if (nh->fib_nh_flags & RTNH_F_ONLINK) {
			unsigned int addr_type;

			if (cfg->fc_scope >= RT_SCOPE_LINK) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop has invalid scope");
			dev = __dev_get_by_index(net, nh->fib_nh_oif);
				NL_SET_ERR_MSG(extack, "Nexthop device required for onlink");
			if (!(dev->flags & IFF_UP)) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop device is not up");
			addr_type = inet_addr_type_dev_table(net, dev,
			if (addr_type != RTN_UNICAST) {
				NL_SET_ERR_MSG(extack,
					       "Nexthop has invalid gateway");
			if (!netif_carrier_ok(dev))
				nh->fib_nh_flags |= RTNH_F_LINKDOWN;
			nh->fib_nh_dev = dev;
			nh->fib_nh_scope = RT_SCOPE_LINK;

			struct fib_table *tbl = NULL;
			struct flowi4 fl4 = {
				.daddr = nh->fib_nh_gw4,
				.flowi4_scope = cfg->fc_scope + 1,
				.flowi4_oif = nh->fib_nh_oif,
				.flowi4_iif = LOOPBACK_IFINDEX,

			/* It is not necessary, but requires a bit of thinking */
			if (fl4.flowi4_scope < RT_SCOPE_LINK)
				fl4.flowi4_scope = RT_SCOPE_LINK;

				tbl = fib_get_table(net, cfg->fc_table);

				err = fib_table_lookup(tbl, &fl4, &res,
						       FIB_LOOKUP_IGNORE_LINKSTATE |

			/* on error or if no table given do full lookup. This
			 * is needed for example when nexthops are in the local
			 * table rather than the given table
			 */
				err = fib_lookup(net, &fl4, &res,
						 FIB_LOOKUP_IGNORE_LINKSTATE);

				NL_SET_ERR_MSG(extack,
					       "Nexthop has invalid gateway");

		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) {
			NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
		nh->fib_nh_scope = res.scope;
		nh->fib_nh_oif = FIB_RES_OIF(res);
		nh->fib_nh_dev = dev = FIB_RES_DEV(res);
			NL_SET_ERR_MSG(extack,
				       "No egress device for nexthop gateway");
		if (!netif_carrier_ok(dev))
			nh->fib_nh_flags |= RTNH_F_LINKDOWN;
		err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;

		struct in_device *in_dev;

		if (nh->fib_nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) {
			NL_SET_ERR_MSG(extack,
				       "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set");

		in_dev = inetdev_by_index(net, nh->fib_nh_oif);

		if (!(in_dev->dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Device for nexthop is not up");
		nh->fib_nh_dev = in_dev->dev;
		dev_hold(nh->fib_nh_dev);
		nh->fib_nh_scope = RT_SCOPE_HOST;
		if (!netif_carrier_ok(nh->fib_nh_dev))
			nh->fib_nh_flags |= RTNH_F_LINKDOWN;
static inline unsigned int fib_laddr_hashfn(__be32 val)
	unsigned int mask = (fib_info_hash_size - 1);

	return ((__force u32)val ^
		((__force u32)val >> 7) ^
		((__force u32)val >> 14)) & mask;

static struct hlist_head *fib_info_hash_alloc(int bytes)
	if (bytes <= PAGE_SIZE)
		return kzalloc(bytes, GFP_KERNEL);
	return (struct hlist_head *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO,

static void fib_info_hash_free(struct hlist_head *hash, int bytes)
	if (bytes <= PAGE_SIZE)
	free_pages((unsigned long) hash, get_order(bytes));
static void fib_info_hash_move(struct hlist_head *new_info_hash,
			       struct hlist_head *new_laddrhash,
			       unsigned int new_size)
	struct hlist_head *old_info_hash, *old_laddrhash;
	unsigned int old_size = fib_info_hash_size;
	unsigned int i, bytes;

	spin_lock_bh(&fib_info_lock);
	old_info_hash = fib_info_hash;
	old_laddrhash = fib_info_laddrhash;
	fib_info_hash_size = new_size;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *head = &fib_info_hash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, head, fib_hash) {
			struct hlist_head *dest;
			unsigned int new_hash;

			new_hash = fib_info_hashfn(fi);
			dest = &new_info_hash[new_hash];
			hlist_add_head(&fi->fib_hash, dest);

	fib_info_hash = new_info_hash;

	for (i = 0; i < old_size; i++) {
		struct hlist_head *lhead = &fib_info_laddrhash[i];
		struct hlist_node *n;
		struct fib_info *fi;

		hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
			struct hlist_head *ldest;
			unsigned int new_hash;

			new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
			ldest = &new_laddrhash[new_hash];
			hlist_add_head(&fi->fib_lhash, ldest);

	fib_info_laddrhash = new_laddrhash;

	spin_unlock_bh(&fib_info_lock);

	bytes = old_size * sizeof(struct hlist_head *);
	fib_info_hash_free(old_info_hash, bytes);
	fib_info_hash_free(old_laddrhash, bytes);
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
	nh->nh_saddr = inet_select_addr(nh->fib_nh_dev,
					nh->nh_parent->fib_scope);
	nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);

	return nh->nh_saddr;

static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
	    fib_prefsrc != cfg->fc_dst) {
		u32 tb_id = cfg->fc_table;

		if (tb_id == RT_TABLE_MAIN)
			tb_id = RT_TABLE_LOCAL;

		rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
					  fib_prefsrc, tb_id);

		if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) {
			rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net,
						  fib_prefsrc, RT_TABLE_LOCAL);

		if (rc != RTN_LOCAL)
struct fib_info *fib_create_info(struct fib_config *cfg,
				 struct netlink_ext_ack *extack)
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
	struct net *net = cfg->fc_nlinfo.nl_net;

	if (cfg->fc_type > RTN_MAX)

	/* Fast check to catch the most weird cases */
	if (fib_props[cfg->fc_type].scope > cfg->fc_scope) {
		NL_SET_ERR_MSG(extack, "Invalid scope");

	if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid rtm_flags - can not contain DEAD or LINKDOWN");

#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len, extack);

	if (fib_info_cnt >= fib_info_hash_size) {
		unsigned int new_size = fib_info_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;

		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_info_hash_alloc(bytes);
		new_laddrhash = fib_info_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			fib_info_hash_free(new_info_hash, bytes);
			fib_info_hash_free(new_laddrhash, bytes);
			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);

		if (!fib_info_hash_size)

	fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);

	fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
					      cfg->fc_mx_len, extack);
	if (unlikely(IS_ERR(fi->fib_metrics))) {
		err = PTR_ERR(fi->fib_metrics);
		return ERR_PTR(err);

	fi->fib_protocol = cfg->fc_protocol;
	fi->fib_scope = cfg->fc_scope;
	fi->fib_flags = cfg->fc_flags;
	fi->fib_priority = cfg->fc_priority;
	fi->fib_prefsrc = cfg->fc_prefsrc;
	fi->fib_type = cfg->fc_type;
	fi->fib_tb_id = cfg->fc_table;

	change_nexthops(fi) {
		nexthop_nh->nh_parent = fi;
	} endfor_nexthops(fi)

		err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack);
		err = fib_nh_init(net, fi->fib_nh, cfg, 1, extack);

	if (fib_props[cfg->fc_type].error) {
		if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp) {
			NL_SET_ERR_MSG(extack,
				       "Gateway, device and multipath can not be specified for this route type");
		switch (cfg->fc_type) {
			NL_SET_ERR_MSG(extack, "Invalid route type");

	if (cfg->fc_scope > RT_SCOPE_HOST) {
		NL_SET_ERR_MSG(extack, "Invalid scope");

	if (cfg->fc_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
			NL_SET_ERR_MSG(extack,
				       "Route with host scope can not have multiple nexthops");
		if (nh->fib_nh_gw4) {
			NL_SET_ERR_MSG(extack,
				       "Route with host scope can not have a gateway");
		nh->fib_nh_scope = RT_SCOPE_NOWHERE;
		nh->fib_nh_dev = dev_get_by_index(net, fi->fib_nh->fib_nh_oif);
		if (!nh->fib_nh_dev)

		change_nexthops(fi) {
			err = fib_check_nh(cfg, nexthop_nh, extack);
			if (nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN)
		} endfor_nexthops(fi)
		if (linkdown == fi->fib_nhs)
			fi->fib_flags |= RTNH_F_LINKDOWN;

	if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) {
		NL_SET_ERR_MSG(extack, "Invalid prefsrc address");

	change_nexthops(fi) {
		fib_info_update_nh_saddr(net, nexthop_nh);
	} endfor_nexthops(fi)

	ofi = fib_find_info(fi);

	refcount_set(&fi->fib_clntref, 1);
	spin_lock_bh(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	change_nexthops(fi) {
		struct hlist_head *head;

		if (!nexthop_nh->fib_nh_dev)
		hash = fib_devindex_hashfn(nexthop_nh->fib_nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nexthop_nh->nh_hash, head);
	} endfor_nexthops(fi)
	spin_unlock_bh(&fib_info_lock);

	return ERR_PTR(err);
int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
		  u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
		  struct fib_info *fi, unsigned int flags)
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
		rtm->rtm_table = tb_id;
		rtm->rtm_table = RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, tb_id))
		goto nla_put_failure;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = fi->fib_scope;
	rtm->rtm_protocol = fi->fib_protocol;

	if (rtm->rtm_dst_len &&
	    nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
		goto nla_put_failure;
	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
		goto nla_put_failure;

	if (fi->fib_prefsrc &&
	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
		goto nla_put_failure;
	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->fib_nh_gw4 &&
		    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		if (fi->fib_nh->fib_nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->fib_nh_oif))
			goto nla_put_failure;
		if (fi->fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
			if (ip_ignore_linkdown(fi->fib_nh->fib_nh_dev))
				rtm->rtm_flags |= RTNH_F_DEAD;
		if (fi->fib_nh->fib_nh_flags & RTNH_F_OFFLOAD)
			rtm->rtm_flags |= RTNH_F_OFFLOAD;
#ifdef CONFIG_IP_ROUTE_CLASSID
		if (fi->fib_nh[0].nh_tclassid &&
		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
			goto nla_put_failure;

		if (fi->fib_nh->fib_nh_lws &&
		    lwtunnel_fill_encap(skb, fi->fib_nh->fib_nh_lws) < 0)
			goto nla_put_failure;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (fi->fib_nhs > 1) {
		struct rtnexthop *rtnh;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
			goto nla_put_failure;

			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
				goto nla_put_failure;

			rtnh->rtnh_flags = nh->fib_nh_flags & 0xFF;
			if (nh->fib_nh_flags & RTNH_F_LINKDOWN) {
				if (ip_ignore_linkdown(nh->fib_nh_dev))
					rtnh->rtnh_flags |= RTNH_F_DEAD;
			rtnh->rtnh_hops = nh->fib_nh_weight - 1;
			rtnh->rtnh_ifindex = nh->fib_nh_oif;

			if (nh->fib_nh_gw4 &&
			    nla_put_in_addr(skb, RTA_GATEWAY, nh->fib_nh_gw4))
				goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
			if (nh->nh_tclassid &&
			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
				goto nla_put_failure;

			if (nh->fib_nh_lws &&
			    lwtunnel_fill_encap(skb, nh->fib_nh_lws) < 0)
				goto nla_put_failure;

			/* length of rtnetlink header + attributes */
			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp);

	nlmsg_end(skb, nlh);

	nlmsg_cancel(skb, nlh);
/* Update the FIB if:
 * - a local address disappeared -> we must delete all the entries
 *   referring to it.
 * - a device went down -> we must shut down all nexthops going via it.
 */
int fib_sync_down_addr(struct net_device *dev, __be32 local)
	unsigned int hash = fib_laddr_hashfn(local);
	struct hlist_head *head = &fib_info_laddrhash[hash];
	struct net *net = dev_net(dev);
	int tb_id = l3mdev_fib_table(dev);
	struct fib_info *fi;

	if (!fib_info_laddrhash || local == 0)

	hlist_for_each_entry(fi, head, fib_lhash) {
		if (!net_eq(fi->fib_net, net) ||
		    fi->fib_tb_id != tb_id)
		if (fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
static int call_fib_nh_notifiers(struct fib_nh *nh,
				 enum fib_event_type event_type)
	bool ignore_link_down = ip_ignore_linkdown(nh->fib_nh_dev);
	struct fib_nh_notifier_info info = {

	switch (event_type) {
	case FIB_EVENT_NH_ADD:
		if (nh->fib_nh_flags & RTNH_F_DEAD)
		if (ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN)
		return call_fib4_notifiers(dev_net(nh->fib_nh_dev), event_type,
	case FIB_EVENT_NH_DEL:
		if ((ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN) ||
		    (nh->fib_nh_flags & RTNH_F_DEAD))
			return call_fib4_notifiers(dev_net(nh->fib_nh_dev),
						   event_type, &info.info);
/* Update the PMTU of exceptions when:
 * - the new MTU of the first hop becomes smaller than the PMTU
 * - the old MTU was the same as the PMTU, and it limited discovery of
 *   larger MTUs on the path. With that limit raised, we can now
 *   discover larger MTUs
 * A special case is locked exceptions, for which the PMTU is smaller
 * than the minimal accepted PMTU:
 * - if the new MTU is greater than the PMTU, don't make any change
 * - otherwise, unlock and set PMTU
 */
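
/* Worked example for the rules above (hypothetical numbers): an exception
 * whose PMTU equals the old first-hop MTU of 1400 is raised along with a
 * device MTU increase to 1500, because the old device MTU is what had been
 * capping discovery; an exception with PMTU 1300 learned from an ICMP
 * "fragmentation needed" message is left untouched by the same increase.
 */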
static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
	struct fnhe_hash_bucket *bucket;

	bucket = rcu_dereference_protected(nh->nh_exceptions, 1);

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
		     fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
			if (fnhe->fnhe_mtu_locked) {
				if (new <= fnhe->fnhe_pmtu) {
					fnhe->fnhe_pmtu = new;
					fnhe->fnhe_mtu_locked = false;
			} else if (new < fnhe->fnhe_pmtu ||
				   orig == fnhe->fnhe_pmtu) {
				fnhe->fnhe_pmtu = new;
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];

	hlist_for_each_entry(nh, head, nh_hash) {
		if (nh->fib_nh_dev == dev)
			nh_update_mtu(nh, dev->mtu, orig_mtu);

/* Event              force Flags           Description
 * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
 * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
 * NETDEV_DOWN        1     LINKDOWN|DEAD   Last address removed
 * NETDEV_UNREGISTER  1     LINKDOWN|DEAD   Device removed
 */
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
	int scope = RT_SCOPE_NOWHERE;
	struct fib_info *prev_fi = NULL;
	unsigned int hash = fib_devindex_hashfn(dev->ifindex);
	struct hlist_head *head = &fib_info_devhash[hash];

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;

		BUG_ON(!fi->fib_nhs);
		if (nh->fib_nh_dev != dev || fi == prev_fi)

		change_nexthops(fi) {
			if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD)
			else if (nexthop_nh->fib_nh_dev == dev &&
				 nexthop_nh->fib_nh_scope != scope) {
				case NETDEV_UNREGISTER:
					nexthop_nh->fib_nh_flags |= RTNH_F_DEAD;
					nexthop_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
				call_fib_nh_notifiers(nexthop_nh,
#ifdef CONFIG_IP_ROUTE_MULTIPATH
			if (event == NETDEV_UNREGISTER &&
			    nexthop_nh->fib_nh_dev == dev) {
		} endfor_nexthops(fi)
		if (dead == fi->fib_nhs) {
			case NETDEV_UNREGISTER:
				fi->fib_flags |= RTNH_F_DEAD;
				fi->fib_flags |= RTNH_F_LINKDOWN;
/* Must be invoked inside of an RCU protected region. */
static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
	struct fib_info *fi = NULL, *last_resort = NULL;
	struct hlist_head *fa_head = res->fa_head;
	struct fib_table *tb = res->table;
	u8 slen = 32 - res->prefixlen;
	int order = -1, last_idx = -1;
	struct fib_alias *fa, *fa1 = NULL;
	u32 last_prio = res->fi->fib_priority;

	hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
		struct fib_info *next_fi = fa->fa_info;

		if (fa->fa_slen != slen)
		if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
		if (fa->tb_id != tb->tb_id)
		if (next_fi->fib_priority > last_prio &&
		    fa->fa_tos == last_tos) {

		if (next_fi->fib_flags & RTNH_F_DEAD)
		last_tos = fa->fa_tos;
		last_prio = next_fi->fib_priority;

		if (next_fi->fib_scope != res->scope ||
		    fa->fa_type != RTN_UNICAST)
		if (!next_fi->fib_nh[0].fib_nh_gw4 ||
		    next_fi->fib_nh[0].fib_nh_scope != RT_SCOPE_LINK)

		fib_alias_accessed(fa);

			if (next_fi != res->fi)
		} else if (!fib_detect_death(fi, order, &last_resort,
					     &last_idx, fa1->fa_default)) {
			fib_result_assign(res, fi);
			fa1->fa_default = order;

	if (order <= 0 || !fi) {
			fa1->fa_default = -1;

	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
		fib_result_assign(res, fi);
		fa1->fa_default = order;

		fib_result_assign(res, last_resort);
	fa1->fa_default = last_idx;
/* A dead device goes up. We wake up dead nexthops.
 * This makes sense only on multipath routes.
 */
int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
	struct fib_info *prev_fi;
	struct hlist_head *head;

	if (!(dev->flags & IFF_UP))

	if (nh_flags & RTNH_F_DEAD) {
		unsigned int flags = dev_get_flags(dev);

		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
			nh_flags |= RTNH_F_LINKDOWN;

	hash = fib_devindex_hashfn(dev->ifindex);
	head = &fib_info_devhash[hash];

	hlist_for_each_entry(nh, head, nh_hash) {
		struct fib_info *fi = nh->nh_parent;

		BUG_ON(!fi->fib_nhs);
		if (nh->fib_nh_dev != dev || fi == prev_fi)

		change_nexthops(fi) {
			if (!(nexthop_nh->fib_nh_flags & nh_flags)) {
			if (!nexthop_nh->fib_nh_dev ||
			    !(nexthop_nh->fib_nh_dev->flags & IFF_UP))
			if (nexthop_nh->fib_nh_dev != dev ||
			    !__in_dev_get_rtnl(dev))

			nexthop_nh->fib_nh_flags &= ~nh_flags;
			call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD);
		} endfor_nexthops(fi)

			fi->fib_flags &= ~nh_flags;
#ifdef CONFIG_IP_ROUTE_MULTIPATH

static bool fib_good_nh(const struct fib_nh *nh)
	int state = NUD_REACHABLE;

	if (nh->fib_nh_scope == RT_SCOPE_LINK) {
		struct neighbour *n;

		n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
					      (__force u32)nh->fib_nh_gw4);
			state = n->nud_state;

		rcu_read_unlock_bh();

	return !!(state & NUD_VALID);
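
/* Descriptive note: fib_good_nh() is consulted by fib_select_multipath()
 * below when the net.ipv4.fib_multipath_use_neigh sysctl is enabled;
 * nexthops whose neighbour entry is not NUD_VALID are skipped while flows
 * are hashed across the remaining paths.
 */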
void fib_select_multipath(struct fib_result *res, int hash)
	struct fib_info *fi = res->fi;
	struct net *net = fi->fib_net;

	if (net->ipv4.sysctl_fib_multipath_use_neigh) {
			if (!fib_good_nh(nh))
			res->nh_sel = nhsel;

		if (hash > atomic_read(&nh->fib_nh_upper_bound))

		res->nh_sel = nhsel;
	} endfor_nexthops(fi);
void fib_select_path(struct net *net, struct fib_result *res,
		     struct flowi4 *fl4, const struct sk_buff *skb)
	if (fl4->flowi4_oif && !(fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF))

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi->fib_nhs > 1) {
		int h = fib_multipath_hash(net, fl4, skb, NULL);

		fib_select_multipath(res, h);

	if (!res->prefixlen &&
	    res->table->tb_num_default > 1 &&
	    res->type == RTN_UNICAST)
		fib_select_default(fl4, res);

		fl4->saddr = FIB_RES_PREFSRC(net, *res);