// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamping.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *		Miquel van Smoorenburg	:	BSD API fixes.
 *		Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *		Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *		Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *		Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *		Robert Olsson	:	Added rt_cache statistics
 *		Arnaldo C. Melo	:	Convert proc stuff to seq_file
 *		Eric Dumazet	:	hashed spinlocks and rt_check_expire() fixes.
 *		Ilia Sotnikov	:	Ignore TOS on PMTUD and Redirect
 *		Ilia Sotnikov	:	Removed TOS from hash calculations
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
/*
 *	Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void		 ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
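
/* Illustrative note (not new behaviour): callers normally index this
 * table through the rt_tos2priority() helper in <net/route.h>, which
 * drops the low (ECN) bit of the TOS byte before indexing, e.g.
 *
 *	skb->priority = rt_tos2priority(ip_hdr(skb)->tos);
 *
 * so a TOS of IPTOS_LOWDELAY (0x10) lands on index 8,
 * TC_PRIO_INTERACTIVE.
 */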
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	(*pos)++;
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
			" %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", 0444, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", 0444,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}
#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock_bh();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock_bh();

	return n;
}
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = READ_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
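
/* Worked example (illustrative): if a bucket's timestamp shows it was
 * last touched 100 jiffies ago, the first caller to use it again adds
 * a random delta in [0, 100) to the ID counter before reserving its
 * range, so an observer cannot count the packets sent in that interval
 * just by comparing two sampled IP IDs.
 */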
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}
static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_uses_gateway = 1;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}
static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc = FIB_RES_NHC(res);

				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						      0, false,
						      jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}

	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start sending redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything and
	 * set rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->n_redirects)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->n_redirects == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
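
/* Illustrative note: the rate_tokens arithmetic above is a classic
 * token bucket.  Tokens accrue at one per jiffy, are capped at
 * ip_rt_error_burst (5 * HZ), and each ICMP error sent costs
 * ip_rt_error_cost (HZ) tokens, giving a steady-state limit of
 * roughly one ICMP error per second per peer, in bursts of up to five.
 */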
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	u32 old_mtu = ipv4_mtu(dst);
	struct fib_result res;
	bool lock = false;

	if (ip_mtu_locked(dst))
		return;

	if (old_mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc = FIB_RES_NHC(res);

		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
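
/* Usage sketch (illustrative, modeled on tunnel ICMP error handlers
 * such as ipip_err()): a protocol handler receiving an
 * ICMP_DEST_UNREACH/ICMP_FRAG_NEEDED for one of its packets can feed
 * the advertised next-hop MTU into the nexthop exception cache:
 *
 *	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
 *		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_IPIP);
 *
 * where "info" carries the MTU taken from the ICMP header.
 */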
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
 *	We do not cache the source address of the outgoing interface,
 *	because it is used only by IP RR, TS and SRR options,
 *	so it is out of the fast path.
 *
 *	BTW remember: "addr" is allowed to be unaligned
 *	in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}
static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
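
/* Worked example (illustrative): if the route carries a locked MTU
 * metric of 1400, a nexthop exception records a PMTU of 1300 and the
 * egress device MTU is 1500, the locked metric wins and 1400 is
 * returned (minus any lwtunnel headroom).  Without the locked metric,
 * the unexpired exception (1300) would be used; with neither, the
 * device MTU (1500).
 */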
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			rt_add_uncached_list(orig);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}

void rt_flush_dev(struct net_device *dev)
{
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = blackhole_netdev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}
static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_uses_gateway = 1;
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;
		INIT_LIST_HEAD(&new_rt->rt_uncached);

		new_rt->dst.flags |= DST_HOST;
		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if the source is martian,
		 *	the only hint is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route, if it is
		 * invalid for proxy arp.  DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back out the same interface, to support
		 * Private VLAN switch technologies.  See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (!icmp_is_err(icmph->type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		/* skb is currently provided only when forwarding */
		if (skb) {
			struct flow_keys keys;

			skb_flow_dissect_flow_keys(skb, &keys, 0);
			/* Inner can be v4 or v6 */
			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
				hash_keys.tags.flow_label = keys.tags.flow_label;
				hash_keys.basic.ip_proto = keys.basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				ip_multipath_l3_keys(skb, &hash_keys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
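
/* Illustrative note: with fib_multipath_hash_policy 1 (L4), two TCP
 * flows between the same pair of hosts but with different ports hash
 * over (saddr, daddr, sport, dport, proto) and may therefore be spread
 * across different nexthops, while policy 0 (L3) would pin both flows
 * to the same path.  The hash is reduced to a nexthop choice by
 * fib_select_multipath() in ip_mkroute_input() below.
 */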
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/* Implements all the saddr-related checks as ip_route_input_slow(),
 * assuming daddr is valid and the destination is not a local broadcast one.
 * Uses the provided hint instead of performing a route lookup.
 */
int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		      u8 tos, struct net_device *dev,
		      const struct sk_buff *hint)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct rtable *rt = (struct rtable *)hint;
	struct net *net = dev_net(dev);
	int err = -EINVAL;
	u32 tag = 0;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
		goto martian_source;

	if (rt->rt_type != RTN_LOCAL)
		goto skip_validate_source;

	tos &= IPTOS_RT_MASK;
	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
	if (err < 0)
		goto martian_source;

skip_validate_source:
	skb_dst_copy(skb, hint);
	return 0;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	return err;
}
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must have the correct destination already attached by the output
 *	routine.  Changes in the enforced policies must also be applied
 *	to ip_route_use_hint().
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 *	called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flow_keys *flkeys = NULL, _flkeys;
	struct net *net = dev_net(dev);
	struct ip_tunnel_info *tun_info;
	int err = -EINVAL;
	unsigned int flags = 0;
	u32 itag = 0;
	struct rtable *rth;
	struct flowi4 fl4;
	bool do_cache = true;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	 * by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * calling it once if daddr and/or saddr are loopback addresses.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST) {
		if (IN_DEV_BFORWARD(in_dev))
			goto make_route;
		/* do not cache if bc_forwarding is enabled */
		if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
			do_cache = false;
		goto brd_input;
	}

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

make_route:
	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	do_cache &= res->fi && !itag;
	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			err = 0;
			goto out;
		}
	}

	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
			   flags | RTCF_LOCAL, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nhc, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic is moved from the route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-(  As a result, a host on a multicast
	 * network acquires a lot of useless route cache entries, sort of
	 * SDR messages from all the world.  Now we try to get rid of them.
	 * Really, provided the software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * compared with route cache reject entries.
	 * Note that multicast routers are not affected, because a
	 * route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;
		int err = -EINVAL;

		if (!in_dev)
			return err;
		our = ip_check_mc_rcu(in_dev, daddr, saddr,
				      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if (!our && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
			||
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			err = ip_route_input_mc(skb, daddr, saddr,
						tos, dev, our);
		}
		return err;
	}

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If a multicast route does not exist, use
		 * the default one, but do not gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		do_cache = false;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
		struct rtable __rcu **prth;

		fnhe = find_exception(nhc, fl4->daddr);
		if (!do_cache)
			goto add;
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
		} else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nhc->nhc_gw_family &&
				       nhc->nhc_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	lwtunnel_set_redirect(&rth->dst);

	return rth;
}
/*
 * Major route resolver routine.
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	__u8 tos = RT_FL_TOS(fl4);
	struct fib_result res = {
		.type		= RTN_UNSPEC,
		.fi		= NULL,
		.table		= NULL,
		.tclassid	= 0,
	};
	struct rtable *rth;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
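/* A minimal sketch of direct use (most callers go through wrappers such
 * as __ip_route_output_key() or ip_route_output_flow()):
 *
 *	struct flowi4 fl4 = {
 *		.daddr = daddr,		(destination to resolve)
 *	};
 *	struct rtable *rt;
 *
 *	rt = ip_route_output_key_hash(net, &fl4, skb);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 *
 * Note that fl4 is also an output parameter: flowi4_iif/tos/scope are
 * normalized above, and saddr/flowi4_oif may be filled by the resolver.
 */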
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;
	struct rtable *rth;
	int err;

	if (fl4->saddr) {
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr)) {
			rth = ERR_PTR(-EINVAL);
			goto out;
		}

		rth = ERR_PTR(-ENETUNREACH);

		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return the wrong iface, if
		      saddr is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with the saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (!dev_out)
				goto out;

			/* Special hack: the user can direct multicasts
			   and limited broadcasts via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are
			   broken, because we are not allowed to build a
			   multicast path with a loopback source addr (look,
			   the routing cache cannot know that ttl is zero, so
			   the packet will not leave this host and the route
			   is valid).  Luckily, this hack is a good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}

	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
		if (fl4->flowi4_oif &&
		    (ipv4_is_multicast(fl4->daddr) ||
		    !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
			/* Apparently, the routing tables are wrong.  Assume
			   that the destination is on link.

			   Why?  Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses.  When oif is specified, the routing
			   tables are looked up with only one purpose:
			   to catch if the destination is gatewayed, rather
			   than direct.  Moreover, if MSG_DONTROUTE is set,
			   we send the packet, ignoring both the routing
			   tables and the ifaddr state. --ANK

			   We could do it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;

		/* make sure orig_oif points to fib result device even
		 * though packet rx/tx happens over loopback or l3mdev
		 */
		orig_oif = FIB_RES_OIF(*res);

		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.check			= ipv4_blackhole_dst_check,
	.mtu			= ipv4_blackhole_mtu,
	.default_advmss		= ipv4_default_advmss,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.redirect		= ipv4_rt_blackhole_redirect,
	.cow_metrics		= ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		= ipv4_neigh_lookup,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;
		rt->rt_mtu_locked = ort->rt_mtu_locked;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_uses_gateway = ort->rt_uses_gateway;
		rt->rt_gw_family = ort->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			rt->rt_gw4 = ort->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			rt->rt_gw6 = ort->rt_gw6;

		INIT_LIST_HEAD(&rt->rt_uncached);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
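/* A blackhole route keeps a caller's dst reference valid while
 * guaranteeing nothing is transmitted: input/output are wired to
 * dst_discard/dst_discard_out, while flags, type and gateway data are
 * copied from the original so readers stay consistent.  The main user
 * is the xfrm layer, which substitutes a blackhole dst while IPsec
 * state resolution is still pending.  Sketch of the effect:
 *
 *	dst = ipv4_blackhole_route(net, dst_orig);
 *	if (!IS_ERR(dst))
 *		... packets routed via dst are now silently dropped ...
 */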
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
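/* When flowi4_proto is set, the resolved route is additionally passed
 * through xfrm_lookup_route() so that an IPsec policy can wrap or
 * replace it; on a policy failure the caller gets back an ERR_PTR just
 * as for a plain lookup failure.  A minimal sketch of a caller:
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= daddr,
 *		.flowi4_proto	= IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */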
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
			struct sk_buff *skb, u32 portid, u32 seq,
			unsigned int flags)
{
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4 ? fl4->flowi4_tos : 0;
	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (fl4 && !rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway) {
		if (rt->rt_gw_family == AF_INET &&
		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
			goto nla_put_failure;
		} else if (rt->rt_gw_family == AF_INET6) {
			int alen = sizeof(struct in6_addr);
			struct nlattr *nla;
			struct rtvia *via;

			nla = nla_reserve(skb, RTA_VIA, alen + 2);
			if (!nla)
				goto nla_put_failure;

			via = nla_data(nla);
			via->rtvia_family = AF_INET6;
			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
		}
	}

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rt->rt_mtu_locked && expires)
		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4) {
		if (fl4->flowi4_mark &&
		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
			goto nla_put_failure;

		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
		    nla_put_u32(skb, RTA_UID,
				from_kuid_munged(current_user_ns(),
						 fl4->flowi4_uid)))
			goto nla_put_failure;

		if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
			if (ipv4_is_multicast(dst) &&
			    !ipv4_is_local_multicast(dst) &&
			    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
				int err = ipmr_get_route(net, skb,
							 fl4->saddr, fl4->daddr,
							 r, portid);

				if (err <= 0) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				}
			} else
#endif
				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
					goto nla_put_failure;
		}
	}

	error = rt->dst.error;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
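/* The message assembled above is what "ip route get" decodes, e.g.
 *
 *	$ ip route get 192.0.2.1
 *	192.0.2.1 via 198.51.100.1 dev eth0 src 198.51.100.2 uid 1000
 *
 * where the fields map to RTA_DST, RTA_GATEWAY, RTA_OIF, RTA_PREFSRC
 * and RTA_UID as put above (the addresses shown are illustrative).
 */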
static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
			    struct netlink_callback *cb, u32 table_id,
			    struct fnhe_hash_bucket *bucket, int genid,
			    int *fa_index, int fa_start, unsigned int flags)
{
	int i;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
			struct rtable *rt;
			int err;

			if (*fa_index < fa_start)
				goto next;

			if (fnhe->fnhe_genid != genid)
				goto next;

			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires))
				goto next;

			rt = rcu_dereference(fnhe->fnhe_rth_input);
			if (!rt)
				rt = rcu_dereference(fnhe->fnhe_rth_output);
			if (!rt)
				goto next;

			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
					   table_id, NULL, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, flags);
			if (err)
				return err;
next:
			(*fa_index)++;
		}
	}

	return 0;
}
int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
		       u32 table_id, struct fib_info *fi,
		       int *fa_index, int fa_start, unsigned int flags)
{
	struct net *net = sock_net(cb->skb->sk);
	int nhsel, genid = fnhe_genid(net);

	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
		struct fnhe_hash_bucket *bucket;
		int err;

		if (nhc->nhc_flags & RTNH_F_DEAD)
			continue;

		rcu_read_lock();
		bucket = rcu_dereference(nhc->nhc_exceptions);
		err = 0;
		if (bucket)
			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
					       genid, fa_index, fa_start,
					       flags);
		rcu_read_unlock();
		if (err)
			return err;
	}

	return 0;
}
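/* Since the removal of the global routing cache, per-nexthop exceptions
 * (PMTU learned from ICMP, redirects) are the only cached routes left;
 * this dump is what backs "ip route show cache" for them.
 */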
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers; this skb can pass
	 * through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = 0;
	iph->ihl = 0x5;
	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		udph->len = sizeof(struct udphdr);
		udph->check = 0;
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source	= sport;
		tcph->dest	= dport;
		tcph->doff	= sizeof(struct tcphdr) / 4;
		tcph->rst = 1;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
		icmph->code = 0;
	}
	}

	return skb;
}
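/* The synthesized packet never leaves the kernel: it only has to look
 * plausible enough for the routing path (and for fib rules or eBPF
 * programs that may match on protocol and ports) while RTM_GETROUTE
 * resolves a route on userspace's behalf.
 */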
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
	    rtm->rtm_table || rtm->rtm_protocol ||
	    rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
		return -EINVAL;
	}

	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
			       RTM_F_FIB_MATCH)) {
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv4_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
		return -EINVAL;
	}

	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_IIF:
		case RTA_OIF:
		case RTA_SRC:
		case RTA_DST:
		case RTA_IP_PROTO:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_MARK:
		case RTA_UID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
			return -EINVAL;
		}
	}

	return 0;
}
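/* With strict checking enabled (netlink_strict_get_check()), header
 * fields and attributes that a GET request cannot use are rejected with
 * an extack message instead of being silently ignored, which is all the
 * deprecated parse above does for legacy callers.
 */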
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev	= dev;
		skb->mark	= mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_trim(skb, 0);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid,
				   nlh->nlmsg_seq, 0);
	}
	if (err < 0)
		goto errout_rcu;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;
errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}
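/* Both reply flavours are reachable from iproute2, e.g.:
 *
 *	$ ip route get 192.0.2.1		(rt_fill_info(): cloned dst)
 *	$ ip route get fibmatch 192.0.2.1	(fib_dump_info(): FIB entry)
 *
 * the latter setting RTM_F_FIB_MATCH so the reply describes the matched
 * table entry rather than the resolved destination.
 */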
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}
#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;

static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}

static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ip_min_valid_pmtu,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static const char ipv4_route_flush_procname[] = "flush";

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= ipv4_route_flush_procname,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};
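/* The flush file is write-only; any write invalidates cached output and
 * exception state, e.g.:
 *
 *	# echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * which ends up in ipv4_sysctl_rtcache_flush() above: rt_cache_flush()
 * bumps the rt_genid so cached dsts fail rt_cache_valid(), and
 * fnhe_genid_bump() retires accumulated exceptions.
 */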
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export non-whitelisted sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns) {
			if (tbl[0].procname != ipv4_route_flush_procname)
				tbl[0].procname = NULL;
		}
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	= ipv4_inetpeer_init,
	.exit	= ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
	int cpu;

	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif