/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              ROUTE - implementation of the IP router.
 *
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *              Alan Cox        :       Verify area fixes.
 *              Alan Cox        :       cli() protects routing changes
 *              Rui Oliveira    :       ICMP routing table updates
 *               (rco@di.uminho.pt)     Routing table insertion and update
 *              Linus Torvalds  :       Rewrote bits to be sensible
 *              Alan Cox        :       Added BSD route gw semantics
 *              Alan Cox        :       Super /proc >4K
 *              Alan Cox        :       MTU in route table
 *              Alan Cox        :       MSS actually. Also added the window
 *              Sam Lantinga    :       Fixed route matching in rt_del()
 *              Alan Cox        :       Routing cache support.
 *              Alan Cox        :       Removed compatibility cruft.
 *              Alan Cox        :       RTF_REJECT support.
 *              Alan Cox        :       TCP irtt support.
 *              Jonathan Naylor :       Added Metric support.
 *              Miquel van Smoorenburg  :       BSD API fixes.
 *              Miquel van Smoorenburg  :       Metrics.
 *              Alan Cox        :       Use __u32 properly
 *              Alan Cox        :       Aligned routing errors more closely with BSD;
 *                                      our system is still very different.
 *              Alan Cox        :       Faster /proc handling
 *              Alexey Kuznetsov        :       Massive rework to support tree based routing,
 *                                      routing caches and better behaviour.
 *              Olaf Erb        :       irtt wasn't being copied right.
 *              Bjorn Ekwall    :       Kerneld route support.
 *              Alan Cox        :       Multicast fixed (I hope)
 *              Pavel Krauz     :       Limited broadcast fixed
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov        :       End of old history. Split to fib.c and
 *                                      route.c and rewritten from scratch.
 *              Andi Kleen      :       Load-limit warning messages.
 *              Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *              Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
 *              Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
 *              Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
 *              Marc Boucher    :       routing by fwmark
 *              Robert Olsson   :       Added rt_cache statistics
 *              Arnaldo C. Melo :       Convert proc stuff to seq_file
 *              Eric Dumazet    :       hashed spinlocks and rt_check_expire() fixes.
 *              Ilia Sotnikov   :       Ignore TOS on PMTUD and Redirect
 *              Ilia Sotnikov   :       Removed TOS from hash calculations
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"
#define RT_FL_TOS(oldflp4) \
        ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;

static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
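/* Editorial note (not in the original file): these values are the
 * defaults behind the sysctl knobs exported under
 * /proc/sys/net/ipv4/route/ (min_pmtu, mtu_expires, redirect_number,
 * redirect_load, redirect_silence, error_cost, error_burst, gc_timeout),
 * so each of them can be retuned at run time.
 */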
/*
 * Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                           struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
        .check          = ipv4_dst_check,
        .default_advmss = ipv4_default_advmss,
        .cow_metrics    = ipv4_cow_metrics,
        .destroy        = ipv4_dst_destroy,
        .negative_advice = ipv4_negative_advice,
        .link_failure   = ipv4_link_failure,
        .update_pmtu    = ip_rt_update_pmtu,
        .redirect       = ip_do_redirect,
        .local_out      = __ip_local_out,
        .neigh_lookup   = ipv4_neigh_lookup,
        .confirm_neigh  = ipv4_confirm_neigh,
};
#define ECN_OR_COST(class) TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
        ECN_OR_COST(BESTEFFORT),
        ECN_OR_COST(BESTEFFORT),
        ECN_OR_COST(INTERACTIVE),
        ECN_OR_COST(INTERACTIVE),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
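/* Example (editorial, not part of the original file): consumers index
 * this table with the four TOS bits of the IP header; the canonical
 * helper lives in <net/route.h> and is roughly equivalent to:
 *
 *      static inline char example_tos2priority(u8 tos)
 *      {
 *              return ip_tos2prio[IPTOS_TOS(tos) >> 1];
 *      }
 *
 * Adjacent table entries share one priority band, so flipping the
 * lowest of the four TOS bits (historically "cost") never changes the
 * chosen TC_PRIO_* class -- hence the ECN_OR_COST name.
 */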
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
        return SEQ_START_TOKEN;

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)

static void rt_cache_seq_stop(struct seq_file *seq, void *v)

static int rt_cache_seq_show(struct seq_file *seq, void *v)
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-127s\n",
                           "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
                           "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"

static const struct seq_operations rt_cache_seq_ops = {
        .start = rt_cache_seq_start,
        .next  = rt_cache_seq_next,
        .stop  = rt_cache_seq_stop,
        .show  = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, &rt_cache_seq_ops);

static const struct file_operations rt_cache_seq_fops = {
        .open    = rt_cache_seq_open,
        .release = seq_release,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                return &per_cpu(rt_cache_stat, cpu);

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                return &per_cpu(rt_cache_stat, cpu);

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
        struct rt_cache_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");

        seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
                   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
                   dst_entries_get_slow(&ipv4_dst_ops),
                   0, /* st->gc_total */
                   0, /* st->gc_ignored */
                   0, /* st->gc_goal_miss */
                   0, /* st->gc_dst_overflow */
                   0, /* st->in_hlist_search */
                   0  /* st->out_hlist_search */

static const struct seq_operations rt_cpu_seq_ops = {
        .start = rt_cpu_seq_start,
        .next  = rt_cpu_seq_next,
        .stop  = rt_cpu_seq_stop,
        .show  = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, &rt_cpu_seq_ops);

static const struct file_operations rt_cpu_seq_fops = {
        .open    = rt_cpu_seq_open,
        .release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
        struct ip_rt_acct *dst, *src;

        dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);

        for_each_possible_cpu(i) {
                src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
                for (j = 0; j < 256; j++) {
                        dst[j].o_bytes   += src[j].o_bytes;
                        dst[j].o_packets += src[j].o_packets;
                        dst[j].i_bytes   += src[j].i_bytes;
                        dst[j].i_packets += src[j].i_packets;

        seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
static int __net_init ip_rt_do_proc_init(struct net *net)
        struct proc_dir_entry *pde;

        pde = proc_create("rt_cache", 0444, net->proc_net,
        pde = proc_create("rt_cache", 0444,
                          net->proc_net_stat, &rt_cpu_seq_fops);
#ifdef CONFIG_IP_ROUTE_CLASSID
        pde = proc_create_single("rt_acct", 0, net->proc_net,
#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);

static void __net_exit ip_rt_do_proc_exit(struct net *net)
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_acct", net->proc_net);

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
        .init = ip_rt_do_proc_init,
        .exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
        return register_pernet_subsys(&ip_rt_proc_ops);

static inline int ip_rt_proc_init(void)

#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
        return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));

void rt_cache_flush(struct net *net)
        rt_genid_bump_ipv4(net);
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
        const struct rtable *rt = container_of(dst, struct rtable, dst);
        struct net_device *dev = dst->dev;

        if (likely(rt->rt_gw_family == AF_INET)) {
                n = ip_neigh_gw4(dev, rt->rt_gw4);
        } else if (rt->rt_gw_family == AF_INET6) {
                n = ip_neigh_gw6(dev, &rt->rt_gw6);

                pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
                n = ip_neigh_gw4(dev, pkey);

        if (n && !refcount_inc_not_zero(&n->refcnt))

        rcu_read_unlock_bh();
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
        const struct rtable *rt = container_of(dst, struct rtable, dst);
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;

        if (rt->rt_gw_family == AF_INET) {
                pkey = (const __be32 *)&rt->rt_gw4;
        } else if (rt->rt_gw_family == AF_INET6) {
                return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
                   (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {

        __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
        u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
        atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
        u32 old = READ_ONCE(*p_tstamp);
        u32 now = (u32)jiffies;

        if (old != now && cmpxchg(p_tstamp, old, now) == old)
                delta = prandom_u32_max(now - old);

        /* Do not use atomic_add_return() as it makes UBSAN unhappy */
        do {
                old = (u32)atomic_read(p_id);
                new = old + delta + segs;
        } while (atomic_cmpxchg(p_id, old, new) != old);

EXPORT_SYMBOL(ip_idents_reserve);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)

        /* Note the following code is not safe, but this is okay. */
        if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
                get_random_bytes(&net->ipv4.ip_id_key,
                                 sizeof(net->ipv4.ip_id_key));

        hash = siphash_3u32((__force u32)iph->daddr,
                            (__force u32)iph->saddr,
                            &net->ipv4.ip_id_key);
        id = ip_idents_reserve(hash, segs);

EXPORT_SYMBOL(__ip_select_ident);
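/* Example (editorial, not in the original file): the reservation scheme
 * above in miniature.  A userspace sketch of the same idea -- perturb an
 * idle generator by a random amount, then atomically claim a block of
 * 'segs' IDs -- written with C11 atomics instead of the kernel helpers:
 *
 *      uint32_t reserve(atomic_uint *id, uint32_t *stamp,
 *                       uint32_t now, int segs)
 *      {
 *              uint32_t delta = 0, old, new;
 *
 *              if (*stamp != now) {            // generator was idle
 *                      delta = rand() % (now - *stamp);
 *                      *stamp = now;
 *              }
 *              do {
 *                      old = atomic_load(id);
 *                      new = old + delta + segs;
 *              } while (!atomic_compare_exchange_weak(id, &old, new));
 *              return new - segs;              // first ID of the block
 *      }
 */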
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
                             const struct sock *sk,
                             const struct iphdr *iph,
                             u8 prot, u32 mark, int flow_flags)
                const struct inet_sock *inet = inet_sk(sk);

                oif = sk->sk_bound_dev_if;
                tos = RT_CONN_FLAGS(sk);
                prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;

        flowi4_init_output(fl4, oif, mark, tos,
                           RT_SCOPE_UNIVERSE, prot,
                           iph->daddr, iph->saddr, 0, 0,
                           sock_net_uid(net, sk));

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
                               const struct sock *sk)
        const struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = ip_hdr(skb);
        int oif = skb->dev->ifindex;
        u8 tos = RT_TOS(iph->tos);
        u8 prot = iph->protocol;
        u32 mark = skb->mark;

        __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;

        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk),
                           daddr, inet->inet_saddr, 0, 0, sk->sk_uid);

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
                                 const struct sk_buff *skb)
                build_skb_flow_key(fl4, skb, sk);
                build_sk_flow_key(fl4, sk);
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)

        rt = rcu_dereference(fnhe->fnhe_rth_input);
                RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
                dst_dev_put(&rt->dst);
                dst_release(&rt->dst);
        rt = rcu_dereference(fnhe->fnhe_rth_output);
                RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
                dst_dev_put(&rt->dst);
                dst_release(&rt->dst);
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
        struct fib_nh_exception *fnhe, *oldest;

        oldest = rcu_dereference(hash->chain);
        for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
        fnhe_flush_routes(oldest);
static inline u32 fnhe_hashfun(__be32 daddr)
        static u32 fnhe_hashrnd __read_mostly;

        net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
        hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
        return hash_32(hval, FNHE_HASH_SHIFT);
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
        rt->rt_pmtu = fnhe->fnhe_pmtu;
        rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
        rt->dst.expires = fnhe->fnhe_expires;

                rt->rt_flags |= RTCF_REDIRECTED;
                rt->rt_gw_family = AF_INET;
                rt->rt_gw4 = fnhe->fnhe_gw;
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                                  u32 pmtu, bool lock, unsigned long expires)
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe;

        genid = fnhe_genid(dev_net(nh->fib_nh_dev));
        hval = fnhe_hashfun(daddr);

        spin_lock_bh(&fnhe_lock);

        hash = rcu_dereference(nh->nh_exceptions);
                hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
                rcu_assign_pointer(nh->nh_exceptions, hash);

        for (fnhe = rcu_dereference(hash->chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (fnhe->fnhe_daddr == daddr)

                if (fnhe->fnhe_genid != genid)
                        fnhe->fnhe_genid = genid;
                        fnhe->fnhe_pmtu = pmtu;
                        fnhe->fnhe_mtu_locked = lock;
                fnhe->fnhe_expires = max(1UL, expires);
                /* Update all cached dsts too */
                rt = rcu_dereference(fnhe->fnhe_rth_input);
                        fill_route_from_fnhe(rt, fnhe);
                rt = rcu_dereference(fnhe->fnhe_rth_output);
                        fill_route_from_fnhe(rt, fnhe);
                if (depth > FNHE_RECLAIM_DEPTH)
                        fnhe = fnhe_oldest(hash);
                        fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);

                fnhe->fnhe_next = hash->chain;
                rcu_assign_pointer(hash->chain, fnhe);

                fnhe->fnhe_genid = genid;
                fnhe->fnhe_daddr = daddr;
                fnhe->fnhe_pmtu = pmtu;
                fnhe->fnhe_mtu_locked = lock;
                fnhe->fnhe_expires = max(1UL, expires);
                /* Exception created; mark the cached routes for the nexthop
                 * stale, so anyone caching it rechecks if this exception
                 * applies to them.
                 */
                rt = rcu_dereference(nh->nh_rth_input);
                        rt->dst.obsolete = DST_OBSOLETE_KILL;

                for_each_possible_cpu(i) {
                        struct rtable __rcu **prt;

                        prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
                        rt = rcu_dereference(*prt);
                                rt->dst.obsolete = DST_OBSOLETE_KILL;

        fnhe->fnhe_stamp = jiffies;

        spin_unlock_bh(&fnhe_lock);
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
        __be32 new_gw = icmp_hdr(skb)->un.gateway;
        __be32 old_gw = ip_hdr(skb)->saddr;
        struct net_device *dev = skb->dev;
        struct in_device *in_dev;
        struct fib_result res;

        switch (icmp_hdr(skb)->code & 7) {
        case ICMP_REDIR_NETTOS:
        case ICMP_REDIR_HOST:
        case ICMP_REDIR_HOSTTOS:

        if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)

        in_dev = __in_dev_get_rcu(dev);

        if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
            ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
            ipv4_is_zeronet(new_gw))
                goto reject_redirect;

        if (!IN_DEV_SHARED_MEDIA(in_dev)) {
                if (!inet_addr_onlink(in_dev, new_gw, old_gw))
                        goto reject_redirect;
                if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
                        goto reject_redirect;
                if (inet_addr_type(net, new_gw) != RTN_UNICAST)
                        goto reject_redirect;

        n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
                n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
                if (!(n->nud_state & NUD_VALID)) {
                        neigh_event_send(n, NULL);
                        if (fib_lookup(net, fl4, &res, 0) == 0) {
                                struct fib_nh_common *nhc = FIB_RES_NHC(res);

                                nh = container_of(nhc, struct fib_nh, nh_common);
                                update_or_create_fnhe(nh, fl4->daddr, new_gw,
                                                      jiffies + ip_rt_gc_timeout);
                        rt->dst.obsolete = DST_OBSOLETE_KILL;
                        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);

#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (IN_DEV_LOG_MARTIANS(in_dev)) {
                const struct iphdr *iph = (const struct iphdr *) skb->data;
                __be32 daddr = iph->daddr;
                __be32 saddr = iph->saddr;

                net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
                                     "  Advised path = %pI4 -> %pI4\n",
                                     &old_gw, dev->name, &new_gw,
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct net *net = dev_net(skb->dev);
        int oif = skb->dev->ifindex;
        u8 tos = RT_TOS(iph->tos);
        u8 prot = iph->protocol;
        u32 mark = skb->mark;

        rt = (struct rtable *) dst;

        __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
        __ip_do_redirect(rt, skb, &fl4, true);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
        struct rtable *rt = (struct rtable *)dst;
        struct dst_entry *ret = dst;

                if (dst->obsolete > 0) {
                } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
/*
 * 1. The first ip_rt_redirect_number redirects are sent
 *    with exponential backoff, then we stop sending them at all,
 *    assuming that the host ignores our redirects.
 * 2. If we did not see packets requiring redirects
 *    during ip_rt_redirect_silence, we assume that the host
 *    forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
        struct rtable *rt = skb_rtable(skb);
        struct in_device *in_dev;
        struct inet_peer *peer;

        in_dev = __in_dev_get_rcu(rt->dst.dev);
        if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
        log_martians = IN_DEV_LOG_MARTIANS(in_dev);
        vif = l3mdev_master_ifindex_rcu(rt->dst.dev);

        net = dev_net(rt->dst.dev);
        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
                          rt_nexthop(rt, ip_hdr(skb)->daddr));
        /* No redirected packets during ip_rt_redirect_silence;
         * reset the algorithm.
         */
        if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
                peer->rate_tokens = 0;
                peer->n_redirects = 0;

        /* Too many ignored redirects; do not send anything.
         * Set dst.rate_last to the last seen redirected packet.
         */
        if (peer->n_redirects >= ip_rt_redirect_number) {
                peer->rate_last = jiffies;

        /* Check for load limit; set rate_last to the latest sent
         * redirect.
         */
        if (peer->rate_tokens == 0 ||
                          (ip_rt_redirect_load << peer->rate_tokens)))) {
                __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
                peer->rate_last = jiffies;
#ifdef CONFIG_IP_ROUTE_VERBOSE
                    peer->rate_tokens == ip_rt_redirect_number)
                        net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                             &ip_hdr(skb)->saddr, inet_iif(skb),
                                             &ip_hdr(skb)->daddr, &gw);
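/* Example (editorial, not in the original file): with the defaults at the
 * top of this file (ip_rt_redirect_load = HZ/50, ip_rt_redirect_number = 9),
 * the minimum spacing enforced above between successive redirects to one
 * peer is
 *
 *      ip_rt_redirect_load << rate_tokens  =  20 ms * 2^n,
 *
 * i.e. 40 ms, 80 ms, ... up to roughly 5 s, after which redirects stop
 * entirely until the peer has been quiet for ip_rt_redirect_silence
 * (about 20 s) and the counters reset.
 */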
static int ip_error(struct sk_buff *skb)
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = skb->dev;
        struct in_device *in_dev;
        struct inet_peer *peer;

        if (netif_is_l3_master(skb->dev)) {
                dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);

        in_dev = __in_dev_get_rcu(dev);

        /* IP on this device is disabled. */

        net = dev_net(rt->dst.dev);
        if (!IN_DEV_FORWARD(in_dev)) {
                switch (rt->dst.error) {
                        __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
                        __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);

        switch (rt->dst.error) {
                code = ICMP_HOST_UNREACH;
                code = ICMP_NET_UNREACH;
                __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
                code = ICMP_PKT_FILTERED;

        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
                               l3mdev_master_ifindex(skb->dev), 1);

                peer->rate_tokens += now - peer->rate_last;
                if (peer->rate_tokens > ip_rt_error_burst)
                        peer->rate_tokens = ip_rt_error_burst;
                peer->rate_last = now;
                if (peer->rate_tokens >= ip_rt_error_cost)
                        peer->rate_tokens -= ip_rt_error_cost;

                icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:    kfree_skb(skb);
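/* Example (editorial, not in the original file): the peer rate limit
 * above is a plain token bucket, refilled at one token per jiffy and
 * capped at ip_rt_error_burst.  In isolation:
 *
 *      bool tb_allow(unsigned long *tokens, unsigned long *last,
 *                    unsigned long now, unsigned long burst,
 *                    unsigned long cost)
 *      {
 *              *tokens += now - *last;         // refill: 1 token/jiffy
 *              if (*tokens > burst)
 *                      *tokens = burst;        // cap at the burst size
 *              *last = now;
 *              if (*tokens >= cost) {
 *                      *tokens -= cost;        // spend one ICMP error
 *                      return true;
 *              }
 *              return false;
 *      }
 *
 * With cost == HZ and burst == 5*HZ this permits a short burst of five
 * ICMP errors, then roughly one per second.
 */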
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        struct dst_entry *dst = &rt->dst;
        u32 old_mtu = ipv4_mtu(dst);
        struct fib_result res;

        if (ip_mtu_locked(dst))

        if (mtu < ip_rt_min_pmtu) {
                mtu = min(old_mtu, ip_rt_min_pmtu);

        if (rt->rt_pmtu == mtu && !lock &&
            time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))

        if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
                struct fib_nh_common *nhc = FIB_RES_NHC(res);

                nh = container_of(nhc, struct fib_nh, nh_common);
                update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
                                      jiffies + ip_rt_mtu_expires);
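/* Example (editorial): the clamp near the top of __ip_rt_update_pmtu().
 * A reported MTU below ip_rt_min_pmtu (512 + 20 + 20 = 552 bytes by
 * default) is not trusted verbatim; the route is instead locked at a
 * safe floor, which also freezes further PMTU discovery on it:
 *
 *      if (mtu < ip_rt_min_pmtu) {
 *              lock = true;
 *              mtu = min(old_mtu, ip_rt_min_pmtu);
 *      }
 */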
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu)
        struct rtable *rt = (struct rtable *) dst;

        ip_rt_build_flow_key(&fl4, sk, skb);
        __ip_rt_update_pmtu(rt, &fl4, mtu);

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
                      int oif, u8 protocol)
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        u32 mark = IP4_REPLY_MARK(net, skb->mark);

        __build_flow_key(net, &fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, 0);
        rt = __ip_route_output_key(net, &fl4);
                __ip_rt_update_pmtu(rt, &fl4, mtu);

EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        const struct iphdr *iph = (const struct iphdr *) skb->data;

        __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

        if (!fl4.flowi4_mark)
                fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

        rt = __ip_route_output_key(sock_net(sk), &fl4);
                __ip_rt_update_pmtu(rt, &fl4, mtu);

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct dst_entry *odst = NULL;
        struct net *net = sock_net(sk);

        if (!ip_sk_accept_pmtu(sk))

        odst = sk_dst_get(sk);

        if (sock_owned_by_user(sk) || !odst) {
                __ipv4_sk_update_pmtu(skb, sk, mtu);

        __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

        rt = (struct rtable *)odst;
        if (odst->obsolete && !odst->ops->check(odst, 0)) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);

        __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

        if (!dst_check(&rt->dst, 0)) {
                        dst_release(&rt->dst);
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);

                sk_dst_set(sk, &rt->dst);

EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
                   int oif, u8 protocol)
        const struct iphdr *iph = (const struct iphdr *) skb->data;

        __build_flow_key(net, &fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, 0, 0);
        rt = __ip_route_output_key(net, &fl4);
                __ip_do_redirect(rt, skb, &fl4, false);

EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct net *net = sock_net(sk);

        __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
        rt = __ip_route_output_key(net, &fl4);
                __ip_do_redirect(rt, skb, &fl4, false);

EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
        struct rtable *rt = (struct rtable *) dst;

        /* All IPV4 dsts are created with ->obsolete set to the value
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         *
         * When a PMTU/redirect information update invalidates a route,
         * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
         * DST_OBSOLETE_DEAD.
         */
        if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
static void ipv4_link_failure(struct sk_buff *skb)
        struct ip_options opt;

        /* Recompile ip options since IPCB may not be valid anymore. */
        memset(&opt, 0, sizeof(opt));
        opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);

        res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);

        __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);

        rt = skb_rtable(skb);
                dst_set_expires(&rt->dst, 0);
static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
        pr_debug("%s: %pI4 -> %pI4, %s\n",
                 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
                 skb->dev ? skb->dev->name : "?");
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned.
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)

        if (rt_is_output_route(rt))
                src = ip_hdr(skb)->saddr;
                struct fib_result res;
                struct iphdr *iph = ip_hdr(skb);
                struct flowi4 fl4 = {
                        .daddr = iph->daddr,
                        .saddr = iph->saddr,
                        .flowi4_tos = RT_TOS(iph->tos),
                        .flowi4_oif = rt->dst.dev->ifindex,
                        .flowi4_iif = skb->dev->ifindex,
                        .flowi4_mark = skb->mark,
                };

                if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
                        src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
                        src = inet_select_addr(rt->dst.dev,
                                               rt_nexthop(rt, iph->daddr),
        memcpy(addr, &src, 4);
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
        if (!(rt->dst.tclassid & 0xFFFF))
                rt->dst.tclassid |= tag & 0xFFFF;
        if (!(rt->dst.tclassid & 0xFFFF0000))
                rt->dst.tclassid |= tag & 0xFFFF0000;
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
        unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
        unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,

        return min(advmss, IPV4_MAX_PMTU - header_size);
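/* Example (editorial): for a standard 1500-byte Ethernet MTU this yields
 * 1500 - (20 + 20) = 1460 bytes of advertised MSS, clamped into the
 * [ip_rt_min_advmss, IPV4_MAX_PMTU - 40] = [256, 65495] range.
 */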
static unsigned int ipv4_mtu(const struct dst_entry *dst)
        const struct rtable *rt = (const struct rtable *) dst;
        unsigned int mtu = rt->rt_pmtu;

        if (!mtu || time_after_eq(jiffies, rt->dst.expires))
                mtu = dst_metric_raw(dst, RTAX_MTU);

        mtu = READ_ONCE(dst->dev->mtu);

        if (unlikely(ip_mtu_locked(dst))) {
                if (rt->rt_gw_family && mtu > 576)

        mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

        return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe, __rcu **fnhe_p;
        u32 hval = fnhe_hashfun(daddr);

        spin_lock_bh(&fnhe_lock);

        hash = rcu_dereference_protected(nh->nh_exceptions,
                                         lockdep_is_held(&fnhe_lock));

        fnhe_p = &hash->chain;
        fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
                if (fnhe->fnhe_daddr == daddr) {
                        rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
                                fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
                        /* set fnhe_daddr to 0 to ensure it won't bind with
                         * new dsts in rt_bind_exception().
                         */
                        fnhe->fnhe_daddr = 0;
                        fnhe_flush_routes(fnhe);
                        kfree_rcu(fnhe, rcu);
                fnhe_p = &fnhe->fnhe_next;
                fnhe = rcu_dereference_protected(fnhe->fnhe_next,
                                                 lockdep_is_held(&fnhe_lock));

        spin_unlock_bh(&fnhe_lock);
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
        struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
        struct fib_nh_exception *fnhe;

        hval = fnhe_hashfun(daddr);

        for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (fnhe->fnhe_daddr == daddr) {
                        if (fnhe->fnhe_expires &&
                            time_after(jiffies, fnhe->fnhe_expires)) {
                                ip_del_fnhe(nh, daddr);
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
        struct fib_nh_common *nhc = res->nhc;
        struct net_device *dev = nhc->nhc_dev;
        struct fib_info *fi = res->fi;

        if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
            fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))

                struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
                struct fib_nh_exception *fnhe;

                fnhe = find_exception(nh, daddr);
                if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
                        mtu = fnhe->fnhe_pmtu;

        mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

        return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
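/* Editorial sketch (not in the original file) of the precedence the
 * comment above describes, with the elided guards summarized:
 *
 *      mtu = 0;
 *      if (mtu is locked in the fib metrics, or forwarding uses PMTU)
 *              mtu = fib MTU metric;            // 1. route metric
 *      if (!mtu) {
 *              fnhe = find_exception(nh, daddr);
 *              if (fnhe && !expired(fnhe))
 *                      mtu = fnhe->fnhe_pmtu;   // 2. nexthop exception
 *      }
 *      if (!mtu)
 *              mtu = min(dev->mtu, IP_MAX_MTU); // 3. egress device
 *      return mtu - lwtunnel_headroom(...);     // minus tunnel overhead
 */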
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
                              __be32 daddr, const bool do_cache)

        spin_lock_bh(&fnhe_lock);

        if (daddr == fnhe->fnhe_daddr) {
                struct rtable __rcu **porig;
                struct rtable *orig;
                int genid = fnhe_genid(dev_net(rt->dst.dev));

                if (rt_is_input_route(rt))
                        porig = &fnhe->fnhe_rth_input;
                        porig = &fnhe->fnhe_rth_output;
                orig = rcu_dereference(*porig);

                if (fnhe->fnhe_genid != genid) {
                        fnhe->fnhe_genid = genid;
                        fnhe->fnhe_pmtu = 0;
                        fnhe->fnhe_expires = 0;
                        fnhe->fnhe_mtu_locked = false;
                        fnhe_flush_routes(fnhe);

                fill_route_from_fnhe(rt, fnhe);
                        rt->rt_gw_family = AF_INET;

                        rcu_assign_pointer(*porig, rt);
                                dst_dev_put(&orig->dst);
                                dst_release(&orig->dst);

                fnhe->fnhe_stamp = jiffies;

        spin_unlock_bh(&fnhe_lock);
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
        struct rtable *orig, *prev, **p;

        if (rt_is_input_route(rt)) {
                p = (struct rtable **)&nh->nh_rth_input;
                p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);

        /* hold dst before doing cmpxchg() to avoid race condition
         * on this dst
         */
        prev = cmpxchg(p, orig, rt);
                        dst_dev_put(&orig->dst);
                        dst_release(&orig->dst);
                dst_release(&rt->dst);
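/* Example (editorial, not in the original file): the lockless publish
 * pattern used above, in isolation.  The new route is swapped into the
 * cache slot only if the slot still holds the value we read; on a lost
 * race we simply drop the reference we took for the cache:
 *
 *      orig = *p;
 *      dst_hold(&rt->dst);             // ref for the cache slot
 *      prev = cmpxchg(p, orig, rt);
 *      if (prev == orig) {             // won: retire the old entry
 *              if (orig) {
 *                      dst_dev_put(&orig->dst);
 *                      dst_release(&orig->dst);
 *              }
 *      } else {                        // lost: undo our hold
 *              dst_release(&rt->dst);
 *      }
 */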
struct uncached_list {
        struct list_head head;

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
        struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

        rt->rt_uncached_list = ul;

        spin_lock_bh(&ul->lock);
        list_add_tail(&rt->rt_uncached, &ul->head);
        spin_unlock_bh(&ul->lock);

void rt_del_uncached_list(struct rtable *rt)
        if (!list_empty(&rt->rt_uncached)) {
                struct uncached_list *ul = rt->rt_uncached_list;

                spin_lock_bh(&ul->lock);
                list_del(&rt->rt_uncached);
                spin_unlock_bh(&ul->lock);
static void ipv4_dst_destroy(struct dst_entry *dst)
        struct rtable *rt = (struct rtable *)dst;

        ip_dst_metrics_put(dst);
        rt_del_uncached_list(rt);

void rt_flush_dev(struct net_device *dev)
        struct net *net = dev_net(dev);

        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

                spin_lock_bh(&ul->lock);
                list_for_each_entry(rt, &ul->head, rt_uncached) {
                        if (rt->dst.dev != dev)
                        rt->dst.dev = net->loopback_dev;
                        dev_hold(rt->dst.dev);
                spin_unlock_bh(&ul->lock);
static bool rt_cache_valid(const struct rtable *rt)
               rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                           const struct fib_result *res,
                           struct fib_nh_exception *fnhe,
                           struct fib_info *fi, u16 type, u32 itag,
                           const bool do_cache)
        bool cached = false;

                struct fib_nh_common *nhc = FIB_RES_NHC(*res);

                if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
                        rt->rt_gw_family = nhc->nhc_gw_family;
                        /* only INET and INET6 are supported */
                        if (likely(nhc->nhc_gw_family == AF_INET))
                                rt->rt_gw4 = nhc->nhc_gw.ipv4;
                                rt->rt_gw6 = nhc->nhc_gw.ipv6;

                ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

                nh = container_of(nhc, struct fib_nh, nh_common);
#ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
                rt->dst.lwtstate = lwtstate_get(nh->fib_nh_lws);
                        cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
                        cached = rt_cache_route(nh, rt);
                if (unlikely(!cached)) {
                        /* Routes we intend to cache in nexthop exception or
                         * FIB nexthop have the DST_NOCACHE bit clear.
                         * However, if we are unsuccessful at storing this
                         * route into the cache we really need to set it.
                         */
                        rt->rt_gw_family = AF_INET;
                        rt_add_uncached_list(rt);
                rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
        set_class_tag(rt, res->tclassid);
        set_class_tag(rt, itag);
struct rtable *rt_dst_alloc(struct net_device *dev,
                            unsigned int flags, u16 type,
                            bool nopolicy, bool noxfrm, bool will_cache)

        rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
                       (will_cache ? 0 : DST_HOST) |
                       (nopolicy ? DST_NOPOLICY : 0) |
                       (noxfrm ? DST_NOXFRM : 0));

                rt->rt_genid = rt_genid_ipv4(dev_net(dev));
                rt->rt_flags = flags;
                rt->rt_is_input = 0;
                rt->rt_mtu_locked = 0;
                rt->rt_gw_family = 0;
                INIT_LIST_HEAD(&rt->rt_uncached);

                rt->dst.output = ip_output;
                if (flags & RTCF_LOCAL)
                        rt->dst.input = ip_local_deliver;

EXPORT_SYMBOL(rt_dst_alloc);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                          u8 tos, struct net_device *dev,
                          struct in_device *in_dev, u32 *itag)

        /* Primary sanity checks. */

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
            skb->protocol != htons(ETH_P_IP))

        if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))

        if (ipv4_is_zeronet(saddr)) {
                if (!ipv4_is_local_multicast(daddr) &&
                    ip_hdr(skb)->protocol != IPPROTO_IGMP)
                err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                             u8 tos, struct net_device *dev, int our)
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        unsigned int flags = RTCF_MULTICAST;

        err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);

                flags |= RTCF_LOCAL;

        rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
                           IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);

#ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
        rth->dst.output = ip_rt_bug;
        rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
        if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
                rth->dst.input = ip_mr_input;
        RT_CACHE_STAT_INC(in_slow_mc);

        skb_dst_set(skb, &rth->dst);
static void ip_handle_martian_source(struct net_device *dev,
                                     struct in_device *in_dev,
                                     struct sk_buff *skb,

        RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
                /* RFC1812 recommendation: if the source is martian,
                 * the only hint is the MAC header.
                 */
                pr_warn("martian source %pI4 from %pI4, on dev %s\n",
                        &daddr, &saddr, dev->name);
                if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
                        print_hex_dump(KERN_WARNING, "ll header: ",
                                       DUMP_PREFIX_OFFSET, 16, 1,
                                       skb_mac_header(skb),
                                       dev->hard_header_len, false);
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
                           const struct fib_result *res,
                           struct in_device *in_dev,
                           __be32 daddr, __be32 saddr, u32 tos)
        struct fib_nh_common *nhc = FIB_RES_NHC(*res);
        struct net_device *dev = nhc->nhc_dev;
        struct fib_nh_exception *fnhe;
        struct in_device *out_dev;

        /* get a working reference to the output device */
        out_dev = __in_dev_get_rcu(dev);
                net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");

        err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
                                  in_dev->dev, in_dev, &itag);
                ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,

        do_cache = res->fi && !itag;
        if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
            skb->protocol == htons(ETH_P_IP)) {

                gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
                if (IN_DEV_SHARED_MEDIA(out_dev) ||
                    inet_addr_onlink(out_dev, saddr, gw))
                        IPCB(skb)->flags |= IPSKB_DOREDIRECT;
        if (skb->protocol != htons(ETH_P_IP)) {
                /* Not IP (i.e. ARP). Do not create a route if it is
                 * invalid for proxy arp. DNAT routes are always valid.
                 *
                 * The proxy arp feature has been extended to allow ARP
                 * replies back on the same interface, to support
                 * Private VLAN switch technologies. See arp.c.
                 */
                if (out_dev == in_dev &&
                    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {

        nh = container_of(nhc, struct fib_nh, nh_common);
        fnhe = find_exception(nh, daddr);
                        rth = rcu_dereference(fnhe->fnhe_rth_input);
                        rth = rcu_dereference(nh->nh_rth_input);
                if (rt_cache_valid(rth)) {
                        skb_dst_set_noref(skb, &rth->dst);

        rth = rt_dst_alloc(out_dev->dev, 0, res->type,
                           IN_DEV_CONF_GET(in_dev, NOPOLICY),
                           IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);

        rth->rt_is_input = 1;
        RT_CACHE_STAT_INC(in_slow_tot);

        rth->dst.input = ip_forward;

        rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
        lwtunnel_set_redirect(&rth->dst);
        skb_dst_set(skb, &rth->dst);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
                                 struct flow_keys *hash_keys)
        const struct iphdr *outer_iph = ip_hdr(skb);
        const struct iphdr *key_iph = outer_iph;
        const struct iphdr *inner_iph;
        const struct icmphdr *icmph;
        struct iphdr _inner_iph;
        struct icmphdr _icmph;

        if (likely(outer_iph->protocol != IPPROTO_ICMP))

        if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))

        icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),

        if (icmph->type != ICMP_DEST_UNREACH &&
            icmph->type != ICMP_REDIRECT &&
            icmph->type != ICMP_TIME_EXCEEDED &&
            icmph->type != ICMP_PARAMETERPROB)

        inner_iph = skb_header_pointer(skb,
                                       outer_iph->ihl * 4 + sizeof(_icmph),
                                       sizeof(_inner_iph), &_inner_iph);

        key_iph = inner_iph;

        hash_keys->addrs.v4addrs.src = key_iph->saddr;
        hash_keys->addrs.v4addrs.dst = key_iph->daddr;
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
                       const struct sk_buff *skb, struct flow_keys *flkeys)
        u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
        struct flow_keys hash_keys;

        switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
                memset(&hash_keys, 0, sizeof(hash_keys));
                hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        ip_multipath_l3_keys(skb, &hash_keys);
                        hash_keys.addrs.v4addrs.src = fl4->saddr;
                        hash_keys.addrs.v4addrs.dst = fl4->daddr;

                /* skb is currently provided only when forwarding */
                        unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
                        struct flow_keys keys;

                        /* short-circuit if we already have L4 hash present */
                                return skb_get_hash_raw(skb) >> 1;

                        memset(&hash_keys, 0, sizeof(hash_keys));

                        skb_flow_dissect_flow_keys(skb, &keys, flag);

                        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
                        hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
                        hash_keys.ports.src = flkeys->ports.src;
                        hash_keys.ports.dst = flkeys->ports.dst;
                        hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
                        memset(&hash_keys, 0, sizeof(hash_keys));
                        hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        hash_keys.addrs.v4addrs.src = fl4->saddr;
                        hash_keys.addrs.v4addrs.dst = fl4->daddr;
                        hash_keys.ports.src = fl4->fl4_sport;
                        hash_keys.ports.dst = fl4->fl4_dport;
                        hash_keys.basic.ip_proto = fl4->flowi4_proto;

        mhash = flow_hash_from_keys(&hash_keys);

                mhash = jhash_2words(mhash, multipath_hash, 0);

#endif /* CONFIG_IP_ROUTE_MULTIPATH */
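/* Editorial note (not in the original file): callers feed the returned
 * hash into fib_select_multipath(), which implements hash-threshold
 * next-hop selection (RFC 2992): each next hop i with weight w_i owns
 * the hash range up to roughly
 *
 *      upper_bound_i = (w_1 + ... + w_i) / W * (2^31 - 1),
 *
 * and the first hop whose bound is >= the hash is chosen.  This keeps
 * most existing flows on their old path when a hop is added or removed.
 */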
static int ip_mkroute_input(struct sk_buff *skb,
                            struct fib_result *res,
                            struct in_device *in_dev,
                            __be32 daddr, __be32 saddr, u32 tos,
                            struct flow_keys *hkeys)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (res->fi && res->fi->fib_nhs > 1) {
                int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

                fib_select_multipath(res, h);

        /* create a routing cache entry */
        return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
/*
 * NOTE. We drop all packets that have a local source
 * address, because every properly looped-back packet
 * must have the correct destination already attached by the output routine.
 *
 * Such an approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 * called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                               u8 tos, struct net_device *dev,
                               struct fib_result *res)
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        struct flow_keys *flkeys = NULL, _flkeys;
        struct net *net = dev_net(dev);
        struct ip_tunnel_info *tun_info;
        unsigned int flags = 0;

        /* IP on this device is disabled. */

        /* Check for the most weird martians, which cannot be detected
         * by fib_lookup.
         */

        tun_info = skb_tunnel_info(skb);
        if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
                fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
                fl4.flowi4_tun_key.tun_id = 0;

        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
                goto martian_source;

        if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))

        /* Accept zero addresses only to limited broadcast;
         * I even do not know whether to fix it or not. Waiting for complaints :-)
         */
        if (ipv4_is_zeronet(saddr))
                goto martian_source;

        if (ipv4_is_zeronet(daddr))
                goto martian_destination;

        /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
         * calling it only once when daddr and/or saddr are loopback addresses.
         */
        if (ipv4_is_loopback(daddr)) {
                if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_destination;
        } else if (ipv4_is_loopback(saddr)) {
                if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
                        goto martian_source;
        /*
         * Now we are ready to route packet.
         */
        fl4.flowi4_iif = dev->ifindex;
        fl4.flowi4_mark = skb->mark;
        fl4.flowi4_tos = tos;
        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
        fl4.flowi4_flags = 0;
        fl4.flowi4_uid = sock_net_uid(net, NULL);

        if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
                fl4.flowi4_proto = 0;

        err = fib_lookup(net, &fl4, res, 0);
                if (!IN_DEV_FORWARD(in_dev))
                        err = -EHOSTUNREACH;

        if (res->type == RTN_BROADCAST) {
                if (IN_DEV_BFORWARD(in_dev))

        if (res->type == RTN_LOCAL) {
                err = fib_validate_source(skb, saddr, daddr, tos,
                                          0, dev, in_dev, &itag);
                        goto martian_source;

        if (!IN_DEV_FORWARD(in_dev)) {
                err = -EHOSTUNREACH;
        if (res->type != RTN_UNICAST)
                goto martian_destination;

        err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);

        if (skb->protocol != htons(ETH_P_IP))

        if (!ipv4_is_zeronet(saddr)) {
                err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
                        goto martian_source;
        flags |= RTCF_BROADCAST;
        res->type = RTN_BROADCAST;
        RT_CACHE_STAT_INC(in_brd);

                struct fib_nh_common *nhc = FIB_RES_NHC(*res);

                nh = container_of(nhc, struct fib_nh, nh_common);
                rth = rcu_dereference(nh->nh_rth_input);
                if (rt_cache_valid(rth)) {
                        skb_dst_set_noref(skb, &rth->dst);

        rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
                           flags | RTCF_LOCAL, res->type,
                           IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);

        rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
        rth->dst.tclassid = itag;
        rth->rt_is_input = 1;

        RT_CACHE_STAT_INC(in_slow_tot);
        if (res->type == RTN_UNREACHABLE) {
                rth->dst.input = ip_error;
                rth->dst.error = -err;
                rth->rt_flags &= ~RTCF_LOCAL;

                struct fib_nh_common *nhc = FIB_RES_NHC(*res);

                rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
                if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
                        WARN_ON(rth->dst.input == lwtunnel_input);
                        rth->dst.lwtstate->orig_input = rth->dst.input;
                        rth->dst.input = lwtunnel_input;

                nh = container_of(nhc, struct fib_nh, nh_common);
                if (unlikely(!rt_cache_route(nh, rth)))
                        rt_add_uncached_list(rth);

        skb_dst_set(skb, &rth->dst);

        RT_CACHE_STAT_INC(in_no_route);
        res->type = RTN_UNREACHABLE;

        /*
         * Do not cache martian addresses: they should be logged (RFC1812)
         */
martian_destination:
        RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (IN_DEV_LOG_MARTIANS(in_dev))
                net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
                                     &daddr, &saddr, dev->name);

        ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                         u8 tos, struct net_device *dev)
        struct fib_result res;

        tos &= IPTOS_RT_MASK;

        err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);

EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                       u8 tos, struct net_device *dev, struct fib_result *res)
        /* Multicast recognition logic is moved from route cache to here.
         * The problem was that too many Ethernet cards have broken/missing
         * hardware multicast filters :-( As a result, a host on a multicast
         * network may acquire a lot of useless route cache entries, sort of
         * SDR messages from all over the world. Now we try to get rid of them.
         * Really, provided software IP multicast filter is organized
         * reasonably (at least, hashed), it does not result in a slowdown
         * compared with route cache reject entries.
         * Note that multicast routers are not affected, because a
         * route cache entry is created eventually.
         */
        if (ipv4_is_multicast(daddr)) {
                struct in_device *in_dev = __in_dev_get_rcu(dev);

                our = ip_check_mc_rcu(in_dev, daddr, saddr,
                                      ip_hdr(skb)->protocol);

                /* check l3 master if no match yet */
                if (!our && netif_is_l3_slave(dev)) {
                        struct in_device *l3_in_dev;

                        l3_in_dev = __in_dev_get_rcu(skb->dev);
                                our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
                                                      ip_hdr(skb)->protocol);

#ifdef CONFIG_IP_MROUTE
                    (!ipv4_is_local_multicast(daddr) &&
                     IN_DEV_MFORWARD(in_dev))
                        err = ip_route_input_mc(skb, daddr, saddr,

        return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
                                       const struct flowi4 *fl4, int orig_oif,
                                       struct net_device *dev_out,
        struct fib_info *fi = res->fi;
        struct fib_nh_exception *fnhe;
        struct in_device *in_dev;
        u16 type = res->type;

        in_dev = __in_dev_get_rcu(dev_out);
                return ERR_PTR(-EINVAL);

        if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
                if (ipv4_is_loopback(fl4->saddr) &&
                    !(dev_out->flags & IFF_LOOPBACK) &&
                    !netif_is_l3_master(dev_out))
                        return ERR_PTR(-EINVAL);

        if (ipv4_is_lbcast(fl4->daddr))
                type = RTN_BROADCAST;
        else if (ipv4_is_multicast(fl4->daddr))
                type = RTN_MULTICAST;
        else if (ipv4_is_zeronet(fl4->daddr))
                return ERR_PTR(-EINVAL);

        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;

        if (type == RTN_BROADCAST) {
                flags |= RTCF_BROADCAST | RTCF_LOCAL;
        } else if (type == RTN_MULTICAST) {
                flags |= RTCF_MULTICAST | RTCF_LOCAL;
                if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
                        flags &= ~RTCF_LOCAL;
                /* If a multicast route does not exist, use
                 * the default one, but do not gateway in this case.
                 */
                if (fi && res->prefixlen < 4)
        } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
                   (orig_oif != dev_out->ifindex)) {
                /* For local routes that require a particular output interface
                 * we do not want to cache the result.  Caching the result
                 * causes incorrect behaviour when there are multiple source
                 * addresses on the interface, the end result being that if the
                 * intended recipient is waiting on that interface for the
                 * packet he won't receive it because it will be delivered on
                 * the loopback interface and the IP_PKTINFO ipi_ifindex will
                 * be set to the loopback interface as well.
                 */

        do_cache &= fi != NULL;
                struct fib_nh_common *nhc = FIB_RES_NHC(*res);
                struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
                struct rtable __rcu **prth;

                fnhe = find_exception(nh, fl4->daddr);
                        prth = &fnhe->fnhe_rth_output;
                        if (unlikely(fl4->flowi4_flags &
                                     FLOWI_FLAG_KNOWN_NH &&
                                     !(nhc->nhc_gw_family &&
                                       nhc->nhc_scope == RT_SCOPE_LINK))) {
                        prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
                rth = rcu_dereference(*prth);
                if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))

        rth = rt_dst_alloc(dev_out, flags, type,
                           IN_DEV_CONF_GET(in_dev, NOPOLICY),
                           IN_DEV_CONF_GET(in_dev, NOXFRM),
                return ERR_PTR(-ENOBUFS);

        rth->rt_iif = orig_oif;

        RT_CACHE_STAT_INC(out_slow_tot);

        if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
                if (flags & RTCF_LOCAL &&
                    !(dev_out->flags & IFF_LOOPBACK)) {
                        rth->dst.output = ip_mc_output;
                        RT_CACHE_STAT_INC(out_slow_mc);
#ifdef CONFIG_IP_MROUTE
                if (type == RTN_MULTICAST) {
                        if (IN_DEV_MFORWARD(in_dev) &&
                            !ipv4_is_local_multicast(fl4->daddr)) {
                                rth->dst.input = ip_mr_input;
                                rth->dst.output = ip_mc_output;

        rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
        lwtunnel_set_redirect(&rth->dst);
/*
 * Major route resolver routine.
 */
struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
                                        const struct sk_buff *skb)
        __u8 tos = RT_FL_TOS(fl4);
        struct fib_result res = {

        fl4->flowi4_iif = LOOPBACK_IFINDEX;
        fl4->flowi4_tos = tos & IPTOS_RT_MASK;
        fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
                             RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

        rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);

EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
                                            struct fib_result *res,
                                            const struct sk_buff *skb)
        struct net_device *dev_out = NULL;
        int orig_oif = fl4->flowi4_oif;
        unsigned int flags = 0;
        int err = -ENETUNREACH;

        rth = ERR_PTR(-EINVAL);
        if (ipv4_is_multicast(fl4->saddr) ||
            ipv4_is_lbcast(fl4->saddr) ||
            ipv4_is_zeronet(fl4->saddr))

        /* I removed the check for oif == dev_out->oif here.
         * It was wrong for two reasons:
         * 1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
         *    is assigned to multiple interfaces.
         * 2. Moreover, we are allowed to send packets with saddr
         *    of another iface. --ANK
         */
        if (fl4->flowi4_oif == 0 &&
            (ipv4_is_multicast(fl4->daddr) ||
             ipv4_is_lbcast(fl4->daddr))) {
                /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
                dev_out = __ip_dev_find(net, fl4->saddr, false);

                /* Special hack: user can direct multicasts
                 * and limited broadcast via the necessary interface
                 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
                 * This hack is not just for fun, it allows
                 * vic, vat and friends to work.
                 * They bind the socket to loopback, set ttl to zero
                 * and expect that it will work.
                 * From the viewpoint of the routing cache they are broken,
                 * because we are not allowed to build a multicast path
                 * with a loopback source addr (look, the routing cache
                 * cannot know that ttl is zero, so that the packet
                 * will not leave this host and the route is valid).
                 * Luckily, this hack is a good workaround.
                 */
                fl4->flowi4_oif = dev_out->ifindex;
        if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
                /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
                if (!__ip_dev_find(net, fl4->saddr, false))

        if (fl4->flowi4_oif) {
                dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
                rth = ERR_PTR(-ENODEV);

                /* RACE: Check return value of inet_select_addr instead. */
                if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
                        rth = ERR_PTR(-ENETUNREACH);
                if (ipv4_is_local_multicast(fl4->daddr) ||
                    ipv4_is_lbcast(fl4->daddr) ||
                    fl4->flowi4_proto == IPPROTO_IGMP) {
                                fl4->saddr = inet_select_addr(dev_out, 0,

                        if (ipv4_is_multicast(fl4->daddr))
                                fl4->saddr = inet_select_addr(dev_out, 0,
                        else if (!fl4->daddr)
                                fl4->saddr = inet_select_addr(dev_out, 0,

                        fl4->daddr = fl4->saddr;
                fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
                dev_out = net->loopback_dev;
                fl4->flowi4_oif = LOOPBACK_IFINDEX;
                res->type = RTN_LOCAL;
                flags |= RTCF_LOCAL;
        err = fib_lookup(net, fl4, res, 0);

                if (fl4->flowi4_oif &&
                    (ipv4_is_multicast(fl4->daddr) ||
                     !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
                        /* Apparently, the routing tables are wrong. Assume
                         * that the destination is on link.
                         *
                         * Because we are allowed to send to an iface
                         * even if it has NO routes and NO assigned
                         * addresses. When oif is specified, routing
                         * tables are looked up with only one purpose:
                         * to catch if the destination is gatewayed, rather than
                         * direct. Moreover, if MSG_DONTROUTE is set,
                         * we send the packet, ignoring both routing tables
                         * and ifaddr state. --ANK
                         *
                         * We could make it even if oif is unknown,
                         * likely IPv6, but we do not.
                         */
                        if (fl4->saddr == 0)
                                fl4->saddr = inet_select_addr(dev_out, 0,
                        res->type = RTN_UNICAST;
2532 if (res->type == RTN_LOCAL) {
2534 if (res->fi->fib_prefsrc)
2535 fl4->saddr = res->fi->fib_prefsrc;
2537 fl4->saddr = fl4->daddr;
2540 /* L3 master device is the loopback for that domain */
2541 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2544 /* make sure orig_oif points to fib result device even
2545 * though packet rx/tx happens over loopback or l3mdev
2547 orig_oif = FIB_RES_OIF(*res);
2549 fl4->flowi4_oif = dev_out->ifindex;
2550 flags |= RTCF_LOCAL;
2554 fib_select_path(net, res, fl4, skb);
2556 dev_out = FIB_RES_DEV(*res);
2557 fl4->flowi4_oif = dev_out->ifindex;
2561 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
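/*
 * Illustrative note (a sketch, not kernel documentation): the "assume
 * on-link" fallback above is what lets a sender that forces an output
 * device reach a destination absent from every routing table.  For
 * example, something like
 *
 *	ping -I eth0 192.0.2.1
 *
 * (hypothetical device and address) still gets a direct route on eth0
 * even when no FIB entry covers 192.0.2.0/24.
 */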
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.check			= ipv4_blackhole_dst_check,
	.mtu			= ipv4_blackhole_mtu,
	.default_advmss		= ipv4_default_advmss,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.redirect		= ipv4_rt_blackhole_redirect,
	.cow_metrics		= ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		= ipv4_neigh_lookup,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;
		rt->rt_mtu_locked = ort->rt_mtu_locked;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gw_family = ort->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			rt->rt_gw4 = ort->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			rt->rt_gw6 = ort->rt_gw6;

		INIT_LIST_HEAD(&rt->rt_uncached);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
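/*
 * Usage sketch (assumed caller behaviour, e.g. in the xfrm code; not
 * part of this file): a holder that must keep *some* dst alive can
 * trade a dying route for a blackhole:
 *
 *	dst = ipv4_blackhole_route(net, dst);	// consumes the old dst
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *
 * Packets sent via the result are silently discarded, and because
 * ->check always fails, cached users re-resolve on their next lookup.
 */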
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;
	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);
	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
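/*
 * Illustrative caller (a sketch under assumed variables oif, mark, tos,
 * daddr, saddr, dport and sport; none of them come from this file):
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	flowi4_init_output(&fl4, oif, mark, tos, RT_SCOPE_UNIVERSE,
 *			   IPPROTO_UDP, 0, daddr, saddr, dport, sport,
 *			   sock_net_uid(net, sk));
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 * Passing a non-zero flowi4_proto is what opts the result into the
 * xfrm (IPsec) policy lookup above.
 */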
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
			struct sk_buff *skb, u32 portid, u32 seq)
{
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = fl4->flowi4_tos;
	r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_gw_family == AF_INET &&
	    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
		goto nla_put_failure;
	} else if (rt->rt_gw_family == AF_INET6) {
		int alen = sizeof(struct in6_addr);
		struct nlattr *nla;
		struct rtvia *via;

		nla = nla_reserve(skb, RTA_VIA, alen + 2);
		if (!nla)
			goto nla_put_failure;

		via = nla_data(nla);
		via->rtvia_family = AF_INET6;
		memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
	}

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rt->rt_mtu_locked && expires)
		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
	    nla_put_u32(skb, RTA_UID,
			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, portid);

			if (err <= 0) {
				if (err == 0)
					return 0;
				goto nla_put_failure;
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers, this skb can pass
	 * through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = 0;
	iph->ihl = 0x5;
	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		udph->len = sizeof(struct udphdr);
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source = sport;
		tcph->dest = dport;
		tcph->doff = sizeof(struct tcphdr) / 4;
		tcph->rst = 1;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
	}
	}
	return skb;
}
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
				   rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
	    rtm->rtm_table || rtm->rtm_protocol ||
	    rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
		return -EINVAL;
	}
	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
			       RTM_F_FIB_MATCH)) {
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
		return -EINVAL;
	}

	err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
				 rtm_ipv4_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
		return -EINVAL;
	}

	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;
		switch (i) {
		case RTA_IIF: case RTA_OIF:
		case RTA_SRC: case RTA_DST:
		case RTA_IP_PROTO:
		case RTA_SPORT: case RTA_DPORT:
		case RTA_MARK: case RTA_UID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
			return -EINVAL;
		}
	}
	return 0;
}

static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX + 1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}
	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);
	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev = dev;
		skb->mark = mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_trim(skb, 0);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
	}
	if (err < 0)
		goto errout_rcu;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;

errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}
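/*
 * Userspace exercises this handler through RTM_GETROUTE; for example
 * (illustrative command, hypothetical addresses and device):
 *
 *	ip route get 192.0.2.1 from 198.51.100.7 iif eth0
 *
 * iproute2 encodes that as RTA_DST/RTA_SRC/RTA_IIF attributes, and the
 * reply is the single RTM_NEWROUTE message built by rt_fill_info()
 * above (or by fib_dump_info() when RTM_F_FIB_MATCH is requested).
 */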
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}
#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;

static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}
static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ip_min_valid_pmtu,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};
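/*
 * The flush file is write-only (mode 0200); any write invalidates the
 * cached routes of the owning netns.  Illustrative shell usage:
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * Reads are rejected with -EINVAL by ipv4_sysctl_rtcache_flush() above,
 * which also bumps the fnhe genid so per-nexthop exceptions expire.
 */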
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};
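/*
 * Background (hedged summary): these per-netns generation counters are
 * what rt_cache_flush() and friends bump to invalidate cached dsts.  A
 * cached rtable records rt_genid at creation and is treated as stale
 * once the counter moves on, so a "flush" never has to walk a table.
 */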
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
	int cpu;

	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct),
				    __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_IP_MROUTE
	ip_mr_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif