/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              ROUTE - implementation of the IP router.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *              Alan Cox        :       Verify area fixes.
 *              Alan Cox        :       cli() protects routing changes
 *              Rui Oliveira    :       ICMP routing table updates
 *              (rco@di.uminho.pt)      Routing table insertion and update
 *              Linus Torvalds  :       Rewrote bits to be sensible
 *              Alan Cox        :       Added BSD route gw semantics
 *              Alan Cox        :       Super /proc >4K
 *              Alan Cox        :       MTU in route table
 *              Alan Cox        :       MSS actually. Also added the window
 *                                      clamper.
 *              Sam Lantinga    :       Fixed route matching in rt_del()
 *              Alan Cox        :       Routing cache support.
 *              Alan Cox        :       Removed compatibility cruft.
 *              Alan Cox        :       RTF_REJECT support.
 *              Alan Cox        :       TCP irtt support.
 *              Jonathan Naylor :       Added Metric support.
 *      Miquel van Smoorenburg  :       BSD API fixes.
 *      Miquel van Smoorenburg  :       Metrics.
 *              Alan Cox        :       Use __u32 properly
 *              Alan Cox        :       Aligned routing errors more closely with BSD,
 *                                      though our system is still very different.
 *              Alan Cox        :       Faster /proc handling
 *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
 *                                      routing caches and better behaviour.
 *
 *              Olaf Erb        :       irtt wasn't being copied right.
 *              Bjorn Ekwall    :       Kerneld route support.
 *              Alan Cox        :       Multicast fixed (I hope)
 *              Pavel Krauz     :       Limited broadcast fixed
 *              Mike McLagan    :       Routing by source
 *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
 *                                      route.c and rewritten from scratch.
 *              Andi Kleen      :       Load-limit warning messages.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
 *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
 *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
 *              Marc Boucher    :       routing by fwmark
 *      Robert Olsson           :       Added rt_cache statistics
 *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
 *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
 *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
 *      Ilia Sotnikov           :       Removed TOS from hash calculations
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
        ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

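/* Default values for the tunables below; most are exported at run time
 * through /proc/sys/net/ipv4/route/ (see the sysctl table registered
 * later in this file).
 */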
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly  = 9;
static int ip_rt_redirect_load __read_mostly    = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly       = HZ;
static int ip_rt_error_burst __read_mostly      = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly       = 256;

static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;

/*
 *      Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int      ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void              ipv4_link_failure(struct sk_buff *skb);
static void              ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                           struct sk_buff *skb, u32 mtu);
static void              ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
static void              ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
        WARN_ON(1);
        return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
        .mtu =                  ipv4_mtu,
        .cow_metrics =          ipv4_cow_metrics,
        .destroy =              ipv4_dst_destroy,
        .negative_advice =      ipv4_negative_advice,
        .link_failure =         ipv4_link_failure,
        .update_pmtu =          ip_rt_update_pmtu,
        .redirect =             ip_do_redirect,
        .local_out =            __ip_local_out,
        .neigh_lookup =         ipv4_neigh_lookup,
        .confirm_neigh =        ipv4_confirm_neigh,
};

#define ECN_OR_COST(class)      TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
        TC_PRIO_BESTEFFORT,
        ECN_OR_COST(BESTEFFORT),
        TC_PRIO_BESTEFFORT,
        ECN_OR_COST(BESTEFFORT),
        TC_PRIO_BULK,
        ECN_OR_COST(BULK),
        TC_PRIO_BULK,
        ECN_OR_COST(BULK),
        TC_PRIO_INTERACTIVE,
        ECN_OR_COST(INTERACTIVE),
        TC_PRIO_INTERACTIVE,
        ECN_OR_COST(INTERACTIVE),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK),
        TC_PRIO_INTERACTIVE_BULK,
        ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
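
/* The table above is indexed with the TOS bits of the IP header shifted
 * right by one (see rt_tos2priority() in <net/route.h>, roughly
 * ip_tos2prio[IPTOS_TOS(tos) >> 1]), so each class appears twice: once
 * plain and once via ECN_OR_COST(), which expands to the same value.
 */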

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
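/* Since the removal of the IPv4 routing cache (Linux 3.6), the
 * /proc/net/rt_cache dump consists of its header line only; the stub
 * seq_file operations below are kept so existing tools keep working.
 */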
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos)
                return NULL;
        return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-127s\n",
                           "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
                           "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
                           "HHUptod\tSpecDst");
        return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
        .start  = rt_cache_seq_start,
        .next   = rt_cache_seq_next,
        .stop   = rt_cache_seq_stop,
        .show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
        .open    = rt_cache_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
        return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
        struct rt_cache_stat *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
                return 0;
        }

        seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
                   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
                   dst_entries_get_slow(&ipv4_dst_ops),
                   0, /* st->in_hit */
                   st->in_slow_tot,
                   st->in_slow_mc,
                   st->in_no_route,
                   st->in_brd,
                   st->in_martian_dst,
                   st->in_martian_src,

                   0, /* st->out_hit */
                   st->out_slow_tot,
                   st->out_slow_mc,

                   0, /* st->gc_total */
                   0, /* st->gc_ignored */
                   0, /* st->gc_goal_miss */
                   0, /* st->gc_dst_overflow */
                   0, /* st->in_hlist_search */
                   0  /* st->out_hlist_search */
                );
        return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
        .start  = rt_cpu_seq_start,
        .next   = rt_cpu_seq_next,
        .stop   = rt_cpu_seq_stop,
        .show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
        .open    = rt_cpu_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
        struct ip_rt_acct *dst, *src;
        unsigned int i, j;

        dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
        if (!dst)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
                for (j = 0; j < 256; j++) {
                        dst[j].o_bytes   += src[j].o_bytes;
                        dst[j].o_packets += src[j].o_packets;
                        dst[j].i_bytes   += src[j].i_bytes;
                        dst[j].i_packets += src[j].i_packets;
                }
        }

        seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
        kfree(dst);
        return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
        struct proc_dir_entry *pde;

        pde = proc_create("rt_cache", 0444, net->proc_net,
                          &rt_cache_seq_fops);
        if (!pde)
                goto err1;

        pde = proc_create("rt_cache", 0444,
                          net->proc_net_stat, &rt_cpu_seq_fops);
        if (!pde)
                goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
        pde = proc_create_single("rt_acct", 0, net->proc_net,
                        rt_acct_proc_show);
        if (!pde)
                goto err3;
#endif
        return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
        remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
        remove_proc_entry("rt_cache", net->proc_net);
err1:
        return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
        remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
        .init = ip_rt_do_proc_init,
        .exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
        return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
        return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
        return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
        rt_genid_bump_ipv4(net);
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr)
{
        const struct rtable *rt = container_of(dst, struct rtable, dst);
        struct net_device *dev = dst->dev;
        struct neighbour *n;

        rcu_read_lock_bh();

        if (likely(rt->rt_gw_family == AF_INET)) {
                n = ip_neigh_gw4(dev, rt->rt_gw4);
        } else if (rt->rt_gw_family == AF_INET6) {
                n = ip_neigh_gw6(dev, &rt->rt_gw6);
        } else {
                __be32 pkey;

                pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
                n = ip_neigh_gw4(dev, pkey);
        }

        if (n && !refcount_inc_not_zero(&n->refcnt))
                n = NULL;

        rcu_read_unlock_bh();

        return n;
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
        const struct rtable *rt = container_of(dst, struct rtable, dst);
        struct net_device *dev = dst->dev;
        const __be32 *pkey = daddr;

        if (rt->rt_gw_family == AF_INET) {
                pkey = (const __be32 *)&rt->rt_gw4;
        } else if (rt->rt_gw_family == AF_INET6) {
                return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
        } else if (!daddr ||
                 (rt->rt_flags &
                  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
                return;
        }
        __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 * ip_idents_reserve() hands out a block of "segs" consecutive IDs and
 * returns the first ID of that block.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
        u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
        atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
        u32 old = READ_ONCE(*p_tstamp);
        u32 now = (u32)jiffies;
        u32 new, delta = 0;

        if (old != now && cmpxchg(p_tstamp, old, now) == old)
                delta = prandom_u32_max(now - old);

        /* Do not use atomic_add_return() as it makes UBSAN unhappy */
        do {
                old = (u32)atomic_read(p_id);
                new = old + delta + segs;
        } while (atomic_cmpxchg(p_id, old, new) != old);

        return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
        u32 hash, id;

        /* Note the following code is not safe, but this is okay: the
         * key initialization below may race, yet every writer stores
         * random bytes, so any interleaving still leaves a usable
         * random key.
         */
        if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
                get_random_bytes(&net->ipv4.ip_id_key,
                                 sizeof(net->ipv4.ip_id_key));

        hash = siphash_3u32((__force u32)iph->daddr,
                            (__force u32)iph->saddr,
                            iph->protocol,
                            &net->ipv4.ip_id_key);
        id = ip_idents_reserve(hash, segs);
        iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
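
/* Sketch of typical use: when a non-DF datagram is sent, the output
 * path reserves one ID per GSO segment in a single call, roughly
 *
 *      __ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
 *
 * via ip_select_ident_segs() in <net/ip.h>.
 */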

static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
                             const struct sock *sk,
                             const struct iphdr *iph,
                             int oif, u8 tos,
                             u8 prot, u32 mark, int flow_flags)
{
        if (sk) {
                const struct inet_sock *inet = inet_sk(sk);

                oif = sk->sk_bound_dev_if;
                mark = sk->sk_mark;
                tos = RT_CONN_FLAGS(sk);
                prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
        }
        flowi4_init_output(fl4, oif, mark, tos,
                           RT_SCOPE_UNIVERSE, prot,
                           flow_flags,
                           iph->daddr, iph->saddr, 0, 0,
                           sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
                               const struct sock *sk)
{
        const struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = ip_hdr(skb);
        int oif = skb->dev->ifindex;
        u8 tos = RT_TOS(iph->tos);
        u8 prot = iph->protocol;
        u32 mark = skb->mark;

        __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;

        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                           inet_sk_flowi_flags(sk),
                           daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
        rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
                                 const struct sk_buff *skb)
{
        if (skb)
                build_skb_flow_key(fl4, skb, sk);
        else
                build_sk_flow_key(fl4, sk);
}

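/* fnhe_lock serializes all updates to the per-nexthop exception
 * tables (nh_exceptions); lookups are done under RCU.
 */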
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
        struct rtable *rt;

        rt = rcu_dereference(fnhe->fnhe_rth_input);
        if (rt) {
                RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
                dst_dev_put(&rt->dst);
                dst_release(&rt->dst);
        }
        rt = rcu_dereference(fnhe->fnhe_rth_output);
        if (rt) {
                RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
                dst_dev_put(&rt->dst);
                dst_release(&rt->dst);
        }
}

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
        struct fib_nh_exception *fnhe, *oldest;

        oldest = rcu_dereference(hash->chain);
        for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
                        oldest = fnhe;
        }
        fnhe_flush_routes(oldest);
        return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
        static u32 fnhe_hashrnd __read_mostly;
        u32 hval;

        net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
        hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
        return hash_32(hval, FNHE_HASH_SHIFT);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
        rt->rt_pmtu = fnhe->fnhe_pmtu;
        rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
        rt->dst.expires = fnhe->fnhe_expires;

        if (fnhe->fnhe_gw) {
                rt->rt_flags |= RTCF_REDIRECTED;
                rt->rt_gw_family = AF_INET;
                rt->rt_gw4 = fnhe->fnhe_gw;
        }
}

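/* Record a per-destination exception on a nexthop: a gateway learned
 * from an ICMP redirect and/or a PMTU learned from an ICMP
 * "fragmentation needed" message, together with an expiry time.
 */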
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                                  u32 pmtu, bool lock, unsigned long expires)
{
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe;
        struct rtable *rt;
        u32 genid, hval;
        unsigned int i;
        int depth;

        genid = fnhe_genid(dev_net(nh->fib_nh_dev));
        hval = fnhe_hashfun(daddr);

        spin_lock_bh(&fnhe_lock);

        hash = rcu_dereference(nh->nh_exceptions);
        if (!hash) {
                hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
                if (!hash)
                        goto out_unlock;
                rcu_assign_pointer(nh->nh_exceptions, hash);
        }

        hash += hval;

        depth = 0;
        for (fnhe = rcu_dereference(hash->chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (fnhe->fnhe_daddr == daddr)
                        break;
                depth++;
        }

        if (fnhe) {
                if (fnhe->fnhe_genid != genid)
                        fnhe->fnhe_genid = genid;
                if (gw)
                        fnhe->fnhe_gw = gw;
                if (pmtu) {
                        fnhe->fnhe_pmtu = pmtu;
                        fnhe->fnhe_mtu_locked = lock;
                }
                fnhe->fnhe_expires = max(1UL, expires);
                /* Update all cached dsts too */
                rt = rcu_dereference(fnhe->fnhe_rth_input);
                if (rt)
                        fill_route_from_fnhe(rt, fnhe);
                rt = rcu_dereference(fnhe->fnhe_rth_output);
                if (rt)
                        fill_route_from_fnhe(rt, fnhe);
        } else {
                if (depth > FNHE_RECLAIM_DEPTH)
                        fnhe = fnhe_oldest(hash);
                else {
                        fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
                        if (!fnhe)
                                goto out_unlock;

                        fnhe->fnhe_next = hash->chain;
                        rcu_assign_pointer(hash->chain, fnhe);
                }
                fnhe->fnhe_genid = genid;
                fnhe->fnhe_daddr = daddr;
                fnhe->fnhe_gw = gw;
                fnhe->fnhe_pmtu = pmtu;
                fnhe->fnhe_mtu_locked = lock;
                fnhe->fnhe_expires = max(1UL, expires);

                /* Exception created; mark the cached routes for the nexthop
                 * stale, so anyone caching it rechecks if this exception
                 * applies to them.
                 */
                rt = rcu_dereference(nh->nh_rth_input);
                if (rt)
                        rt->dst.obsolete = DST_OBSOLETE_KILL;

                for_each_possible_cpu(i) {
                        struct rtable __rcu **prt;
                        prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
                        rt = rcu_dereference(*prt);
                        if (rt)
                                rt->dst.obsolete = DST_OBSOLETE_KILL;
                }
        }

        fnhe->fnhe_stamp = jiffies;

out_unlock:
        spin_unlock_bh(&fnhe_lock);
}

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
                             bool kill_route)
{
        __be32 new_gw = icmp_hdr(skb)->un.gateway;
        __be32 old_gw = ip_hdr(skb)->saddr;
        struct net_device *dev = skb->dev;
        struct in_device *in_dev;
        struct fib_result res;
        struct neighbour *n;
        struct net *net;

        switch (icmp_hdr(skb)->code & 7) {
        case ICMP_REDIR_NET:
        case ICMP_REDIR_NETTOS:
        case ICMP_REDIR_HOST:
        case ICMP_REDIR_HOSTTOS:
                break;

        default:
                return;
        }

        if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
                return;

        in_dev = __in_dev_get_rcu(dev);
        if (!in_dev)
                return;

        net = dev_net(dev);
        if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
            ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
            ipv4_is_zeronet(new_gw))
                goto reject_redirect;

        if (!IN_DEV_SHARED_MEDIA(in_dev)) {
                if (!inet_addr_onlink(in_dev, new_gw, old_gw))
                        goto reject_redirect;
                if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
                        goto reject_redirect;
        } else {
                if (inet_addr_type(net, new_gw) != RTN_UNICAST)
                        goto reject_redirect;
        }

        n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
        if (!n)
                n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
        if (!IS_ERR(n)) {
                if (!(n->nud_state & NUD_VALID)) {
                        neigh_event_send(n, NULL);
                } else {
                        if (fib_lookup(net, fl4, &res, 0) == 0) {
                                struct fib_nh_common *nhc = FIB_RES_NHC(res);
                                struct fib_nh *nh;

                                nh = container_of(nhc, struct fib_nh, nh_common);
                                update_or_create_fnhe(nh, fl4->daddr, new_gw,
                                                0, false,
                                                jiffies + ip_rt_gc_timeout);
                        }
                        if (kill_route)
                                rt->dst.obsolete = DST_OBSOLETE_KILL;
                        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
                }
                neigh_release(n);
        }
        return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (IN_DEV_LOG_MARTIANS(in_dev)) {
                const struct iphdr *iph = (const struct iphdr *) skb->data;
                __be32 daddr = iph->daddr;
                __be32 saddr = iph->saddr;

                net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
                                     "  Advised path = %pI4 -> %pI4\n",
                                     &old_gw, dev->name, &new_gw,
                                     &saddr, &daddr);
        }
#endif
        ;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
        struct rtable *rt;
        struct flowi4 fl4;
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct net *net = dev_net(skb->dev);
        int oif = skb->dev->ifindex;
        u8 tos = RT_TOS(iph->tos);
        u8 prot = iph->protocol;
        u32 mark = skb->mark;

        rt = (struct rtable *) dst;

        __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
        __ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
        struct rtable *rt = (struct rtable *)dst;
        struct dst_entry *ret = dst;

        if (rt) {
                if (dst->obsolete > 0) {
                        ip_rt_put(rt);
                        ret = NULL;
                } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
                           rt->dst.expires) {
                        ip_rt_put(rt);
                        ret = NULL;
                }
        }
        return ret;
}

/*
 * Algorithm:
 *      1. The first ip_rt_redirect_number redirects are sent
 *         with exponential backoff, then we stop sending them at all,
 *         assuming that the host ignores our redirects.
 *      2. If we did not see packets requiring redirects
 *         during ip_rt_redirect_silence, we assume that the host
 *         has forgotten the redirected route and start sending
 *         redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
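
/* With the defaults above (ip_rt_redirect_load = HZ/50,
 * ip_rt_redirect_number = 9), the gap between successive redirects to
 * one peer grows roughly as 20ms, 40ms, 80ms, ..., and after nine
 * ignored redirects we stay silent until ip_rt_redirect_silence
 * (about 20s worth of jiffies) passes without redirect-worthy packets.
 */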

void ip_rt_send_redirect(struct sk_buff *skb)
{
        struct rtable *rt = skb_rtable(skb);
        struct in_device *in_dev;
        struct inet_peer *peer;
        struct net *net;
        int log_martians;
        int vif;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(rt->dst.dev);
        if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
                rcu_read_unlock();
                return;
        }
        log_martians = IN_DEV_LOG_MARTIANS(in_dev);
        vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
        rcu_read_unlock();

        net = dev_net(rt->dst.dev);
        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
        if (!peer) {
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
                          rt_nexthop(rt, ip_hdr(skb)->daddr));
                return;
        }

        /* No redirected packets during ip_rt_redirect_silence;
         * reset the algorithm.
         */
        if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
                peer->rate_tokens = 0;
                peer->n_redirects = 0;
        }

        /* Too many ignored redirects; do not send anything.
         * Set rate_last to the time of the last seen redirected packet.
         */
        if (peer->n_redirects >= ip_rt_redirect_number) {
                peer->rate_last = jiffies;
                goto out_put_peer;
        }

        /* Check for load limit; set rate_last to the latest sent
         * redirect.
         */
        if (peer->rate_tokens == 0 ||
            time_after(jiffies,
                       (peer->rate_last +
                        (ip_rt_redirect_load << peer->rate_tokens)))) {
                __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
                peer->rate_last = jiffies;
                ++peer->rate_tokens;
                ++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
                if (log_martians &&
                    peer->rate_tokens == ip_rt_redirect_number)
                        net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                             &ip_hdr(skb)->saddr, inet_iif(skb),
                                             &ip_hdr(skb)->daddr, &gw);
#endif
        }
out_put_peer:
        inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = skb->dev;
        struct in_device *in_dev;
        struct inet_peer *peer;
        unsigned long now;
        struct net *net;
        bool send;
        int code;

        if (netif_is_l3_master(skb->dev)) {
                dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
                if (!dev)
                        goto out;
        }

        in_dev = __in_dev_get_rcu(dev);

        /* IP on this device is disabled. */
        if (!in_dev)
                goto out;

        net = dev_net(rt->dst.dev);
        if (!IN_DEV_FORWARD(in_dev)) {
                switch (rt->dst.error) {
                case EHOSTUNREACH:
                        __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
                        break;

                case ENETUNREACH:
                        __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
                        break;
                }
                goto out;
        }

        switch (rt->dst.error) {
        case EINVAL:
        default:
                goto out;
        case EHOSTUNREACH:
                code = ICMP_HOST_UNREACH;
                break;
        case ENETUNREACH:
                code = ICMP_NET_UNREACH;
                __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
                break;
        case EACCES:
                code = ICMP_PKT_FILTERED;
                break;
        }

        peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
                               l3mdev_master_ifindex(skb->dev), 1);

        send = true;
        if (peer) {
                now = jiffies;
                peer->rate_tokens += now - peer->rate_last;
                if (peer->rate_tokens > ip_rt_error_burst)
                        peer->rate_tokens = ip_rt_error_burst;
                peer->rate_last = now;
                if (peer->rate_tokens >= ip_rt_error_cost)
                        peer->rate_tokens -= ip_rt_error_cost;
                else
                        send = false;
                inet_putpeer(peer);
        }
        if (send)
                icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:    kfree_skb(skb);
        return 0;
}

static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
        struct dst_entry *dst = &rt->dst;
        u32 old_mtu = ipv4_mtu(dst);
        struct fib_result res;
        bool lock = false;

        if (ip_mtu_locked(dst))
                return;

        if (old_mtu < mtu)
                return;

        if (mtu < ip_rt_min_pmtu) {
                lock = true;
                mtu = min(old_mtu, ip_rt_min_pmtu);
        }

        if (rt->rt_pmtu == mtu && !lock &&
            time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
                return;

        rcu_read_lock();
        if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
                struct fib_nh_common *nhc = FIB_RES_NHC(res);
                struct fib_nh *nh;

                nh = container_of(nhc, struct fib_nh, nh_common);
                update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
                                      jiffies + ip_rt_mtu_expires);
        }
        rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu)
{
        struct rtable *rt = (struct rtable *) dst;
        struct flowi4 fl4;

        ip_rt_build_flow_key(&fl4, sk, skb);
        __ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
                      int oif, u8 protocol)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;
        u32 mark = IP4_REPLY_MARK(net, skb->mark);

        __build_flow_key(net, &fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, mark, 0);
        rt = __ip_route_output_key(net, &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
                ip_rt_put(rt);
        }
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
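
/* Sketch of a typical caller: a tunnel's ICMP error handler, given the
 * ICMP "fragmentation needed" info value and the embedded header at
 * skb->data, could do something like
 *
 *      const struct iphdr *iph = (const struct iphdr *)skb->data;
 *
 *      ipv4_update_pmtu(skb, dev_net(skb->dev), info, 0, iph->protocol);
 *
 * so that the learned MTU is recorded as a nexthop exception via
 * __ip_rt_update_pmtu() above.
 */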

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;

        __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

        if (!fl4.flowi4_mark)
                fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

        rt = __ip_route_output_key(sock_net(sk), &fl4);
        if (!IS_ERR(rt)) {
                __ip_rt_update_pmtu(rt, &fl4, mtu);
                ip_rt_put(rt);
        }
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;
        struct dst_entry *odst = NULL;
        bool new = false;
        struct net *net = sock_net(sk);

        bh_lock_sock(sk);

        if (!ip_sk_accept_pmtu(sk))
                goto out;

        odst = sk_dst_get(sk);

        if (sock_owned_by_user(sk) || !odst) {
                __ipv4_sk_update_pmtu(skb, sk, mtu);
                goto out;
        }

        __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

        rt = (struct rtable *)odst;
        if (odst->obsolete && !odst->ops->check(odst, 0)) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                if (IS_ERR(rt))
                        goto out;

                new = true;
        }

        __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

        if (!dst_check(&rt->dst, 0)) {
                if (new)
                        dst_release(&rt->dst);

                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                if (IS_ERR(rt))
                        goto out;

                new = true;
        }

        if (new)
                sk_dst_set(sk, &rt->dst);

out:
        bh_unlock_sock(sk);
        dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
                   int oif, u8 protocol)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;

        __build_flow_key(net, &fl4, NULL, iph, oif,
                         RT_TOS(iph->tos), protocol, 0, 0);
        rt = __ip_route_output_key(net, &fl4);
        if (!IS_ERR(rt)) {
                __ip_do_redirect(rt, skb, &fl4, false);
                ip_rt_put(rt);
        }
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;
        struct net *net = sock_net(sk);

        __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
        rt = __ip_route_output_key(net, &fl4);
        if (!IS_ERR(rt)) {
                __ip_do_redirect(rt, skb, &fl4, false);
                ip_rt_put(rt);
        }
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
        struct rtable *rt = (struct rtable *) dst;

        /* All IPV4 dsts are created with ->obsolete set to the value
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         *
         * When a PMTU/redirect information update invalidates a route,
         * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
         * DST_OBSOLETE_DEAD.
         */
        if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
                return NULL;
        return dst;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
        struct rtable *rt;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

        rt = skb_rtable(skb);
        if (rt)
                dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        pr_debug("%s: %pI4 -> %pI4, %s\n",
                 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
                 skb->dev ? skb->dev->name : "?");
        kfree_skb(skb);
        WARN_ON(1);
        return 0;
}

/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it stays out of the fast path.
 *
 * BTW remember: "addr" may be unaligned when it points
 * into IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
        __be32 src;

        if (rt_is_output_route(rt))
                src = ip_hdr(skb)->saddr;
        else {
                struct fib_result res;
                struct iphdr *iph = ip_hdr(skb);
                struct flowi4 fl4 = {
                        .daddr = iph->daddr,
                        .saddr = iph->saddr,
                        .flowi4_tos = RT_TOS(iph->tos),
                        .flowi4_oif = rt->dst.dev->ifindex,
                        .flowi4_iif = skb->dev->ifindex,
                        .flowi4_mark = skb->mark,
                };

                rcu_read_lock();
                if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
                        src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
                else
                        src = inet_select_addr(rt->dst.dev,
                                               rt_nexthop(rt, iph->daddr),
                                               RT_SCOPE_UNIVERSE);
                rcu_read_unlock();
        }
        memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
        if (!(rt->dst.tclassid & 0xFFFF))
                rt->dst.tclassid |= tag & 0xFFFF;
        if (!(rt->dst.tclassid & 0xFFFF0000))
                rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
        unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
        unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
                                    ip_rt_min_advmss);

        return min(advmss, IPV4_MAX_PMTU - header_size);
}

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
        const struct rtable *rt = (const struct rtable *) dst;
        unsigned int mtu = rt->rt_pmtu;

        if (!mtu || time_after_eq(jiffies, rt->dst.expires))
                mtu = dst_metric_raw(dst, RTAX_MTU);

        if (mtu)
                return mtu;

        mtu = READ_ONCE(dst->dev->mtu);

        if (unlikely(ip_mtu_locked(dst))) {
                if (rt->rt_gw_family && mtu > 576)
                        mtu = 576;
        }

        mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

        return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
{
        struct fnhe_hash_bucket *hash;
        struct fib_nh_exception *fnhe, __rcu **fnhe_p;
        u32 hval = fnhe_hashfun(daddr);

        spin_lock_bh(&fnhe_lock);

        hash = rcu_dereference_protected(nh->nh_exceptions,
                                         lockdep_is_held(&fnhe_lock));
        hash += hval;

        fnhe_p = &hash->chain;
        fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
        while (fnhe) {
                if (fnhe->fnhe_daddr == daddr) {
                        rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
                                fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
                        /* set fnhe_daddr to 0 to ensure it won't bind with
                         * new dsts in rt_bind_exception().
                         */
                        fnhe->fnhe_daddr = 0;
                        fnhe_flush_routes(fnhe);
                        kfree_rcu(fnhe, rcu);
                        break;
                }
                fnhe_p = &fnhe->fnhe_next;
                fnhe = rcu_dereference_protected(fnhe->fnhe_next,
                                                 lockdep_is_held(&fnhe_lock));
        }

        spin_unlock_bh(&fnhe_lock);
}

static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
        struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
        struct fib_nh_exception *fnhe;
        u32 hval;

        if (!hash)
                return NULL;

        hval = fnhe_hashfun(daddr);

        for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
                if (fnhe->fnhe_daddr == daddr) {
                        if (fnhe->fnhe_expires &&
                            time_after(jiffies, fnhe->fnhe_expires)) {
                                ip_del_fnhe(nh, daddr);
                                break;
                        }
                        return fnhe;
                }
        }
        return NULL;
}

/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */

u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
        struct fib_nh_common *nhc = res->nhc;
        struct net_device *dev = nhc->nhc_dev;
        struct fib_info *fi = res->fi;
        u32 mtu = 0;

        if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
            fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
                mtu = fi->fib_mtu;

        if (likely(!mtu)) {
                struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
                struct fib_nh_exception *fnhe;

                fnhe = find_exception(nh, daddr);
                if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
                        mtu = fnhe->fnhe_pmtu;
        }

        if (likely(!mtu))
                mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

        return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
                              __be32 daddr, const bool do_cache)
{
        bool ret = false;

        spin_lock_bh(&fnhe_lock);

        if (daddr == fnhe->fnhe_daddr) {
                struct rtable __rcu **porig;
                struct rtable *orig;
                int genid = fnhe_genid(dev_net(rt->dst.dev));

                if (rt_is_input_route(rt))
                        porig = &fnhe->fnhe_rth_input;
                else
                        porig = &fnhe->fnhe_rth_output;
                orig = rcu_dereference(*porig);

                if (fnhe->fnhe_genid != genid) {
                        fnhe->fnhe_genid = genid;
                        fnhe->fnhe_gw = 0;
                        fnhe->fnhe_pmtu = 0;
                        fnhe->fnhe_expires = 0;
                        fnhe->fnhe_mtu_locked = false;
                        fnhe_flush_routes(fnhe);
                        orig = NULL;
                }
                fill_route_from_fnhe(rt, fnhe);
                if (!rt->rt_gw4) {
                        rt->rt_gw4 = daddr;
                        rt->rt_gw_family = AF_INET;
                }

                if (do_cache) {
                        dst_hold(&rt->dst);
                        rcu_assign_pointer(*porig, rt);
                        if (orig) {
                                dst_dev_put(&orig->dst);
                                dst_release(&orig->dst);
                        }
                        ret = true;
                }

                fnhe->fnhe_stamp = jiffies;
        }
        spin_unlock_bh(&fnhe_lock);

        return ret;
}

static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
        struct rtable *orig, *prev, **p;
        bool ret = true;

        if (rt_is_input_route(rt)) {
                p = (struct rtable **)&nh->nh_rth_input;
        } else {
                p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
        }
        orig = *p;

        /* hold dst before doing cmpxchg() to avoid race condition
         * on this dst
         */
        dst_hold(&rt->dst);
        prev = cmpxchg(p, orig, rt);
        if (prev == orig) {
                if (orig) {
                        dst_dev_put(&orig->dst);
                        dst_release(&orig->dst);
                }
        } else {
                dst_release(&rt->dst);
                ret = false;
        }

        return ret;
}

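/* Routes that could not be stored in the FIB nexthop cache are kept on
 * a per-cpu "uncached" list so that rt_flush_dev() can still find them
 * and drop their device reference when a device goes away.
 */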
1474 struct uncached_list {
1475         spinlock_t              lock;
1476         struct list_head        head;
1477 };
1478
1479 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1480
1481 void rt_add_uncached_list(struct rtable *rt)
1482 {
1483         struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1484
1485         rt->rt_uncached_list = ul;
1486
1487         spin_lock_bh(&ul->lock);
1488         list_add_tail(&rt->rt_uncached, &ul->head);
1489         spin_unlock_bh(&ul->lock);
1490 }
1491
1492 void rt_del_uncached_list(struct rtable *rt)
1493 {
1494         if (!list_empty(&rt->rt_uncached)) {
1495                 struct uncached_list *ul = rt->rt_uncached_list;
1496
1497                 spin_lock_bh(&ul->lock);
1498                 list_del(&rt->rt_uncached);
1499                 spin_unlock_bh(&ul->lock);
1500         }
1501 }
1502
1503 static void ipv4_dst_destroy(struct dst_entry *dst)
1504 {
1505         struct rtable *rt = (struct rtable *)dst;
1506
1507         ip_dst_metrics_put(dst);
1508         rt_del_uncached_list(rt);
1509 }
1510
1511 void rt_flush_dev(struct net_device *dev)
1512 {
1513         struct net *net = dev_net(dev);
1514         struct rtable *rt;
1515         int cpu;
1516
1517         for_each_possible_cpu(cpu) {
1518                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1519
1520                 spin_lock_bh(&ul->lock);
1521                 list_for_each_entry(rt, &ul->head, rt_uncached) {
1522                         if (rt->dst.dev != dev)
1523                                 continue;
1524                         rt->dst.dev = net->loopback_dev;
1525                         dev_hold(rt->dst.dev);
1526                         dev_put(dev);
1527                 }
1528                 spin_unlock_bh(&ul->lock);
1529         }
1530 }
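/* Why the loop above exists, in short: every dst, including uncached
 * rtables, pins its output device via dev_hold().  When that device is
 * unregistered, each matching dst is re-pointed at the per-netns loopback
 * device so the dying device's reference count can reach zero without
 * waiting for every dst to be freed.
 */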
1531
1532 static bool rt_cache_valid(const struct rtable *rt)
1533 {
1534         return  rt &&
1535                 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1536                 !rt_is_expired(rt);
1537 }
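/* In other words, a cached entry is only reused while its dst is still of
 * the DST_OBSOLETE_FORCE_CHK kind (so ->check() is consulted on every
 * lookup) and its rt_genid matches the current per-netns generation;
 * bumping the generation invalidates every cached route at once.
 */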
1538
1539 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1540                            const struct fib_result *res,
1541                            struct fib_nh_exception *fnhe,
1542                            struct fib_info *fi, u16 type, u32 itag,
1543                            const bool do_cache)
1544 {
1545         bool cached = false;
1546
1547         if (fi) {
1548                 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1549                 struct fib_nh *nh;
1550
1551                 if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
1552                         rt->rt_gw_family = nhc->nhc_gw_family;
1553                         /* only INET and INET6 are supported */
1554                         if (likely(nhc->nhc_gw_family == AF_INET))
1555                                 rt->rt_gw4 = nhc->nhc_gw.ipv4;
1556                         else
1557                                 rt->rt_gw6 = nhc->nhc_gw.ipv6;
1558                 }
1559
1560                 ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1561
1562                 nh = container_of(nhc, struct fib_nh, nh_common);
1563 #ifdef CONFIG_IP_ROUTE_CLASSID
1564                 rt->dst.tclassid = nh->nh_tclassid;
1565 #endif
1566                 rt->dst.lwtstate = lwtstate_get(nh->fib_nh_lws);
1567                 if (unlikely(fnhe))
1568                         cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1569                 else if (do_cache)
1570                         cached = rt_cache_route(nh, rt);
1571                 if (unlikely(!cached)) {
1572                         /* We intended to cache this route in the nexthop
1573                          * exception or FIB nexthop, but failed; track it
1574                          * on the uncached list instead so that it is
1575                          * still found on device teardown.
1576                          */
1577                         if (!rt->rt_gw4) {
1578                                 rt->rt_gw_family = AF_INET;
1579                                 rt->rt_gw4 = daddr;
1580                         }
1581                         rt_add_uncached_list(rt);
1582                 }
1583         } else
1584                 rt_add_uncached_list(rt);
1585
1586 #ifdef CONFIG_IP_ROUTE_CLASSID
1587 #ifdef CONFIG_IP_MULTIPLE_TABLES
1588         set_class_tag(rt, res->tclassid);
1589 #endif
1590         set_class_tag(rt, itag);
1591 #endif
1592 }
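/* Summary of the nexthop-caching decision above (a sketch of existing
 * behaviour, not new logic):
 *
 *	fnhe != NULL -> rt_bind_exception()   (per-destination exception)
 *	do_cache     -> rt_cache_route()      (per-nexthop cache)
 *	otherwise    -> rt_add_uncached_list()
 */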
1593
1594 struct rtable *rt_dst_alloc(struct net_device *dev,
1595                             unsigned int flags, u16 type,
1596                             bool nopolicy, bool noxfrm, bool will_cache)
1597 {
1598         struct rtable *rt;
1599
1600         rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1601                        (will_cache ? 0 : DST_HOST) |
1602                        (nopolicy ? DST_NOPOLICY : 0) |
1603                        (noxfrm ? DST_NOXFRM : 0));
1604
1605         if (rt) {
1606                 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1607                 rt->rt_flags = flags;
1608                 rt->rt_type = type;
1609                 rt->rt_is_input = 0;
1610                 rt->rt_iif = 0;
1611                 rt->rt_pmtu = 0;
1612                 rt->rt_mtu_locked = 0;
1613                 rt->rt_gw_family = 0;
1614                 rt->rt_gw4 = 0;
1615                 INIT_LIST_HEAD(&rt->rt_uncached);
1616
1617                 rt->dst.output = ip_output;
1618                 if (flags & RTCF_LOCAL)
1619                         rt->dst.input = ip_local_deliver;
1620         }
1621
1622         return rt;
1623 }
1624 EXPORT_SYMBOL(rt_dst_alloc);
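/* Minimal usage sketch for rt_dst_alloc(); the caller, device and flag
 * choices below are hypothetical and mirror ip_route_input_mc() further
 * down:
 *
 *	struct rtable *rt;
 *
 *	rt = rt_dst_alloc(dev, RTCF_MULTICAST, RTN_MULTICAST,
 *			  false, false, false);
 *	if (!rt)
 *		return -ENOBUFS;
 *	rt->rt_is_input = 1;
 *	skb_dst_set(skb, &rt->dst);
 */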
1625
1626 /* called in rcu_read_lock() section */
1627 int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1628                           u8 tos, struct net_device *dev,
1629                           struct in_device *in_dev, u32 *itag)
1630 {
1631         int err;
1632
1633         /* Primary sanity checks. */
1634         if (!in_dev)
1635                 return -EINVAL;
1636
1637         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1638             skb->protocol != htons(ETH_P_IP))
1639                 return -EINVAL;
1640
1641         if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1642                 return -EINVAL;
1643
1644         if (ipv4_is_zeronet(saddr)) {
1645                 if (!ipv4_is_local_multicast(daddr) &&
1646                     ip_hdr(skb)->protocol != IPPROTO_IGMP)
1647                         return -EINVAL;
1648         } else {
1649                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1650                                           in_dev, itag);
1651                 if (err < 0)
1652                         return err;
1653         }
1654         return 0;
1655 }
1656
1657 /* called in rcu_read_lock() section */
1658 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1659                              u8 tos, struct net_device *dev, int our)
1660 {
1661         struct in_device *in_dev = __in_dev_get_rcu(dev);
1662         unsigned int flags = RTCF_MULTICAST;
1663         struct rtable *rth;
1664         u32 itag = 0;
1665         int err;
1666
1667         err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1668         if (err)
1669                 return err;
1670
1671         if (our)
1672                 flags |= RTCF_LOCAL;
1673
1674         rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1675                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1676         if (!rth)
1677                 return -ENOBUFS;
1678
1679 #ifdef CONFIG_IP_ROUTE_CLASSID
1680         rth->dst.tclassid = itag;
1681 #endif
1682         rth->dst.output = ip_rt_bug;
1683         rth->rt_is_input = 1;
1684
1685 #ifdef CONFIG_IP_MROUTE
1686         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1687                 rth->dst.input = ip_mr_input;
1688 #endif
1689         RT_CACHE_STAT_INC(in_slow_mc);
1690
1691         skb_dst_set(skb, &rth->dst);
1692         return 0;
1693 }
1694
1695
1696 static void ip_handle_martian_source(struct net_device *dev,
1697                                      struct in_device *in_dev,
1698                                      struct sk_buff *skb,
1699                                      __be32 daddr,
1700                                      __be32 saddr)
1701 {
1702         RT_CACHE_STAT_INC(in_martian_src);
1703 #ifdef CONFIG_IP_ROUTE_VERBOSE
1704         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1705                 /*
1706                  *      RFC1812 recommendation: if the source is martian,
1707                  *      the only hint we can log is the MAC header.
1708                  */
1709                 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1710                         &daddr, &saddr, dev->name);
1711                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1712                         print_hex_dump(KERN_WARNING, "ll header: ",
1713                                        DUMP_PREFIX_OFFSET, 16, 1,
1714                                        skb_mac_header(skb),
1715                                        dev->hard_header_len, false);
1716                 }
1717         }
1718 #endif
1719 }
1720
1721 /* called in rcu_read_lock() section */
1722 static int __mkroute_input(struct sk_buff *skb,
1723                            const struct fib_result *res,
1724                            struct in_device *in_dev,
1725                            __be32 daddr, __be32 saddr, u32 tos)
1726 {
1727         struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1728         struct net_device *dev = nhc->nhc_dev;
1729         struct fib_nh_exception *fnhe;
1730         struct rtable *rth;
1731         struct fib_nh *nh;
1732         int err;
1733         struct in_device *out_dev;
1734         bool do_cache;
1735         u32 itag = 0;
1736
1737         /* get a working reference to the output device */
1738         out_dev = __in_dev_get_rcu(dev);
1739         if (!out_dev) {
1740                 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1741                 return -EINVAL;
1742         }
1743
1744         err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1745                                   in_dev->dev, in_dev, &itag);
1746         if (err < 0) {
1747                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1748                                          saddr);
1749
1750                 goto cleanup;
1751         }
1752
1753         do_cache = res->fi && !itag;
1754         if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1755             skb->protocol == htons(ETH_P_IP)) {
1756                 __be32 gw;
1757
1758                 gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
1759                 if (IN_DEV_SHARED_MEDIA(out_dev) ||
1760                     inet_addr_onlink(out_dev, saddr, gw))
1761                         IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1762         }
1763
1764         if (skb->protocol != htons(ETH_P_IP)) {
1765                 /* Not IP (i.e. ARP). Do not create a route if it is
1766                  * invalid for proxy arp. DNAT routes are always valid.
1767                  *
1768                  * The proxy arp feature has been extended to allow ARP
1769                  * replies back on the same interface, to support
1770                  * Private VLAN switch technologies. See arp.c.
1771                  */
1772                 if (out_dev == in_dev &&
1773                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1774                         err = -EINVAL;
1775                         goto cleanup;
1776                 }
1777         }
1778
1779         nh = container_of(nhc, struct fib_nh, nh_common);
1780         fnhe = find_exception(nh, daddr);
1781         if (do_cache) {
1782                 if (fnhe)
1783                         rth = rcu_dereference(fnhe->fnhe_rth_input);
1784                 else
1785                         rth = rcu_dereference(nh->nh_rth_input);
1786                 if (rt_cache_valid(rth)) {
1787                         skb_dst_set_noref(skb, &rth->dst);
1788                         goto out;
1789                 }
1790         }
1791
1792         rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1793                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
1794                            IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1795         if (!rth) {
1796                 err = -ENOBUFS;
1797                 goto cleanup;
1798         }
1799
1800         rth->rt_is_input = 1;
1801         RT_CACHE_STAT_INC(in_slow_tot);
1802
1803         rth->dst.input = ip_forward;
1804
1805         rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1806                        do_cache);
1807         lwtunnel_set_redirect(&rth->dst);
1808         skb_dst_set(skb, &rth->dst);
1809 out:
1810         err = 0;
1811  cleanup:
1812         return err;
1813 }
1814
1815 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1816 /* To make ICMP packets follow the right flow, the multipath hash is
1817  * calculated from the inner IP addresses.
1818  */
1819 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1820                                  struct flow_keys *hash_keys)
1821 {
1822         const struct iphdr *outer_iph = ip_hdr(skb);
1823         const struct iphdr *key_iph = outer_iph;
1824         const struct iphdr *inner_iph;
1825         const struct icmphdr *icmph;
1826         struct iphdr _inner_iph;
1827         struct icmphdr _icmph;
1828
1829         if (likely(outer_iph->protocol != IPPROTO_ICMP))
1830                 goto out;
1831
1832         if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1833                 goto out;
1834
1835         icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1836                                    &_icmph);
1837         if (!icmph)
1838                 goto out;
1839
1840         if (icmph->type != ICMP_DEST_UNREACH &&
1841             icmph->type != ICMP_REDIRECT &&
1842             icmph->type != ICMP_TIME_EXCEEDED &&
1843             icmph->type != ICMP_PARAMETERPROB)
1844                 goto out;
1845
1846         inner_iph = skb_header_pointer(skb,
1847                                        outer_iph->ihl * 4 + sizeof(_icmph),
1848                                        sizeof(_inner_iph), &_inner_iph);
1849         if (!inner_iph)
1850                 goto out;
1851
1852         key_iph = inner_iph;
1853 out:
1854         hash_keys->addrs.v4addrs.src = key_iph->saddr;
1855         hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1856 }
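/* Rationale (sketch): an ICMP error quotes the header of the packet that
 * triggered it.  Hashing on those inner addresses steers the error onto
 * the same ECMP leg as the original flow, so PMTU discovery and redirects
 * reach the host that actually owns the connection.
 */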
1857
1858 /* if skb is set it will be used and fl4 can be NULL */
1859 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
1860                        const struct sk_buff *skb, struct flow_keys *flkeys)
1861 {
1862         u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
1863         struct flow_keys hash_keys;
1864         u32 mhash;
1865
1866         switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
1867         case 0:
1868                 memset(&hash_keys, 0, sizeof(hash_keys));
1869                 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1870                 if (skb) {
1871                         ip_multipath_l3_keys(skb, &hash_keys);
1872                 } else {
1873                         hash_keys.addrs.v4addrs.src = fl4->saddr;
1874                         hash_keys.addrs.v4addrs.dst = fl4->daddr;
1875                 }
1876                 break;
1877         case 1:
1878                 /* skb is currently provided only when forwarding */
1879                 if (skb) {
1880                         unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1881                         struct flow_keys keys;
1882
1883                         /* short-circuit if we already have L4 hash present */
1884                         if (skb->l4_hash)
1885                                 return skb_get_hash_raw(skb) >> 1;
1886
1887                         memset(&hash_keys, 0, sizeof(hash_keys));
1888
1889                         if (!flkeys) {
1890                                 skb_flow_dissect_flow_keys(skb, &keys, flag);
1891                                 flkeys = &keys;
1892                         }
1893
1894                         hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1895                         hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
1896                         hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
1897                         hash_keys.ports.src = flkeys->ports.src;
1898                         hash_keys.ports.dst = flkeys->ports.dst;
1899                         hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
1900                 } else {
1901                         memset(&hash_keys, 0, sizeof(hash_keys));
1902                         hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1903                         hash_keys.addrs.v4addrs.src = fl4->saddr;
1904                         hash_keys.addrs.v4addrs.dst = fl4->daddr;
1905                         hash_keys.ports.src = fl4->fl4_sport;
1906                         hash_keys.ports.dst = fl4->fl4_dport;
1907                         hash_keys.basic.ip_proto = fl4->flowi4_proto;
1908                 }
1909                 break;
1910         }
1911         mhash = flow_hash_from_keys(&hash_keys);
1912
1913         if (multipath_hash)
1914                 mhash = jhash_2words(mhash, multipath_hash, 0);
1915
1916         return mhash >> 1;
1917 }
1918 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
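/* The policy switch above is driven by a per-netns sysctl.  Only the two
 * values handled in this file are shown; an example of flipping it from
 * userspace:
 *
 *	# L3 hashing on src/dst addresses (the default)
 *	sysctl -w net.ipv4.fib_multipath_hash_policy=0
 *	# L4 hashing on the 5-tuple where available
 *	sysctl -w net.ipv4.fib_multipath_hash_policy=1
 */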
1919
1920 static int ip_mkroute_input(struct sk_buff *skb,
1921                             struct fib_result *res,
1922                             struct in_device *in_dev,
1923                             __be32 daddr, __be32 saddr, u32 tos,
1924                             struct flow_keys *hkeys)
1925 {
1926 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1927         if (res->fi && res->fi->fib_nhs > 1) {
1928                 int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
1929
1930                 fib_select_multipath(res, h);
1931         }
1932 #endif
1933
1934         /* create a routing cache entry */
1935         return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1936 }
1937
1938 /*
1939  *      NOTE. We drop all packets that have a local source
1940  *      address, because every properly looped-back packet
1941  *      must already have the correct destination attached by the output routine.
1942  *
1943  *      This approach solves two big problems:
1944  *      1. Non-simplex devices are handled properly.
1945  *      2. IP spoofing attempts are filtered with a 100% guarantee.
1946  *      Called with rcu_read_lock().
1947  */
1948
1949 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1950                                u8 tos, struct net_device *dev,
1951                                struct fib_result *res)
1952 {
1953         struct in_device *in_dev = __in_dev_get_rcu(dev);
1954         struct flow_keys *flkeys = NULL, _flkeys;
1955         struct net    *net = dev_net(dev);
1956         struct ip_tunnel_info *tun_info;
1957         int             err = -EINVAL;
1958         unsigned int    flags = 0;
1959         u32             itag = 0;
1960         struct rtable   *rth;
1961         struct flowi4   fl4;
1962         bool do_cache;
1963
1964         /* IP on this device is disabled. */
1965
1966         if (!in_dev)
1967                 goto out;
1968
1969         /* Check for the weirdest martians, which cannot be detected
1970            by fib_lookup().
1971          */
1972
1973         tun_info = skb_tunnel_info(skb);
1974         if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
1975                 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
1976         else
1977                 fl4.flowi4_tun_key.tun_id = 0;
1978         skb_dst_drop(skb);
1979
1980         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1981                 goto martian_source;
1982
1983         res->fi = NULL;
1984         res->table = NULL;
1985         if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1986                 goto brd_input;
1987
1988         /* Accept zero addresses only for limited broadcast;
1989          * I do not even know whether to fix this or not. Waiting for complaints :-)
1990          */
1991         if (ipv4_is_zeronet(saddr))
1992                 goto martian_source;
1993
1994         if (ipv4_is_zeronet(daddr))
1995                 goto martian_destination;
1996
1997         /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
1998          * more than once when daddr and/or saddr are loopback addresses.
1999          */
2000         if (ipv4_is_loopback(daddr)) {
2001                 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2002                         goto martian_destination;
2003         } else if (ipv4_is_loopback(saddr)) {
2004                 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2005                         goto martian_source;
2006         }
2007
2008         /*
2009          *      Now we are ready to route the packet.
2010          */
2011         fl4.flowi4_oif = 0;
2012         fl4.flowi4_iif = dev->ifindex;
2013         fl4.flowi4_mark = skb->mark;
2014         fl4.flowi4_tos = tos;
2015         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2016         fl4.flowi4_flags = 0;
2017         fl4.daddr = daddr;
2018         fl4.saddr = saddr;
2019         fl4.flowi4_uid = sock_net_uid(net, NULL);
2020
2021         if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2022                 flkeys = &_flkeys;
2023         } else {
2024                 fl4.flowi4_proto = 0;
2025                 fl4.fl4_sport = 0;
2026                 fl4.fl4_dport = 0;
2027         }
2028
2029         err = fib_lookup(net, &fl4, res, 0);
2030         if (err != 0) {
2031                 if (!IN_DEV_FORWARD(in_dev))
2032                         err = -EHOSTUNREACH;
2033                 goto no_route;
2034         }
2035
2036         if (res->type == RTN_BROADCAST) {
2037                 if (IN_DEV_BFORWARD(in_dev))
2038                         goto make_route;
2039                 goto brd_input;
2040         }
2041
2042         if (res->type == RTN_LOCAL) {
2043                 err = fib_validate_source(skb, saddr, daddr, tos,
2044                                           0, dev, in_dev, &itag);
2045                 if (err < 0)
2046                         goto martian_source;
2047                 goto local_input;
2048         }
2049
2050         if (!IN_DEV_FORWARD(in_dev)) {
2051                 err = -EHOSTUNREACH;
2052                 goto no_route;
2053         }
2054         if (res->type != RTN_UNICAST)
2055                 goto martian_destination;
2056
2057 make_route:
2058         err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2059 out:    return err;
2060
2061 brd_input:
2062         if (skb->protocol != htons(ETH_P_IP))
2063                 goto e_inval;
2064
2065         if (!ipv4_is_zeronet(saddr)) {
2066                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2067                                           in_dev, &itag);
2068                 if (err < 0)
2069                         goto martian_source;
2070         }
2071         flags |= RTCF_BROADCAST;
2072         res->type = RTN_BROADCAST;
2073         RT_CACHE_STAT_INC(in_brd);
2074
2075 local_input:
2076         do_cache = false;
2077         if (res->fi) {
2078                 if (!itag) {
2079                         struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2080                         struct fib_nh *nh;
2081
2082                         nh = container_of(nhc, struct fib_nh, nh_common);
2083                         rth = rcu_dereference(nh->nh_rth_input);
2084                         if (rt_cache_valid(rth)) {
2085                                 skb_dst_set_noref(skb, &rth->dst);
2086                                 err = 0;
2087                                 goto out;
2088                         }
2089                         do_cache = true;
2090                 }
2091         }
2092
2093         rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
2094                            flags | RTCF_LOCAL, res->type,
2095                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
2096         if (!rth)
2097                 goto e_nobufs;
2098
2099         rth->dst.output = ip_rt_bug;
2100 #ifdef CONFIG_IP_ROUTE_CLASSID
2101         rth->dst.tclassid = itag;
2102 #endif
2103         rth->rt_is_input = 1;
2104
2105         RT_CACHE_STAT_INC(in_slow_tot);
2106         if (res->type == RTN_UNREACHABLE) {
2107                 rth->dst.input = ip_error;
2108                 rth->dst.error = -err;
2109                 rth->rt_flags &= ~RTCF_LOCAL;
2110         }
2111
2112         if (do_cache) {
2113                 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2114                 struct fib_nh *nh;
2115
2116                 rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2117                 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2118                         WARN_ON(rth->dst.input == lwtunnel_input);
2119                         rth->dst.lwtstate->orig_input = rth->dst.input;
2120                         rth->dst.input = lwtunnel_input;
2121                 }
2122
2123                 nh = container_of(nhc, struct fib_nh, nh_common);
2124                 if (unlikely(!rt_cache_route(nh, rth)))
2125                         rt_add_uncached_list(rth);
2126         }
2127         skb_dst_set(skb, &rth->dst);
2128         err = 0;
2129         goto out;
2130
2131 no_route:
2132         RT_CACHE_STAT_INC(in_no_route);
2133         res->type = RTN_UNREACHABLE;
2134         res->fi = NULL;
2135         res->table = NULL;
2136         goto local_input;
2137
2138         /*
2139          *      Do not cache martian addresses: they should be logged (RFC1812)
2140          */
2141 martian_destination:
2142         RT_CACHE_STAT_INC(in_martian_dst);
2143 #ifdef CONFIG_IP_ROUTE_VERBOSE
2144         if (IN_DEV_LOG_MARTIANS(in_dev))
2145                 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2146                                      &daddr, &saddr, dev->name);
2147 #endif
2148
2149 e_inval:
2150         err = -EINVAL;
2151         goto out;
2152
2153 e_nobufs:
2154         err = -ENOBUFS;
2155         goto out;
2156
2157 martian_source:
2158         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2159         goto out;
2160 }
2161
2162 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2163                          u8 tos, struct net_device *dev)
2164 {
2165         struct fib_result res;
2166         int err;
2167
2168         tos &= IPTOS_RT_MASK;
2169         rcu_read_lock();
2170         err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2171         rcu_read_unlock();
2172
2173         return err;
2174 }
2175 EXPORT_SYMBOL(ip_route_input_noref);
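/* Typical receive-path usage, sketched after ip_rcv_finish(); error
 * handling trimmed.  On success the skb carries a noref dst and
 * dst_input(skb) dispatches it:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err;
 *
 *	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				   iph->tos, skb->dev);
 *	if (unlikely(err))
 *		goto drop;
 *	return dst_input(skb);
 */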
2176
2177 /* called with rcu_read_lock held */
2178 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2179                        u8 tos, struct net_device *dev, struct fib_result *res)
2180 {
2181         /* Multicast recognition logic was moved from the route cache to here.
2182            The problem was that too many Ethernet cards have broken/missing
2183            hardware multicast filters :-( As a result, a host on a multicast
2184            network acquires a lot of useless route cache entries, e.g. for
2185            SDR messages from all over the world. Now we try to get rid of them.
2186            Really, provided the software IP multicast filter is organized
2187            reasonably (at least, hashed), it does not result in a slowdown
2188            compared with route cache reject entries.
2189            Note that multicast routers are not affected, because a
2190            route cache entry is created eventually.
2191          */
2192         if (ipv4_is_multicast(daddr)) {
2193                 struct in_device *in_dev = __in_dev_get_rcu(dev);
2194                 int our = 0;
2195                 int err = -EINVAL;
2196
2197                 if (!in_dev)
2198                         return err;
2199                 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2200                                       ip_hdr(skb)->protocol);
2201
2202                 /* check l3 master if no match yet */
2203                 if (!our && netif_is_l3_slave(dev)) {
2204                         struct in_device *l3_in_dev;
2205
2206                         l3_in_dev = __in_dev_get_rcu(skb->dev);
2207                         if (l3_in_dev)
2208                                 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2209                                                       ip_hdr(skb)->protocol);
2210                 }
2211
2212                 if (our
2213 #ifdef CONFIG_IP_MROUTE
2214                         ||
2215                     (!ipv4_is_local_multicast(daddr) &&
2216                      IN_DEV_MFORWARD(in_dev))
2217 #endif
2218                    ) {
2219                         err = ip_route_input_mc(skb, daddr, saddr,
2220                                                 tos, dev, our);
2221                 }
2222                 return err;
2223         }
2224
2225         return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2226 }
2227
2228 /* called with rcu_read_lock() */
2229 static struct rtable *__mkroute_output(const struct fib_result *res,
2230                                        const struct flowi4 *fl4, int orig_oif,
2231                                        struct net_device *dev_out,
2232                                        unsigned int flags)
2233 {
2234         struct fib_info *fi = res->fi;
2235         struct fib_nh_exception *fnhe;
2236         struct in_device *in_dev;
2237         u16 type = res->type;
2238         struct rtable *rth;
2239         bool do_cache;
2240
2241         in_dev = __in_dev_get_rcu(dev_out);
2242         if (!in_dev)
2243                 return ERR_PTR(-EINVAL);
2244
2245         if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2246                 if (ipv4_is_loopback(fl4->saddr) &&
2247                     !(dev_out->flags & IFF_LOOPBACK) &&
2248                     !netif_is_l3_master(dev_out))
2249                         return ERR_PTR(-EINVAL);
2250
2251         if (ipv4_is_lbcast(fl4->daddr))
2252                 type = RTN_BROADCAST;
2253         else if (ipv4_is_multicast(fl4->daddr))
2254                 type = RTN_MULTICAST;
2255         else if (ipv4_is_zeronet(fl4->daddr))
2256                 return ERR_PTR(-EINVAL);
2257
2258         if (dev_out->flags & IFF_LOOPBACK)
2259                 flags |= RTCF_LOCAL;
2260
2261         do_cache = true;
2262         if (type == RTN_BROADCAST) {
2263                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2264                 fi = NULL;
2265         } else if (type == RTN_MULTICAST) {
2266                 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2267                 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2268                                      fl4->flowi4_proto))
2269                         flags &= ~RTCF_LOCAL;
2270                 else
2271                         do_cache = false;
2272                 /* If a multicast route does not exist, use the
2273                  * default one, but do not gateway in this case.
2274                  * Yes, it is a hack.
2275                  */
2276                 if (fi && res->prefixlen < 4)
2277                         fi = NULL;
2278         } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2279                    (orig_oif != dev_out->ifindex)) {
2280                 /* For local routes that require a particular output interface
2281                  * we do not want to cache the result.  Caching the result
2282                  * causes incorrect behaviour when there are multiple source
2283                  * addresses on the interface: if the intended recipient is
2284                  * waiting on that interface for the packet, it won't be
2285                  * received, because it will be delivered on the loopback
2286                  * interface and the IP_PKTINFO ipi_ifindex will be set to
2287                  * the loopback interface as well.
2288                  */
2289                 do_cache = false;
2290         }
2291
2292         fnhe = NULL;
2293         do_cache &= fi != NULL;
2294         if (fi) {
2295                 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2296                 struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
2297                 struct rtable __rcu **prth;
2298
2299                 fnhe = find_exception(nh, fl4->daddr);
2300                 if (!do_cache)
2301                         goto add;
2302                 if (fnhe) {
2303                         prth = &fnhe->fnhe_rth_output;
2304                 } else {
2305                         if (unlikely(fl4->flowi4_flags &
2306                                      FLOWI_FLAG_KNOWN_NH &&
2307                                      !(nhc->nhc_gw_family &&
2308                                        nhc->nhc_scope == RT_SCOPE_LINK))) {
2309                                 do_cache = false;
2310                                 goto add;
2311                         }
2312                         prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
2313                 }
2314                 rth = rcu_dereference(*prth);
2315                 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2316                         return rth;
2317         }
2318
2319 add:
2320         rth = rt_dst_alloc(dev_out, flags, type,
2321                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2322                            IN_DEV_CONF_GET(in_dev, NOXFRM),
2323                            do_cache);
2324         if (!rth)
2325                 return ERR_PTR(-ENOBUFS);
2326
2327         rth->rt_iif = orig_oif;
2328
2329         RT_CACHE_STAT_INC(out_slow_tot);
2330
2331         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2332                 if (flags & RTCF_LOCAL &&
2333                     !(dev_out->flags & IFF_LOOPBACK)) {
2334                         rth->dst.output = ip_mc_output;
2335                         RT_CACHE_STAT_INC(out_slow_mc);
2336                 }
2337 #ifdef CONFIG_IP_MROUTE
2338                 if (type == RTN_MULTICAST) {
2339                         if (IN_DEV_MFORWARD(in_dev) &&
2340                             !ipv4_is_local_multicast(fl4->daddr)) {
2341                                 rth->dst.input = ip_mr_input;
2342                                 rth->dst.output = ip_mc_output;
2343                         }
2344                 }
2345 #endif
2346         }
2347
2348         rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2349         lwtunnel_set_redirect(&rth->dst);
2350
2351         return rth;
2352 }
2353
2354 /*
2355  * Major route resolver routine.
2356  */
2357
2358 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2359                                         const struct sk_buff *skb)
2360 {
2361         __u8 tos = RT_FL_TOS(fl4);
2362         struct fib_result res = {
2363                 .type           = RTN_UNSPEC,
2364                 .fi             = NULL,
2365                 .table          = NULL,
2366                 .tclassid       = 0,
2367         };
2368         struct rtable *rth;
2369
2370         fl4->flowi4_iif = LOOPBACK_IFINDEX;
2371         fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2372         fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2373                          RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2374
2375         rcu_read_lock();
2376         rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2377         rcu_read_unlock();
2378
2379         return rth;
2380 }
2381 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2382
2383 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2384                                             struct fib_result *res,
2385                                             const struct sk_buff *skb)
2386 {
2387         struct net_device *dev_out = NULL;
2388         int orig_oif = fl4->flowi4_oif;
2389         unsigned int flags = 0;
2390         struct rtable *rth;
2391         int err = -ENETUNREACH;
2392
2393         if (fl4->saddr) {
2394                 rth = ERR_PTR(-EINVAL);
2395                 if (ipv4_is_multicast(fl4->saddr) ||
2396                     ipv4_is_lbcast(fl4->saddr) ||
2397                     ipv4_is_zeronet(fl4->saddr))
2398                         goto out;
2399
2400                 /* I removed the check for oif == dev_out->oif here.
2401                    It was wrong for two reasons:
2402                    1. ip_dev_find(net, saddr) can return the wrong iface if
2403                       saddr is assigned to multiple interfaces.
2404                    2. Moreover, we are allowed to send packets with the saddr
2405                       of another iface. --ANK
2406                  */
2407
2408                 if (fl4->flowi4_oif == 0 &&
2409                     (ipv4_is_multicast(fl4->daddr) ||
2410                      ipv4_is_lbcast(fl4->daddr))) {
2411                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2412                         dev_out = __ip_dev_find(net, fl4->saddr, false);
2413                         if (!dev_out)
2414                                 goto out;
2415
2416                         /* Special hack: the user can direct multicasts
2417                            and limited broadcast via the necessary interface
2418                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2419                            This hack is not just for fun, it allows
2420                            vic, vat and friends to work.
2421                            They bind the socket to loopback, set ttl to zero
2422                            and expect that it will work.
2423                            From the viewpoint of the routing cache they are
2424                            broken, because we are not allowed to build a
2425                            multicast path with a loopback source addr (the
2426                            routing cache cannot know that ttl is zero, so the
2427                            packet will not leave this host and the route is
2428                            valid). Luckily, this hack is a good workaround.
2429                          */
2430
2431                         fl4->flowi4_oif = dev_out->ifindex;
2432                         goto make_route;
2433                 }
2434
2435                 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2436                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2437                         if (!__ip_dev_find(net, fl4->saddr, false))
2438                                 goto out;
2439                 }
2440         }
2441
2442
2443         if (fl4->flowi4_oif) {
2444                 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2445                 rth = ERR_PTR(-ENODEV);
2446                 if (!dev_out)
2447                         goto out;
2448
2449                 /* RACE: Check return value of inet_select_addr instead. */
2450                 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2451                         rth = ERR_PTR(-ENETUNREACH);
2452                         goto out;
2453                 }
2454                 if (ipv4_is_local_multicast(fl4->daddr) ||
2455                     ipv4_is_lbcast(fl4->daddr) ||
2456                     fl4->flowi4_proto == IPPROTO_IGMP) {
2457                         if (!fl4->saddr)
2458                                 fl4->saddr = inet_select_addr(dev_out, 0,
2459                                                               RT_SCOPE_LINK);
2460                         goto make_route;
2461                 }
2462                 if (!fl4->saddr) {
2463                         if (ipv4_is_multicast(fl4->daddr))
2464                                 fl4->saddr = inet_select_addr(dev_out, 0,
2465                                                               fl4->flowi4_scope);
2466                         else if (!fl4->daddr)
2467                                 fl4->saddr = inet_select_addr(dev_out, 0,
2468                                                               RT_SCOPE_HOST);
2469                 }
2470         }
2471
2472         if (!fl4->daddr) {
2473                 fl4->daddr = fl4->saddr;
2474                 if (!fl4->daddr)
2475                         fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2476                 dev_out = net->loopback_dev;
2477                 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2478                 res->type = RTN_LOCAL;
2479                 flags |= RTCF_LOCAL;
2480                 goto make_route;
2481         }
2482
2483         err = fib_lookup(net, fl4, res, 0);
2484         if (err) {
2485                 res->fi = NULL;
2486                 res->table = NULL;
2487                 if (fl4->flowi4_oif &&
2488                     (ipv4_is_multicast(fl4->daddr) ||
2489                     !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2490                         /* Apparently, the routing tables are wrong. Assume
2491                            that the destination is on-link.
2492
2493                            WHY? DW.
2494                            Because we are allowed to send to an iface
2495                            even if it has NO routes and NO assigned
2496                            addresses. When oif is specified, routing
2497                            tables are looked up with only one purpose:
2498                            to catch whether the destination is gatewayed,
2499                            rather than direct. Moreover, if MSG_DONTROUTE
2500                            is set, we send the packet, ignoring both
2501                            routing tables and ifaddr state. --ANK
2502
2503
2504                            We could do this even if oif is unknown,
2505                            as IPv6 likely does, but we do not.
2506                          */
2507
2508                         if (fl4->saddr == 0)
2509                                 fl4->saddr = inet_select_addr(dev_out, 0,
2510                                                               RT_SCOPE_LINK);
2511                         res->type = RTN_UNICAST;
2512                         goto make_route;
2513                 }
2514                 rth = ERR_PTR(err);
2515                 goto out;
2516         }
2517
2518         if (res->type == RTN_LOCAL) {
2519                 if (!fl4->saddr) {
2520                         if (res->fi->fib_prefsrc)
2521                                 fl4->saddr = res->fi->fib_prefsrc;
2522                         else
2523                                 fl4->saddr = fl4->daddr;
2524                 }
2525
2526                 /* L3 master device is the loopback for that domain */
2527                 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2528                         net->loopback_dev;
2529
2530                 /* make sure orig_oif points to fib result device even
2531                  * though packet rx/tx happens over loopback or l3mdev
2532                  */
2533                 orig_oif = FIB_RES_OIF(*res);
2534
2535                 fl4->flowi4_oif = dev_out->ifindex;
2536                 flags |= RTCF_LOCAL;
2537                 goto make_route;
2538         }
2539
2540         fib_select_path(net, res, fl4, skb);
2541
2542         dev_out = FIB_RES_DEV(*res);
2543         fl4->flowi4_oif = dev_out->ifindex;
2544
2545
2546 make_route:
2547         rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2548
2549 out:
2550         return rth;
2551 }
2552
2553 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2554 {
2555         return NULL;
2556 }
2557
2558 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2559 {
2560         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2561
2562         return mtu ? : dst->dev->mtu;
2563 }
2564
2565 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2566                                           struct sk_buff *skb, u32 mtu)
2567 {
2568 }
2569
2570 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2571                                        struct sk_buff *skb)
2572 {
2573 }
2574
2575 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2576                                           unsigned long old)
2577 {
2578         return NULL;
2579 }
2580
2581 static struct dst_ops ipv4_dst_blackhole_ops = {
2582         .family                 =       AF_INET,
2583         .check                  =       ipv4_blackhole_dst_check,
2584         .mtu                    =       ipv4_blackhole_mtu,
2585         .default_advmss         =       ipv4_default_advmss,
2586         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2587         .redirect               =       ipv4_rt_blackhole_redirect,
2588         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
2589         .neigh_lookup           =       ipv4_neigh_lookup,
2590 };
2591
2592 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2593 {
2594         struct rtable *ort = (struct rtable *) dst_orig;
2595         struct rtable *rt;
2596
2597         rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2598         if (rt) {
2599                 struct dst_entry *new = &rt->dst;
2600
2601                 new->__use = 1;
2602                 new->input = dst_discard;
2603                 new->output = dst_discard_out;
2604
2605                 new->dev = net->loopback_dev;
2606                 if (new->dev)
2607                         dev_hold(new->dev);
2608
2609                 rt->rt_is_input = ort->rt_is_input;
2610                 rt->rt_iif = ort->rt_iif;
2611                 rt->rt_pmtu = ort->rt_pmtu;
2612                 rt->rt_mtu_locked = ort->rt_mtu_locked;
2613
2614                 rt->rt_genid = rt_genid_ipv4(net);
2615                 rt->rt_flags = ort->rt_flags;
2616                 rt->rt_type = ort->rt_type;
2617                 rt->rt_gw_family = ort->rt_gw_family;
2618                 if (rt->rt_gw_family == AF_INET)
2619                         rt->rt_gw4 = ort->rt_gw4;
2620                 else if (rt->rt_gw_family == AF_INET6)
2621                         rt->rt_gw6 = ort->rt_gw6;
2622
2623                 INIT_LIST_HEAD(&rt->rt_uncached);
2624         }
2625
2626         dst_release(dst_orig);
2627
2628         return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2629 }
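/* What the blackhole copy is for (sketch): xfrm may have to hand back a
 * dst whose lookup state is already dead.  The clone above preserves the
 * flow-visible fields of the original route but discards every packet
 * (dst_discard/dst_discard_out) and always fails ->check(), forcing
 * holders to re-resolve on the next use.
 */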
2630
2631 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2632                                     const struct sock *sk)
2633 {
2634         struct rtable *rt = __ip_route_output_key(net, flp4);
2635
2636         if (IS_ERR(rt))
2637                 return rt;
2638
2639         if (flp4->flowi4_proto)
2640                 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2641                                                         flowi4_to_flowi(flp4),
2642                                                         sk, 0);
2643
2644         return rt;
2645 }
2646 EXPORT_SYMBOL_GPL(ip_route_output_flow);
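/* Minimal output-lookup sketch (hypothetical caller; net, sk and daddr are
 * assumed to be in scope, and do_something_with() is a stand-in for real
 * work on the dst):
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= daddr,
 *		.flowi4_proto	= IPPROTO_UDP,
 *	};
 *	struct rtable *rt;
 *
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	err = do_something_with(&rt->dst);
 *	ip_rt_put(rt);
 */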
2647
2648 /* called with rcu_read_lock held */
2649 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2650                         struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2651                         struct sk_buff *skb, u32 portid, u32 seq)
2652 {
2653         struct rtmsg *r;
2654         struct nlmsghdr *nlh;
2655         unsigned long expires = 0;
2656         u32 error;
2657         u32 metrics[RTAX_MAX];
2658
2659         nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
2660         if (!nlh)
2661                 return -EMSGSIZE;
2662
2663         r = nlmsg_data(nlh);
2664         r->rtm_family    = AF_INET;
2665         r->rtm_dst_len  = 32;
2666         r->rtm_src_len  = 0;
2667         r->rtm_tos      = fl4->flowi4_tos;
2668         r->rtm_table    = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2669         if (nla_put_u32(skb, RTA_TABLE, table_id))
2670                 goto nla_put_failure;
2671         r->rtm_type     = rt->rt_type;
2672         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2673         r->rtm_protocol = RTPROT_UNSPEC;
2674         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2675         if (rt->rt_flags & RTCF_NOTIFY)
2676                 r->rtm_flags |= RTM_F_NOTIFY;
2677         if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2678                 r->rtm_flags |= RTCF_DOREDIRECT;
2679
2680         if (nla_put_in_addr(skb, RTA_DST, dst))
2681                 goto nla_put_failure;
2682         if (src) {
2683                 r->rtm_src_len = 32;
2684                 if (nla_put_in_addr(skb, RTA_SRC, src))
2685                         goto nla_put_failure;
2686         }
2687         if (rt->dst.dev &&
2688             nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2689                 goto nla_put_failure;
2690 #ifdef CONFIG_IP_ROUTE_CLASSID
2691         if (rt->dst.tclassid &&
2692             nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2693                 goto nla_put_failure;
2694 #endif
2695         if (!rt_is_input_route(rt) &&
2696             fl4->saddr != src) {
2697                 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2698                         goto nla_put_failure;
2699         }
2700         if (rt->rt_gw_family == AF_INET &&
2701             nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2702                 goto nla_put_failure;
2703         } else if (rt->rt_gw_family == AF_INET6) {
2704                 int alen = sizeof(struct in6_addr);
2705                 struct nlattr *nla;
2706                 struct rtvia *via;
2707
2708                 nla = nla_reserve(skb, RTA_VIA, alen + 2);
2709                 if (!nla)
2710                         goto nla_put_failure;
2711
2712                 via = nla_data(nla);
2713                 via->rtvia_family = AF_INET6;
2714                 memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
2715         }
2716
2717         expires = rt->dst.expires;
2718         if (expires) {
2719                 unsigned long now = jiffies;
2720
2721                 if (time_before(now, expires))
2722                         expires -= now;
2723                 else
2724                         expires = 0;
2725         }
2726
2727         memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2728         if (rt->rt_pmtu && expires)
2729                 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2730         if (rt->rt_mtu_locked && expires)
2731                 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2732         if (rtnetlink_put_metrics(skb, metrics) < 0)
2733                 goto nla_put_failure;
2734
2735         if (fl4->flowi4_mark &&
2736             nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2737                 goto nla_put_failure;
2738
2739         if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2740             nla_put_u32(skb, RTA_UID,
2741                         from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
2742                 goto nla_put_failure;
2743
2744         error = rt->dst.error;
2745
2746         if (rt_is_input_route(rt)) {
2747 #ifdef CONFIG_IP_MROUTE
2748                 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2749                     IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2750                         int err = ipmr_get_route(net, skb,
2751                                                  fl4->saddr, fl4->daddr,
2752                                                  r, portid);
2753
2754                         if (err <= 0) {
2755                                 if (err == 0)
2756                                         return 0;
2757                                 goto nla_put_failure;
2758                         }
2759                 } else
2760 #endif
2761                         if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
2762                                 goto nla_put_failure;
2763         }
2764
2765         if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2766                 goto nla_put_failure;
2767
2768         nlmsg_end(skb, nlh);
2769         return 0;
2770
2771 nla_put_failure:
2772         nlmsg_cancel(skb, nlh);
2773         return -EMSGSIZE;
2774 }
2775
2776 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
2777                                                    u8 ip_proto, __be16 sport,
2778                                                    __be16 dport)
2779 {
2780         struct sk_buff *skb;
2781         struct iphdr *iph;
2782
2783         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2784         if (!skb)
2785                 return NULL;
2786
2787         /* Reserve room for dummy headers; this skb can pass
2788          * through a good chunk of the routing engine.
2789          */
2790         skb_reset_mac_header(skb);
2791         skb_reset_network_header(skb);
2792         skb->protocol = htons(ETH_P_IP);
2793         iph = skb_put(skb, sizeof(struct iphdr));
2794         iph->protocol = ip_proto;
2795         iph->saddr = src;
2796         iph->daddr = dst;
2797         iph->version = 0x4;
2798         iph->frag_off = 0;
2799         iph->ihl = 0x5;
2800         skb_set_transport_header(skb, skb->len);
2801
2802         switch (iph->protocol) {
2803         case IPPROTO_UDP: {
2804                 struct udphdr *udph;
2805
2806                 udph = skb_put_zero(skb, sizeof(struct udphdr));
2807                 udph->source = sport;
2808                 udph->dest = dport;
2809                 udph->len = sizeof(struct udphdr);
2810                 udph->check = 0;
2811                 break;
2812         }
2813         case IPPROTO_TCP: {
2814                 struct tcphdr *tcph;
2815
2816                 tcph = skb_put_zero(skb, sizeof(struct tcphdr));
2817                 tcph->source    = sport;
2818                 tcph->dest      = dport;
2819                 tcph->doff      = sizeof(struct tcphdr) / 4;
2820                 tcph->rst = 1;
2821                 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
2822                                             src, dst, 0);
2823                 break;
2824         }
2825         case IPPROTO_ICMP: {
2826                 struct icmphdr *icmph;
2827
2828                 icmph = skb_put_zero(skb, sizeof(struct icmphdr));
2829                 icmph->type = ICMP_ECHO;
2830                 icmph->code = 0;
2831         }
2832         }
2833
2834         return skb;
2835 }
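/* The dummy skb above lets RTM_GETROUTE exercise the real input path with
 * a realistic L4 header, since fib rules and multipath hashing may key on
 * protocol and ports.  Userspace reaches this via, e.g. (addresses and
 * interface purely illustrative):
 *
 *	ip route get 192.0.2.1 from 198.51.100.1 iif eth0
 */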
2836
2837 static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
2838                                        const struct nlmsghdr *nlh,
2839                                        struct nlattr **tb,
2840                                        struct netlink_ext_ack *extack)
2841 {
2842         struct rtmsg *rtm;
2843         int i, err;
2844
2845         if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
2846                 NL_SET_ERR_MSG(extack,
2847                                "ipv4: Invalid header for route get request");
2848                 return -EINVAL;
2849         }
2850
2851         if (!netlink_strict_get_check(skb))
2852                 return nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
2853                                    rtm_ipv4_policy, extack);
2854
2855         rtm = nlmsg_data(nlh);
2856         if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
2857             (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
2858             rtm->rtm_table || rtm->rtm_protocol ||
2859             rtm->rtm_scope || rtm->rtm_type) {
2860                 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
2861                 return -EINVAL;
2862         }
2863
2864         if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
2865                                RTM_F_LOOKUP_TABLE |
2866                                RTM_F_FIB_MATCH)) {
2867                 NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
2868                 return -EINVAL;
2869         }
2870
2871         err = nlmsg_parse_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
2872                                  rtm_ipv4_policy, extack);
2873         if (err)
2874                 return err;
2875
2876         if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
2877             (tb[RTA_DST] && !rtm->rtm_dst_len)) {
2878                 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
2879                 return -EINVAL;
2880         }
2881
2882         for (i = 0; i <= RTA_MAX; i++) {
2883                 if (!tb[i])
2884                         continue;
2885
2886                 switch (i) {
2887                 case RTA_IIF:
2888                 case RTA_OIF:
2889                 case RTA_SRC:
2890                 case RTA_DST:
2891                 case RTA_IP_PROTO:
2892                 case RTA_SPORT:
2893                 case RTA_DPORT:
2894                 case RTA_MARK:
2895                 case RTA_UID:
2896                         break;
2897                 default:
2898                         NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
2899                         return -EINVAL;
2900                 }
2901         }
2902
2903         return 0;
2904 }
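/* Note on the strict path above: sockets that enabled
 * NETLINK_GET_STRICT_CHK get every unused header field and unknown
 * attribute rejected with an extack message, instead of the legacy
 * behaviour of silently ignoring them.
 */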
2905
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev	= dev;
		skb->mark	= mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_trim(skb, 0);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
	}
	if (err < 0)
		goto errout_rcu;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;
errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}

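/* A multicast configuration change on the device invalidates cached
 * routing decisions for its namespace.
 */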
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;

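/* Write-only handler for /proc/sys/net/ipv4/route/flush: writing any
 * value flushes the routing cache and invalidates cached next-hop
 * exceptions for the owning namespace; reads fail with -EINVAL.
 */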
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}

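/* Global tunables exposed under /proc/sys/net/ipv4/route/.  These are
 * system wide, not per network namespace; only "flush" below is
 * registered per namespace.
 */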
static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/*  Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ip_min_valid_pmtu,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

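/* Register the per-namespace "flush" sysctl.  The template table is
 * duplicated for every namespace other than init_net, and the entry is
 * hidden (procname cleared) from namespaces owned by an unprivileged
 * user namespace.
 */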
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

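/* Seed the per-namespace generation counters.  Bumping rt_genid or
 * fnhe_genid later is what actually "flushes" cached routes and next-hop
 * exceptions: entries created under an older generation are simply no
 * longer considered valid.
 */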
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

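/* Per-namespace inetpeer base: long-lived, per-remote-address state
 * (e.g. ICMP rate limiting) shared by routes to the same peer.
 */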
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	=	ipv4_inetpeer_init,
	.exit	=	ipv4_inetpeer_exit,
};

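/* Per-cpu, per-realm packet/byte counters for the route classifier,
 * exported via /proc/net/rt_acct.
 */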
#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

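/* Boot-time initialization: IP ID state, per-cpu uncached route lists,
 * the dst slab caches and entry counters, /proc files, XFRM hooks, the
 * RTM_GETROUTE handler and the pernet subsystems above.  Failures here
 * are fatal (panic), as IPv4 cannot run without them.
 */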
int __init ip_rt_init(void)
{
	int cpu;

	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif