1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/types.h>
18 #include <linux/netfilter.h>
19 #include <linux/module.h>
20 #include <linux/sched.h>
21 #include <linux/skbuff.h>
22 #include <linux/proc_fs.h>
23 #include <linux/vmalloc.h>
24 #include <linux/stddef.h>
25 #include <linux/slab.h>
26 #include <linux/random.h>
27 #include <linux/jhash.h>
28 #include <linux/err.h>
29 #include <linux/percpu.h>
30 #include <linux/moduleparam.h>
31 #include <linux/notifier.h>
32 #include <linux/kernel.h>
33 #include <linux/netdevice.h>
34 #include <linux/socket.h>
35 #include <linux/mm.h>
36 #include <linux/nsproxy.h>
37 #include <linux/rculist_nulls.h>
38
39 #include <net/netfilter/nf_conntrack.h>
40 #include <net/netfilter/nf_conntrack_l3proto.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_expect.h>
43 #include <net/netfilter/nf_conntrack_helper.h>
44 #include <net/netfilter/nf_conntrack_seqadj.h>
45 #include <net/netfilter/nf_conntrack_core.h>
46 #include <net/netfilter/nf_conntrack_extend.h>
47 #include <net/netfilter/nf_conntrack_acct.h>
48 #include <net/netfilter/nf_conntrack_ecache.h>
49 #include <net/netfilter/nf_conntrack_zones.h>
50 #include <net/netfilter/nf_conntrack_timestamp.h>
51 #include <net/netfilter/nf_conntrack_timeout.h>
52 #include <net/netfilter/nf_conntrack_labels.h>
53 #include <net/netfilter/nf_conntrack_synproxy.h>
54 #include <net/netfilter/nf_nat.h>
55 #include <net/netfilter/nf_nat_core.h>
56 #include <net/netfilter/nf_nat_helper.h>
57 #include <net/netns/hash.h>
58
59 #include "nf_internals.h"
60
61 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
62                                       enum nf_nat_manip_type manip,
63                                       const struct nlattr *attr) __read_mostly;
64 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
65
66 __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
67 EXPORT_SYMBOL_GPL(nf_conntrack_locks);
68
69 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
70 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
71
72 struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
73 EXPORT_SYMBOL_GPL(nf_conntrack_hash);
74
75 struct conntrack_gc_work {
76         struct delayed_work     dwork;
77         u32                     last_bucket;
78         bool                    exiting;
79         bool                    early_drop;
80         long                    next_gc_run;
81 };
82
83 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
84 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
85 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
86 static __read_mostly bool nf_conntrack_locks_all;
87
88 /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
89 #define GC_MAX_BUCKETS_DIV      128u
90 /* upper bound of full table scan */
91 #define GC_MAX_SCAN_JIFFIES     (16u * HZ)
92 /* desired ratio of entries found to be expired */
93 #define GC_EVICT_RATIO  50u
94
95 static struct conntrack_gc_work conntrack_gc_work;
96
97 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
98 {
99         /* 1) Acquire the lock */
100         spin_lock(lock);
101
102         /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
103          * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
104          */
105         if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
106                 return;
107
108         /* fast path failed, unlock */
109         spin_unlock(lock);
110
111         /* Slow path 1) get global lock */
112         spin_lock(&nf_conntrack_locks_all_lock);
113
114         /* Slow path 2) get the lock we want */
115         spin_lock(lock);
116
117         /* Slow path 3) release the global lock */
118         spin_unlock(&nf_conntrack_locks_all_lock);
119 }
120 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
121
122 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
123 {
124         h1 %= CONNTRACK_LOCKS;
125         h2 %= CONNTRACK_LOCKS;
126         spin_unlock(&nf_conntrack_locks[h1]);
127         if (h1 != h2)
128                 spin_unlock(&nf_conntrack_locks[h2]);
129 }
130
131 /* return true if we need to recompute hashes (in case hash table was resized) */
132 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
133                                      unsigned int h2, unsigned int sequence)
134 {
135         h1 %= CONNTRACK_LOCKS;
136         h2 %= CONNTRACK_LOCKS;
137         if (h1 <= h2) {
138                 nf_conntrack_lock(&nf_conntrack_locks[h1]);
139                 if (h1 != h2)
140                         spin_lock_nested(&nf_conntrack_locks[h2],
141                                          SINGLE_DEPTH_NESTING);
142         } else {
143                 nf_conntrack_lock(&nf_conntrack_locks[h2]);
144                 spin_lock_nested(&nf_conntrack_locks[h1],
145                                  SINGLE_DEPTH_NESTING);
146         }
147         if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
148                 nf_conntrack_double_unlock(h1, h2);
149                 return true;
150         }
151         return false;
152 }
153
154 static void nf_conntrack_all_lock(void)
155 {
156         int i;
157
158         spin_lock(&nf_conntrack_locks_all_lock);
159
160         nf_conntrack_locks_all = true;
161
162         for (i = 0; i < CONNTRACK_LOCKS; i++) {
163                 spin_lock(&nf_conntrack_locks[i]);
164
165                 /* This spin_unlock provides the "release" to ensure that
166                  * nf_conntrack_locks_all==true is visible to everyone that
167                  * acquired spin_lock(&nf_conntrack_locks[]).
168                  */
169                 spin_unlock(&nf_conntrack_locks[i]);
170         }
171 }
172
173 static void nf_conntrack_all_unlock(void)
174 {
175         /* All prior stores must be complete before we clear
176          * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
177          * might observe the false value but not the entire
178          * critical section.
179          * It pairs with the smp_load_acquire() in nf_conntrack_lock()
180          */
181         smp_store_release(&nf_conntrack_locks_all, false);
182         spin_unlock(&nf_conntrack_locks_all_lock);
183 }
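
/* Usage sketch (illustrative, not verbatim from a caller): a writer that must
 * exclude every per-bucket lock holder, such as a hash table resize, brackets
 * its critical section with the global helpers:
 *
 *	nf_conntrack_all_lock();
 *	... move entries to the new table ...
 *	nf_conntrack_all_unlock();
 *
 * Everyone touching a single bucket keeps using nf_conntrack_lock() on the
 * per-bucket spinlock and only pays a smp_load_acquire() on the fast path.
 */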
184
185 unsigned int nf_conntrack_htable_size __read_mostly;
186 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
187
188 unsigned int nf_conntrack_max __read_mostly;
189 EXPORT_SYMBOL_GPL(nf_conntrack_max);
190 seqcount_t nf_conntrack_generation __read_mostly;
191 static unsigned int nf_conntrack_hash_rnd __read_mostly;
192
193 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
194                               const struct net *net)
195 {
196         unsigned int n;
197         u32 seed;
198
199         get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
200
201         /* The direction must be ignored, so we hash everything up to the
202          * destination ports (which is a multiple of 4) and treat the last
203          * three bytes manually.
204          */
205         seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
206         n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
207         return jhash2((u32 *)tuple, n, seed ^
208                       (((__force __u16)tuple->dst.u.all << 16) |
209                       tuple->dst.protonum));
210 }
211
212 static u32 scale_hash(u32 hash)
213 {
214         return reciprocal_scale(hash, nf_conntrack_htable_size);
215 }
216
217 static u32 __hash_conntrack(const struct net *net,
218                             const struct nf_conntrack_tuple *tuple,
219                             unsigned int size)
220 {
221         return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
222 }
223
224 static u32 hash_conntrack(const struct net *net,
225                           const struct nf_conntrack_tuple *tuple)
226 {
227         return scale_hash(hash_conntrack_raw(tuple, net));
228 }
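
/* Note: reciprocal_scale() maps the 32-bit hash onto a bucket index in
 * [0, size) using a multiply and shift, (u32)(((u64)hash * size) >> 32),
 * which avoids a modulo and does not require the table size to be a
 * power of two.
 */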
229
230 bool
231 nf_ct_get_tuple(const struct sk_buff *skb,
232                 unsigned int nhoff,
233                 unsigned int dataoff,
234                 u_int16_t l3num,
235                 u_int8_t protonum,
236                 struct net *net,
237                 struct nf_conntrack_tuple *tuple,
238                 const struct nf_conntrack_l3proto *l3proto,
239                 const struct nf_conntrack_l4proto *l4proto)
240 {
241         memset(tuple, 0, sizeof(*tuple));
242
243         tuple->src.l3num = l3num;
244         if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
245                 return false;
246
247         tuple->dst.protonum = protonum;
248         tuple->dst.dir = IP_CT_DIR_ORIGINAL;
249
250         return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
251 }
252 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
253
254 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
255                        u_int16_t l3num,
256                        struct net *net, struct nf_conntrack_tuple *tuple)
257 {
258         const struct nf_conntrack_l3proto *l3proto;
259         const struct nf_conntrack_l4proto *l4proto;
260         unsigned int protoff;
261         u_int8_t protonum;
262         int ret;
263
264         rcu_read_lock();
265
266         l3proto = __nf_ct_l3proto_find(l3num);
267         ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
268         if (ret != NF_ACCEPT) {
269                 rcu_read_unlock();
270                 return false;
271         }
272
273         l4proto = __nf_ct_l4proto_find(l3num, protonum);
274
275         ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
276                               l3proto, l4proto);
277
278         rcu_read_unlock();
279         return ret;
280 }
281 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
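
/* Example (illustrative sketch only, not a caller in this file): building the
 * original-direction tuple for an IPv4 packet:
 *
 *	struct nf_conntrack_tuple tuple;
 *
 *	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), NFPROTO_IPV4,
 *			       net, &tuple))
 *		return;
 *
 * A false return means the packet was malformed or could not be mapped to a
 * tuple by the L3/L4 trackers.
 */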
282
283 bool
284 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
285                    const struct nf_conntrack_tuple *orig,
286                    const struct nf_conntrack_l3proto *l3proto,
287                    const struct nf_conntrack_l4proto *l4proto)
288 {
289         memset(inverse, 0, sizeof(*inverse));
290
291         inverse->src.l3num = orig->src.l3num;
292         if (l3proto->invert_tuple(inverse, orig) == 0)
293                 return false;
294
295         inverse->dst.dir = !orig->dst.dir;
296
297         inverse->dst.protonum = orig->dst.protonum;
298         return l4proto->invert_tuple(inverse, orig);
299 }
300 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
301
302 static void
303 clean_from_lists(struct nf_conn *ct)
304 {
305         pr_debug("clean_from_lists(%p)\n", ct);
306         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
307         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
308
309         /* Destroy all pending expectations */
310         nf_ct_remove_expectations(ct);
311 }
312
313 /* must be called with local_bh_disable */
314 static void nf_ct_add_to_dying_list(struct nf_conn *ct)
315 {
316         struct ct_pcpu *pcpu;
317
318         /* add this conntrack to the (per cpu) dying list */
319         ct->cpu = smp_processor_id();
320         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
321
322         spin_lock(&pcpu->lock);
323         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
324                              &pcpu->dying);
325         spin_unlock(&pcpu->lock);
326 }
327
328 /* must be called with local_bh_disable */
329 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
330 {
331         struct ct_pcpu *pcpu;
332
333         /* add this conntrack to the (per cpu) unconfirmed list */
334         ct->cpu = smp_processor_id();
335         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
336
337         spin_lock(&pcpu->lock);
338         hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
339                              &pcpu->unconfirmed);
340         spin_unlock(&pcpu->lock);
341 }
342
343 /* must be called with local_bh_disable */
344 static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
345 {
346         struct ct_pcpu *pcpu;
347
348         /* We overload first tuple to link into unconfirmed or dying list.*/
349         pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
350
351         spin_lock(&pcpu->lock);
352         BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
353         hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
354         spin_unlock(&pcpu->lock);
355 }
356
357 #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
358
359 /* Released via destroy_conntrack() */
360 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
361                                  const struct nf_conntrack_zone *zone,
362                                  gfp_t flags)
363 {
364         struct nf_conn *tmpl, *p;
365
366         if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
367                 tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
368                 if (!tmpl)
369                         return NULL;
370
371                 p = tmpl;
372                 tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
373                 if (tmpl != p) {
374                         tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
375                         tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
376                 }
377         } else {
378                 tmpl = kzalloc(sizeof(*tmpl), flags);
379                 if (!tmpl)
380                         return NULL;
381         }
382
383         tmpl->status = IPS_TEMPLATE;
384         write_pnet(&tmpl->ct_net, net);
385         nf_ct_zone_add(tmpl, zone);
386         atomic_set(&tmpl->ct_general.use, 0);
387
388         return tmpl;
389 }
390 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
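
/* The low bits of skb->_nfct carry the ctinfo value (NFCT_INFOMASK), so a
 * template pointer must be aligned to NFCT_INFOMASK + 1.  If the slab minimum
 * alignment cannot guarantee that, nf_ct_tmpl_alloc() above over-allocates,
 * rounds the pointer up with NFCT_ALIGN() and records the padding in
 * proto.tmpl_padto so that nf_ct_tmpl_free() below can hand the original
 * pointer back to kfree().
 */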
391
392 void nf_ct_tmpl_free(struct nf_conn *tmpl)
393 {
394         nf_ct_ext_destroy(tmpl);
395         nf_ct_ext_free(tmpl);
396
397         if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
398                 kfree((char *)tmpl - tmpl->proto.tmpl_padto);
399         else
400                 kfree(tmpl);
401 }
402 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
403
404 static void
405 destroy_conntrack(struct nf_conntrack *nfct)
406 {
407         struct nf_conn *ct = (struct nf_conn *)nfct;
408         const struct nf_conntrack_l4proto *l4proto;
409
410         pr_debug("destroy_conntrack(%p)\n", ct);
411         WARN_ON(atomic_read(&nfct->use) != 0);
412
413         if (unlikely(nf_ct_is_template(ct))) {
414                 nf_ct_tmpl_free(ct);
415                 return;
416         }
417         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
418         if (l4proto->destroy)
419                 l4proto->destroy(ct);
420
421         local_bh_disable();
422         /* Expectations will have been removed in clean_from_lists,
423          * except TFTP can create an expectation on the first packet,
424          * before connection is in the list, so we need to clean here,
425          * too.
426          */
427         nf_ct_remove_expectations(ct);
428
429         nf_ct_del_from_dying_or_unconfirmed_list(ct);
430
431         local_bh_enable();
432
433         if (ct->master)
434                 nf_ct_put(ct->master);
435
436         pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
437         nf_conntrack_free(ct);
438 }
439
440 static void nf_ct_delete_from_lists(struct nf_conn *ct)
441 {
442         struct net *net = nf_ct_net(ct);
443         unsigned int hash, reply_hash;
444         unsigned int sequence;
445
446         nf_ct_helper_destroy(ct);
447
448         local_bh_disable();
449         do {
450                 sequence = read_seqcount_begin(&nf_conntrack_generation);
451                 hash = hash_conntrack(net,
452                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
453                 reply_hash = hash_conntrack(net,
454                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
455         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
456
457         clean_from_lists(ct);
458         nf_conntrack_double_unlock(hash, reply_hash);
459
460         nf_ct_add_to_dying_list(ct);
461
462         local_bh_enable();
463 }
464
465 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
466 {
467         struct nf_conn_tstamp *tstamp;
468
469         if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
470                 return false;
471
472         tstamp = nf_conn_tstamp_find(ct);
473         if (tstamp && tstamp->stop == 0)
474                 tstamp->stop = ktime_get_real_ns();
475
476         if (nf_conntrack_event_report(IPCT_DESTROY, ct,
477                                     portid, report) < 0) {
478                 /* destroy event was not delivered. nf_ct_put will
479                  * be done by event cache worker on redelivery.
480                  */
481                 nf_ct_delete_from_lists(ct);
482                 nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
483                 return false;
484         }
485
486         nf_conntrack_ecache_work(nf_ct_net(ct));
487         nf_ct_delete_from_lists(ct);
488         nf_ct_put(ct);
489         return true;
490 }
491 EXPORT_SYMBOL_GPL(nf_ct_delete);
492
493 static inline bool
494 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
495                 const struct nf_conntrack_tuple *tuple,
496                 const struct nf_conntrack_zone *zone,
497                 const struct net *net)
498 {
499         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
500
501         /* A conntrack can be recreated with an identical tuple,
502          * so we need to check that the conntrack is confirmed.
503          */
504         return nf_ct_tuple_equal(tuple, &h->tuple) &&
505                nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
506                nf_ct_is_confirmed(ct) &&
507                net_eq(net, nf_ct_net(ct));
508 }
509
510 /* caller must hold rcu readlock and none of the nf_conntrack_locks */
511 static void nf_ct_gc_expired(struct nf_conn *ct)
512 {
513         if (!atomic_inc_not_zero(&ct->ct_general.use))
514                 return;
515
516         if (nf_ct_should_gc(ct))
517                 nf_ct_kill(ct);
518
519         nf_ct_put(ct);
520 }
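
/* The conntrack slab uses SLAB_TYPESAFE_BY_RCU, so an entry found under
 * rcu_read_lock() can be freed and recycled at any time.  atomic_inc_not_zero()
 * above only succeeds while the object is still live, and nf_ct_should_gc()
 * is re-checked after the reference has been taken.
 */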
521
522 /*
523  * Warning:
524  * - Caller must take a reference on returned object
525  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
526  */
527 static struct nf_conntrack_tuple_hash *
528 ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
529                       const struct nf_conntrack_tuple *tuple, u32 hash)
530 {
531         struct nf_conntrack_tuple_hash *h;
532         struct hlist_nulls_head *ct_hash;
533         struct hlist_nulls_node *n;
534         unsigned int bucket, hsize;
535
536 begin:
537         nf_conntrack_get_ht(&ct_hash, &hsize);
538         bucket = reciprocal_scale(hash, hsize);
539
540         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
541                 struct nf_conn *ct;
542
543                 ct = nf_ct_tuplehash_to_ctrack(h);
544                 if (nf_ct_is_expired(ct)) {
545                         nf_ct_gc_expired(ct);
546                         continue;
547                 }
548
549                 if (nf_ct_is_dying(ct))
550                         continue;
551
552                 if (nf_ct_key_equal(h, tuple, zone, net))
553                         return h;
554         }
555         /*
556          * if the nulls value we got at the end of this lookup is
557          * not the expected one, we must restart lookup.
558          * We probably met an item that was moved to another chain.
559          */
560         if (get_nulls_value(n) != bucket) {
561                 NF_CT_STAT_INC_ATOMIC(net, search_restart);
562                 goto begin;
563         }
564
565         return NULL;
566 }
567
568 /* Find a connection corresponding to a tuple. */
569 static struct nf_conntrack_tuple_hash *
570 __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
571                         const struct nf_conntrack_tuple *tuple, u32 hash)
572 {
573         struct nf_conntrack_tuple_hash *h;
574         struct nf_conn *ct;
575
576         rcu_read_lock();
577 begin:
578         h = ____nf_conntrack_find(net, zone, tuple, hash);
579         if (h) {
580                 ct = nf_ct_tuplehash_to_ctrack(h);
581                 if (unlikely(nf_ct_is_dying(ct) ||
582                              !atomic_inc_not_zero(&ct->ct_general.use)))
583                         h = NULL;
584                 else {
585                         if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
586                                 nf_ct_put(ct);
587                                 goto begin;
588                         }
589                 }
590         }
591         rcu_read_unlock();
592
593         return h;
594 }
595
596 struct nf_conntrack_tuple_hash *
597 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
598                       const struct nf_conntrack_tuple *tuple)
599 {
600         return __nf_conntrack_find_get(net, zone, tuple,
601                                        hash_conntrack_raw(tuple, net));
602 }
603 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
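
/* Example (illustrative sketch): an external lookup returns a referenced
 * entry that the caller must release:
 *
 *	h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
 *	if (h) {
 *		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *
 *		... use ct ...
 *		nf_ct_put(ct);
 *	}
 */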
604
605 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
606                                        unsigned int hash,
607                                        unsigned int reply_hash)
608 {
609         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
610                            &nf_conntrack_hash[hash]);
611         hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
612                            &nf_conntrack_hash[reply_hash]);
613 }
614
615 int
616 nf_conntrack_hash_check_insert(struct nf_conn *ct)
617 {
618         const struct nf_conntrack_zone *zone;
619         struct net *net = nf_ct_net(ct);
620         unsigned int hash, reply_hash;
621         struct nf_conntrack_tuple_hash *h;
622         struct hlist_nulls_node *n;
623         unsigned int sequence;
624
625         zone = nf_ct_zone(ct);
626
627         local_bh_disable();
628         do {
629                 sequence = read_seqcount_begin(&nf_conntrack_generation);
630                 hash = hash_conntrack(net,
631                                       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
632                 reply_hash = hash_conntrack(net,
633                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
634         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
635
636         /* See if there's one in the list already, including reverse */
637         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
638                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
639                                     zone, net))
640                         goto out;
641
642         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
643                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
644                                     zone, net))
645                         goto out;
646
647         smp_wmb();
648         /* The caller holds a reference to this object */
649         atomic_set(&ct->ct_general.use, 2);
650         __nf_conntrack_hash_insert(ct, hash, reply_hash);
651         nf_conntrack_double_unlock(hash, reply_hash);
652         NF_CT_STAT_INC(net, insert);
653         local_bh_enable();
654         return 0;
655
656 out:
657         nf_conntrack_double_unlock(hash, reply_hash);
658         NF_CT_STAT_INC(net, insert_failed);
659         local_bh_enable();
660         return -EEXIST;
661 }
662 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
663
664 static inline void nf_ct_acct_update(struct nf_conn *ct,
665                                      enum ip_conntrack_info ctinfo,
666                                      unsigned int len)
667 {
668         struct nf_conn_acct *acct;
669
670         acct = nf_conn_acct_find(ct);
671         if (acct) {
672                 struct nf_conn_counter *counter = acct->counter;
673
674                 atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
675                 atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
676         }
677 }
678
679 static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
680                              const struct nf_conn *loser_ct)
681 {
682         struct nf_conn_acct *acct;
683
684         acct = nf_conn_acct_find(loser_ct);
685         if (acct) {
686                 struct nf_conn_counter *counter = acct->counter;
687                 unsigned int bytes;
688
689                 /* u32 should be fine since we must have seen one packet. */
690                 bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
691                 nf_ct_acct_update(ct, ctinfo, bytes);
692         }
693 }
694
695 /* Resolve race on insertion if this protocol allows this. */
696 static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
697                                enum ip_conntrack_info ctinfo,
698                                struct nf_conntrack_tuple_hash *h)
699 {
700         /* This is the conntrack entry already in hashes that won race. */
701         struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
702         const struct nf_conntrack_l4proto *l4proto;
703
704         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
705         if (l4proto->allow_clash &&
706             ((ct->status & IPS_NAT_DONE_MASK) == 0) &&
707             !nf_ct_is_dying(ct) &&
708             atomic_inc_not_zero(&ct->ct_general.use)) {
709                 enum ip_conntrack_info oldinfo;
710                 struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
711
712                 nf_ct_acct_merge(ct, ctinfo, loser_ct);
713                 nf_conntrack_put(&loser_ct->ct_general);
714                 nf_ct_set(skb, ct, oldinfo);
715                 return NF_ACCEPT;
716         }
717         NF_CT_STAT_INC(net, drop);
718         return NF_DROP;
719 }
720
721 /* Confirm a connection given skb; places it in hash table */
722 int
723 __nf_conntrack_confirm(struct sk_buff *skb)
724 {
725         const struct nf_conntrack_zone *zone;
726         unsigned int hash, reply_hash;
727         struct nf_conntrack_tuple_hash *h;
728         struct nf_conn *ct;
729         struct nf_conn_help *help;
730         struct nf_conn_tstamp *tstamp;
731         struct hlist_nulls_node *n;
732         enum ip_conntrack_info ctinfo;
733         struct net *net;
734         unsigned int sequence;
735         int ret = NF_DROP;
736
737         ct = nf_ct_get(skb, &ctinfo);
738         net = nf_ct_net(ct);
739
740         /* ipt_REJECT uses nf_conntrack_attach to attach related
741            ICMP/TCP RST packets in the other direction.  The actual packet
742            which created the connection will be IP_CT_NEW or, for an
743            expected connection, IP_CT_RELATED. */
744         if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
745                 return NF_ACCEPT;
746
747         zone = nf_ct_zone(ct);
748         local_bh_disable();
749
750         do {
751                 sequence = read_seqcount_begin(&nf_conntrack_generation);
752                 /* reuse the hash saved before */
753                 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
754                 hash = scale_hash(hash);
755                 reply_hash = hash_conntrack(net,
756                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
757
758         } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
759
760         /* We're not in hash table, and we refuse to set up related
761          * connections for unconfirmed conns.  But packet copies and
762          * REJECT will give spurious warnings here.
763          */
764
765         /* No external references means no one else could have
766          * confirmed us.
767          */
768         WARN_ON(nf_ct_is_confirmed(ct));
769         pr_debug("Confirming conntrack %p\n", ct);
770         /* We have to check the DYING flag after unlink to prevent
771          * a race against nf_ct_get_next_corpse() possibly called from
772          * user context, else we insert an already 'dead' hash, blocking
773          * further use of that particular connection -JM.
774          */
775         nf_ct_del_from_dying_or_unconfirmed_list(ct);
776
777         if (unlikely(nf_ct_is_dying(ct))) {
778                 nf_ct_add_to_dying_list(ct);
779                 goto dying;
780         }
781
782         /* See if there's one in the list already, including reverse:
783            NAT could have grabbed it without realizing, since we're
784            not in the hash.  If there is, we lost the race. */
785         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
786                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
787                                     zone, net))
788                         goto out;
789
790         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
791                 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
792                                     zone, net))
793                         goto out;
794
795         /* Timer relative to confirmation time, not original
796            setting time, otherwise we'd get timer wrap in
797            weird delay cases. */
798         ct->timeout += nfct_time_stamp;
799         atomic_inc(&ct->ct_general.use);
800         ct->status |= IPS_CONFIRMED;
801
802         /* set conntrack timestamp, if enabled. */
803         tstamp = nf_conn_tstamp_find(ct);
804         if (tstamp) {
805                 if (skb->tstamp == 0)
806                         __net_timestamp(skb);
807
808                 tstamp->start = ktime_to_ns(skb->tstamp);
809         }
810         /* Since the lookup is lockless, hash insertion must be done after
811          * starting the timer and setting the CONFIRMED bit. The RCU barriers
812          * guarantee that no other CPU can find the conntrack before the above
813          * stores are visible.
814          */
815         __nf_conntrack_hash_insert(ct, hash, reply_hash);
816         nf_conntrack_double_unlock(hash, reply_hash);
817         local_bh_enable();
818
819         help = nfct_help(ct);
820         if (help && help->helper)
821                 nf_conntrack_event_cache(IPCT_HELPER, ct);
822
823         nf_conntrack_event_cache(master_ct(ct) ?
824                                  IPCT_RELATED : IPCT_NEW, ct);
825         return NF_ACCEPT;
826
827 out:
828         nf_ct_add_to_dying_list(ct);
829         ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
830 dying:
831         nf_conntrack_double_unlock(hash, reply_hash);
832         NF_CT_STAT_INC(net, insert_failed);
833         local_bh_enable();
834         return ret;
835 }
836 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
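
/* Callers normally reach this through the nf_conntrack_confirm() inline
 * wrapper (include/net/netfilter/nf_conntrack_core.h), which only calls in
 * here while the entry is still unconfirmed, i.e. on the first packet of a
 * flow.
 */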
837
838 /* Returns true if a connection corresponds to the tuple (required
839    for NAT). */
840 int
841 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
842                          const struct nf_conn *ignored_conntrack)
843 {
844         struct net *net = nf_ct_net(ignored_conntrack);
845         const struct nf_conntrack_zone *zone;
846         struct nf_conntrack_tuple_hash *h;
847         struct hlist_nulls_head *ct_hash;
848         unsigned int hash, hsize;
849         struct hlist_nulls_node *n;
850         struct nf_conn *ct;
851
852         zone = nf_ct_zone(ignored_conntrack);
853
854         rcu_read_lock();
855  begin:
856         nf_conntrack_get_ht(&ct_hash, &hsize);
857         hash = __hash_conntrack(net, tuple, hsize);
858
859         hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
860                 ct = nf_ct_tuplehash_to_ctrack(h);
861
862                 if (ct == ignored_conntrack)
863                         continue;
864
865                 if (nf_ct_is_expired(ct)) {
866                         nf_ct_gc_expired(ct);
867                         continue;
868                 }
869
870                 if (nf_ct_key_equal(h, tuple, zone, net)) {
871                         NF_CT_STAT_INC_ATOMIC(net, found);
872                         rcu_read_unlock();
873                         return 1;
874                 }
875         }
876
877         if (get_nulls_value(n) != hash) {
878                 NF_CT_STAT_INC_ATOMIC(net, search_restart);
879                 goto begin;
880         }
881
882         rcu_read_unlock();
883
884         return 0;
885 }
886 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
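
/* NAT uses this (via nf_nat_used_tuple()) to test whether a candidate reply
 * tuple would collide with an existing connection while searching for a
 * unique mapping.
 */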
887
888 #define NF_CT_EVICTION_RANGE    8
889
890 /* There's a small race here where we may free a just-assured
891    connection.  Too bad: we're in trouble anyway. */
892 static unsigned int early_drop_list(struct net *net,
893                                     struct hlist_nulls_head *head)
894 {
895         struct nf_conntrack_tuple_hash *h;
896         struct hlist_nulls_node *n;
897         unsigned int drops = 0;
898         struct nf_conn *tmp;
899
900         hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
901                 tmp = nf_ct_tuplehash_to_ctrack(h);
902
903                 if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
904                         continue;
905
906                 if (nf_ct_is_expired(tmp)) {
907                         nf_ct_gc_expired(tmp);
908                         continue;
909                 }
910
911                 if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
912                     !net_eq(nf_ct_net(tmp), net) ||
913                     nf_ct_is_dying(tmp))
914                         continue;
915
916                 if (!atomic_inc_not_zero(&tmp->ct_general.use))
917                         continue;
918
919                 /* kill only if still in same netns -- might have moved due to
920                  * SLAB_TYPESAFE_BY_RCU rules.
921                  *
922                  * We steal the timer reference.  If that fails timer has
923                  * already fired or someone else deleted it. Just drop ref
924                  * and move to next entry.
925                  */
926                 if (net_eq(nf_ct_net(tmp), net) &&
927                     nf_ct_is_confirmed(tmp) &&
928                     nf_ct_delete(tmp, 0, 0))
929                         drops++;
930
931                 nf_ct_put(tmp);
932         }
933
934         return drops;
935 }
936
937 static noinline int early_drop(struct net *net, unsigned int _hash)
938 {
939         unsigned int i;
940
941         for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
942                 struct hlist_nulls_head *ct_hash;
943                 unsigned int hash, hsize, drops;
944
945                 rcu_read_lock();
946                 nf_conntrack_get_ht(&ct_hash, &hsize);
947                 hash = reciprocal_scale(_hash++, hsize);
948
949                 drops = early_drop_list(net, &ct_hash[hash]);
950                 rcu_read_unlock();
951
952                 if (drops) {
953                         NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
954                         return true;
955                 }
956         }
957
958         return false;
959 }
960
961 static bool gc_worker_skip_ct(const struct nf_conn *ct)
962 {
963         return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
964 }
965
966 static bool gc_worker_can_early_drop(const struct nf_conn *ct)
967 {
968         const struct nf_conntrack_l4proto *l4proto;
969
970         if (!test_bit(IPS_ASSURED_BIT, &ct->status))
971                 return true;
972
973         l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
974         if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
975                 return true;
976
977         return false;
978 }
979
980 #define DAY     (86400 * HZ)
981
982 /* Set an arbitrary timeout large enough to never expire; this saves
983  * us a check for the IPS_OFFLOAD_BIT from the packet path via
984  * nf_ct_is_expired().
985  */
986 static void nf_ct_offload_timeout(struct nf_conn *ct)
987 {
988         if (nf_ct_expires(ct) < DAY / 2)
989                 ct->timeout = nfct_time_stamp + DAY;
990 }
991
992 static void gc_worker(struct work_struct *work)
993 {
994         unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
995         unsigned int i, goal, buckets = 0, expired_count = 0;
996         unsigned int nf_conntrack_max95 = 0;
997         struct conntrack_gc_work *gc_work;
998         unsigned int ratio, scanned = 0;
999         unsigned long next_run;
1000
1001         gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
1002
1003         goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
1004         i = gc_work->last_bucket;
1005         if (gc_work->early_drop)
1006                 nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
1007
1008         do {
1009                 struct nf_conntrack_tuple_hash *h;
1010                 struct hlist_nulls_head *ct_hash;
1011                 struct hlist_nulls_node *n;
1012                 unsigned int hashsz;
1013                 struct nf_conn *tmp;
1014
1015                 i++;
1016                 rcu_read_lock();
1017
1018                 nf_conntrack_get_ht(&ct_hash, &hashsz);
1019                 if (i >= hashsz)
1020                         i = 0;
1021
1022                 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
1023                         struct net *net;
1024
1025                         tmp = nf_ct_tuplehash_to_ctrack(h);
1026
1027                         scanned++;
1028                         if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
1029                                 nf_ct_offload_timeout(tmp);
1030                                 continue;
1031                         }
1032
1033                         if (nf_ct_is_expired(tmp)) {
1034                                 nf_ct_gc_expired(tmp);
1035                                 expired_count++;
1036                                 continue;
1037                         }
1038
1039                         if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
1040                                 continue;
1041
1042                         net = nf_ct_net(tmp);
1043                         if (atomic_read(&net->ct.count) < nf_conntrack_max95)
1044                                 continue;
1045
1046                         /* need to take reference to avoid possible races */
1047                         if (!atomic_inc_not_zero(&tmp->ct_general.use))
1048                                 continue;
1049
1050                         if (gc_worker_skip_ct(tmp)) {
1051                                 nf_ct_put(tmp);
1052                                 continue;
1053                         }
1054
1055                         if (gc_worker_can_early_drop(tmp))
1056                                 nf_ct_kill(tmp);
1057
1058                         nf_ct_put(tmp);
1059                 }
1060
1061                 /* could check get_nulls_value() here and restart if ct
1062                  * was moved to another chain.  But given gc is best-effort
1063                  * we will just continue with next hash slot.
1064                  */
1065                 rcu_read_unlock();
1066                 cond_resched();
1067         } while (++buckets < goal);
1068
1069         if (gc_work->exiting)
1070                 return;
1071
1072         /*
1073          * Eviction will normally happen from the packet path, and not
1074          * from this gc worker.
1075          *
1076          * This worker is only here to reap expired entries when system went
1077          * idle after a busy period.
1078          *
1079          * The heuristics below are supposed to balance conflicting goals:
1080          *
1081          * 1. Minimize time until we notice a stale entry
1082          * 2. Maximize scan intervals to not waste cycles
1083          *
1084          * Normally, expire ratio will be close to 0.
1085          *
1086          * As soon as a sizeable fraction of the entries have expired
1087          * increase scan frequency.
1088          */
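        /* With the constants above this means: if more than GC_EVICT_RATIO
         * (50%) of the scanned entries had expired, run again after
         * min_interval (HZ / GC_MAX_BUCKETS_DIV) jiffies; otherwise back off
         * by min_interval per round, capped at GC_MAX_SCAN_JIFFIES /
         * GC_MAX_BUCKETS_DIV = 16*HZ/128 = HZ/8.
         */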
1089         ratio = scanned ? expired_count * 100 / scanned : 0;
1090         if (ratio > GC_EVICT_RATIO) {
1091                 gc_work->next_gc_run = min_interval;
1092         } else {
1093                 unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
1094
1095                 BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
1096
1097                 gc_work->next_gc_run += min_interval;
1098                 if (gc_work->next_gc_run > max)
1099                         gc_work->next_gc_run = max;
1100         }
1101
1102         next_run = gc_work->next_gc_run;
1103         gc_work->last_bucket = i;
1104         gc_work->early_drop = false;
1105         queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
1106 }
1107
1108 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1109 {
1110         INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
1111         gc_work->next_gc_run = HZ;
1112         gc_work->exiting = false;
1113 }
1114
1115 static struct nf_conn *
1116 __nf_conntrack_alloc(struct net *net,
1117                      const struct nf_conntrack_zone *zone,
1118                      const struct nf_conntrack_tuple *orig,
1119                      const struct nf_conntrack_tuple *repl,
1120                      gfp_t gfp, u32 hash)
1121 {
1122         struct nf_conn *ct;
1123
1124         /* We don't want any race condition at early drop stage */
1125         atomic_inc(&net->ct.count);
1126
1127         if (nf_conntrack_max &&
1128             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
1129                 if (!early_drop(net, hash)) {
1130                         if (!conntrack_gc_work.early_drop)
1131                                 conntrack_gc_work.early_drop = true;
1132                         atomic_dec(&net->ct.count);
1133                         net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
1134                         return ERR_PTR(-ENOMEM);
1135                 }
1136         }
1137
1138         /*
1139          * Do not use kmem_cache_zalloc(), as this cache uses
1140          * SLAB_TYPESAFE_BY_RCU.
1141          */
1142         ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
1143         if (ct == NULL)
1144                 goto out;
1145
1146         spin_lock_init(&ct->lock);
1147         ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
1148         ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
1149         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
1150         /* save hash for reusing when confirming */
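        /* The REPLY-direction hnnode.pprev is unused until the entry is
         * actually hashed, so the raw hash is stashed there and read back
         * (and run through scale_hash()) in __nf_conntrack_confirm().
         */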
1151         *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
1152         ct->status = 0;
1153         write_pnet(&ct->ct_net, net);
1154         memset(&ct->__nfct_init_offset[0], 0,
1155                offsetof(struct nf_conn, proto) -
1156                offsetof(struct nf_conn, __nfct_init_offset[0]));
1157
1158         nf_ct_zone_add(ct, zone);
1159
1160         /* Because we use RCU lookups, we set ct_general.use to zero before
1161          * this is inserted in any list.
1162          */
1163         atomic_set(&ct->ct_general.use, 0);
1164         return ct;
1165 out:
1166         atomic_dec(&net->ct.count);
1167         return ERR_PTR(-ENOMEM);
1168 }
1169
1170 struct nf_conn *nf_conntrack_alloc(struct net *net,
1171                                    const struct nf_conntrack_zone *zone,
1172                                    const struct nf_conntrack_tuple *orig,
1173                                    const struct nf_conntrack_tuple *repl,
1174                                    gfp_t gfp)
1175 {
1176         return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
1177 }
1178 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
1179
1180 void nf_conntrack_free(struct nf_conn *ct)
1181 {
1182         struct net *net = nf_ct_net(ct);
1183
1184         /* A freed object has refcnt == 0, that's
1185          * the golden rule for SLAB_TYPESAFE_BY_RCU
1186          */
1187         WARN_ON(atomic_read(&ct->ct_general.use) != 0);
1188
1189         nf_ct_ext_destroy(ct);
1190         nf_ct_ext_free(ct);
1191         kmem_cache_free(nf_conntrack_cachep, ct);
1192         smp_mb__before_atomic();
1193         atomic_dec(&net->ct.count);
1194 }
1195 EXPORT_SYMBOL_GPL(nf_conntrack_free);
1196
1197
1198 /* Allocate a new conntrack: we return -ENOMEM if classification
1199    failed due to stress.  Otherwise it really is unclassifiable. */
1200 static noinline struct nf_conntrack_tuple_hash *
1201 init_conntrack(struct net *net, struct nf_conn *tmpl,
1202                const struct nf_conntrack_tuple *tuple,
1203                const struct nf_conntrack_l3proto *l3proto,
1204                const struct nf_conntrack_l4proto *l4proto,
1205                struct sk_buff *skb,
1206                unsigned int dataoff, u32 hash)
1207 {
1208         struct nf_conn *ct;
1209         struct nf_conn_help *help;
1210         struct nf_conntrack_tuple repl_tuple;
1211         struct nf_conntrack_ecache *ecache;
1212         struct nf_conntrack_expect *exp = NULL;
1213         const struct nf_conntrack_zone *zone;
1214         struct nf_conn_timeout *timeout_ext;
1215         struct nf_conntrack_zone tmp;
1216         unsigned int *timeouts;
1217
1218         if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
1219                 pr_debug("Can't invert tuple.\n");
1220                 return NULL;
1221         }
1222
1223         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1224         ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1225                                   hash);
1226         if (IS_ERR(ct))
1227                 return (struct nf_conntrack_tuple_hash *)ct;
1228
1229         if (!nf_ct_add_synproxy(ct, tmpl)) {
1230                 nf_conntrack_free(ct);
1231                 return ERR_PTR(-ENOMEM);
1232         }
1233
1234         timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
1235         if (timeout_ext) {
1236                 timeouts = nf_ct_timeout_data(timeout_ext);
1237                 if (unlikely(!timeouts))
1238                         timeouts = l4proto->get_timeouts(net);
1239         } else {
1240                 timeouts = l4proto->get_timeouts(net);
1241         }
1242
1243         if (!l4proto->new(ct, skb, dataoff, timeouts)) {
1244                 nf_conntrack_free(ct);
1245                 pr_debug("can't track with proto module\n");
1246                 return NULL;
1247         }
1248
1249         if (timeout_ext)
1250                 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1251                                       GFP_ATOMIC);
1252
1253         nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1254         nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1255         nf_ct_labels_ext_add(ct);
1256
1257         ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1258         nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1259                                  ecache ? ecache->expmask : 0,
1260                              GFP_ATOMIC);
1261
1262         local_bh_disable();
1263         if (net->ct.expect_count) {
1264                 spin_lock(&nf_conntrack_expect_lock);
1265                 exp = nf_ct_find_expectation(net, zone, tuple);
1266                 if (exp) {
1267                         pr_debug("expectation arrives ct=%p exp=%p\n",
1268                                  ct, exp);
1269                         /* Welcome, Mr. Bond.  We've been expecting you... */
1270                         __set_bit(IPS_EXPECTED_BIT, &ct->status);
1271                         /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1272                         ct->master = exp->master;
1273                         if (exp->helper) {
1274                                 help = nf_ct_helper_ext_add(ct, exp->helper,
1275                                                             GFP_ATOMIC);
1276                                 if (help)
1277                                         rcu_assign_pointer(help->helper, exp->helper);
1278                         }
1279
1280 #ifdef CONFIG_NF_CONNTRACK_MARK
1281                         ct->mark = exp->master->mark;
1282 #endif
1283 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1284                         ct->secmark = exp->master->secmark;
1285 #endif
1286                         NF_CT_STAT_INC(net, expect_new);
1287                 }
1288                 spin_unlock(&nf_conntrack_expect_lock);
1289         }
1290         if (!exp)
1291                 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1292
1293         /* Bump the refcount, then insert into the (per cpu) unconfirmed list */
1294         nf_conntrack_get(&ct->ct_general);
1295         nf_ct_add_to_unconfirmed_list(ct);
1296
1297         local_bh_enable();
1298
1299         if (exp) {
1300                 if (exp->expectfn)
1301                         exp->expectfn(ct, exp);
1302                 nf_ct_expect_put(exp);
1303         }
1304
1305         return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1306 }
1307
1308 /* On success, returns 0, sets skb->_nfct | ctinfo */
1309 static int
1310 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1311                   struct sk_buff *skb,
1312                   unsigned int dataoff,
1313                   u_int16_t l3num,
1314                   u_int8_t protonum,
1315                   const struct nf_conntrack_l3proto *l3proto,
1316                   const struct nf_conntrack_l4proto *l4proto)
1317 {
1318         const struct nf_conntrack_zone *zone;
1319         struct nf_conntrack_tuple tuple;
1320         struct nf_conntrack_tuple_hash *h;
1321         enum ip_conntrack_info ctinfo;
1322         struct nf_conntrack_zone tmp;
1323         struct nf_conn *ct;
1324         u32 hash;
1325
1326         if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1327                              dataoff, l3num, protonum, net, &tuple, l3proto,
1328                              l4proto)) {
1329                 pr_debug("Can't get tuple\n");
1330                 return 0;
1331         }
1332
1333         /* look for tuple match */
1334         zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1335         hash = hash_conntrack_raw(&tuple, net);
1336         h = __nf_conntrack_find_get(net, zone, &tuple, hash);
1337         if (!h) {
1338                 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
1339                                    skb, dataoff, hash);
1340                 if (!h)
1341                         return 0;
1342                 if (IS_ERR(h))
1343                         return PTR_ERR(h);
1344         }
1345         ct = nf_ct_tuplehash_to_ctrack(h);
1346
1347         /* It exists; we have (non-exclusive) reference. */
1348         if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1349                 ctinfo = IP_CT_ESTABLISHED_REPLY;
1350         } else {
1351                 /* Once we've had two way comms, always ESTABLISHED. */
1352                 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1353                         pr_debug("normal packet for %p\n", ct);
1354                         ctinfo = IP_CT_ESTABLISHED;
1355                 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1356                         pr_debug("related packet for %p\n", ct);
1357                         ctinfo = IP_CT_RELATED;
1358                 } else {
1359                         pr_debug("new packet for %p\n", ct);
1360                         ctinfo = IP_CT_NEW;
1361                 }
1362         }
1363         nf_ct_set(skb, ct, ctinfo);
1364         return 0;
1365 }
1366
1367 unsigned int
1368 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1369                 struct sk_buff *skb)
1370 {
1371         const struct nf_conntrack_l3proto *l3proto;
1372         const struct nf_conntrack_l4proto *l4proto;
1373         struct nf_conn *ct, *tmpl;
1374         enum ip_conntrack_info ctinfo;
1375         unsigned int *timeouts;
1376         unsigned int dataoff;
1377         u_int8_t protonum;
1378         int ret;
1379
1380         tmpl = nf_ct_get(skb, &ctinfo);
1381         if (tmpl || ctinfo == IP_CT_UNTRACKED) {
1382                 /* Previously seen (loopback or untracked)?  Ignore. */
1383                 if ((tmpl && !nf_ct_is_template(tmpl)) ||
1384                      ctinfo == IP_CT_UNTRACKED) {
1385                         NF_CT_STAT_INC_ATOMIC(net, ignore);
1386                         return NF_ACCEPT;
1387                 }
1388                 skb->_nfct = 0;
1389         }
1390
1391         /* rcu_read_lock()ed by nf_hook_thresh */
1392         l3proto = __nf_ct_l3proto_find(pf);
1393         ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
1394                                    &dataoff, &protonum);
1395         if (ret <= 0) {
1396                 pr_debug("not prepared to track yet or error occurred\n");
1397                 NF_CT_STAT_INC_ATOMIC(net, error);
1398                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1399                 ret = -ret;
1400                 goto out;
1401         }
1402
1403         l4proto = __nf_ct_l4proto_find(pf, protonum);
1404
1405         /* It may be a special packet: error, unclean, etc.
1406          * The inverse of the return code tells the netfilter
1407          * core what to do with the packet. */
1408         if (l4proto->error != NULL) {
1409                 ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
1410                 if (ret <= 0) {
1411                         NF_CT_STAT_INC_ATOMIC(net, error);
1412                         NF_CT_STAT_INC_ATOMIC(net, invalid);
1413                         ret = -ret;
1414                         goto out;
1415                 }
1416                 /* ICMP[v6] protocol trackers may assign one conntrack. */
1417                 if (skb->_nfct)
1418                         goto out;
1419         }
1420 repeat:
1421         ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
1422                                 l3proto, l4proto);
1423         if (ret < 0) {
1424                 /* Too stressed to deal. */
1425                 NF_CT_STAT_INC_ATOMIC(net, drop);
1426                 ret = NF_DROP;
1427                 goto out;
1428         }
1429
1430         ct = nf_ct_get(skb, &ctinfo);
1431         if (!ct) {
1432                 /* Not valid part of a connection */
1433                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1434                 ret = NF_ACCEPT;
1435                 goto out;
1436         }
1437
1438         /* Decide what timeout policy we want to apply to this flow. */
1439         timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
1440
1441         ret = l4proto->packet(ct, skb, dataoff, ctinfo, timeouts);
1442         if (ret <= 0) {
1443                 /* Invalid: inverse of the return code tells
1444                  * the netfilter core what to do */
1445                 pr_debug("nf_conntrack_in: Can't track with proto module\n");
1446                 nf_conntrack_put(&ct->ct_general);
1447                 skb->_nfct = 0;
1448                 NF_CT_STAT_INC_ATOMIC(net, invalid);
1449                 if (ret == -NF_DROP)
1450                         NF_CT_STAT_INC_ATOMIC(net, drop);
1451                 /* Special case: TCP tracker reports an attempt to reopen a
1452                  * closed/aborted connection. We have to go back and create a
1453                  * fresh conntrack.
1454                  */
1455                 if (ret == -NF_REPEAT)
1456                         goto repeat;
1457                 ret = -ret;
1458                 goto out;
1459         }
1460
1461         if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
1462             !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1463                 nf_conntrack_event_cache(IPCT_REPLY, ct);
1464 out:
1465         if (tmpl)
1466                 nf_ct_put(tmpl);
1467
1468         return ret;
1469 }
1470 EXPORT_SYMBOL_GPL(nf_conntrack_in);
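
/*
 * Hedged usage sketch (not part of the original file): the per-family
 * netfilter hooks are thin wrappers that simply forward the verdict
 * returned by nf_conntrack_in().  A minimal IPv4 wrapper would look
 * roughly like this ("example_conntrack_in" is an illustrative name):
 *
 *	static unsigned int example_conntrack_in(void *priv,
 *						 struct sk_buff *skb,
 *						 const struct nf_hook_state *state)
 *	{
 *		return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
 *	}
 */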
1471
1472 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1473                           const struct nf_conntrack_tuple *orig)
1474 {
1475         bool ret;
1476
1477         rcu_read_lock();
1478         ret = nf_ct_invert_tuple(inverse, orig,
1479                                  __nf_ct_l3proto_find(orig->src.l3num),
1480                                  __nf_ct_l4proto_find(orig->src.l3num,
1481                                                       orig->dst.protonum));
1482         rcu_read_unlock();
1483         return ret;
1484 }
1485 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
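
/*
 * Hedged usage sketch: NAT code uses this helper when probing whether the
 * reply direction of a candidate tuple is already in use, roughly:
 *
 *	struct nf_conntrack_tuple reply;
 *
 *	nf_ct_invert_tuplepr(&reply, tuple);
 *	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
 */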
1486
1487 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1488    implicitly racy: see __nf_conntrack_confirm */
1489 void nf_conntrack_alter_reply(struct nf_conn *ct,
1490                               const struct nf_conntrack_tuple *newreply)
1491 {
1492         struct nf_conn_help *help = nfct_help(ct);
1493
1494         /* Should be unconfirmed, so not in hash table yet */
1495         WARN_ON(nf_ct_is_confirmed(ct));
1496
1497         pr_debug("Altering reply tuple of %p to ", ct);
1498         nf_ct_dump_tuple(newreply);
1499
1500         ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1501         if (ct->master || (help && !hlist_empty(&help->expectations)))
1502                 return;
1503
1504         rcu_read_lock();
1505         __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1506         rcu_read_unlock();
1507 }
1508 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1509
1510 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1511 void __nf_ct_refresh_acct(struct nf_conn *ct,
1512                           enum ip_conntrack_info ctinfo,
1513                           const struct sk_buff *skb,
1514                           unsigned long extra_jiffies,
1515                           int do_acct)
1516 {
1517         WARN_ON(!skb);
1518
1519         /* Only update if this is not a fixed timeout */
1520         if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1521                 goto acct;
1522
1523         /* If not in hash table, timer will not be active yet */
1524         if (nf_ct_is_confirmed(ct))
1525                 extra_jiffies += nfct_time_stamp;
1526
1527         ct->timeout = extra_jiffies;
1528 acct:
1529         if (do_acct)
1530                 nf_ct_acct_update(ct, ctinfo, skb->len);
1531 }
1532 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
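
/*
 * Hedged usage sketch: protocol trackers normally reach this through the
 * nf_ct_refresh_acct()/nf_ct_refresh() wrappers declared in
 * <net/netfilter/nf_conntrack.h>, e.g. from a ->packet() handler:
 *
 *	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 *
 * ("timeouts" and "new_state" are illustrative; each tracker keeps its
 * own per-state timeout table.)
 */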
1533
1534 bool nf_ct_kill_acct(struct nf_conn *ct,
1535                      enum ip_conntrack_info ctinfo,
1536                      const struct sk_buff *skb)
1537 {
1538         nf_ct_acct_update(ct, ctinfo, skb->len);
1539
1540         return nf_ct_delete(ct, 0, 0);
1541 }
1542 EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
1543
1544 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1545
1546 #include <linux/netfilter/nfnetlink.h>
1547 #include <linux/netfilter/nfnetlink_conntrack.h>
1548 #include <linux/mutex.h>
1549
1550 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to
1551  * live in nf_conntrack_core, since we don't want the protocols to
1552  * autoload or depend on ctnetlink. */
1553 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1554                                const struct nf_conntrack_tuple *tuple)
1555 {
1556         if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1557             nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1558                 goto nla_put_failure;
1559         return 0;
1560
1561 nla_put_failure:
1562         return -1;
1563 }
1564 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1565
1566 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1567         [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1568         [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1569 };
1570 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1571
1572 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1573                                struct nf_conntrack_tuple *t)
1574 {
1575         if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1576                 return -EINVAL;
1577
1578         t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1579         t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1580
1581         return 0;
1582 }
1583 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1584
1585 unsigned int nf_ct_port_nlattr_tuple_size(void)
1586 {
1587         static unsigned int size __read_mostly;
1588
1589         if (!size)
1590                 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1591
1592         return size;
1593 }
1594 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
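
/*
 * Hedged usage sketch: port-based trackers typically wire these helpers
 * directly into their struct nf_conntrack_l4proto, roughly:
 *
 *	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 *	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
 *	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
 *	.nla_policy		= nf_ct_port_nla_policy,
 */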
1595 #endif
1596
1597 /* Used by ipt_REJECT and ip6t_REJECT. */
1598 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1599 {
1600         struct nf_conn *ct;
1601         enum ip_conntrack_info ctinfo;
1602
1603         /* This ICMP is in the reverse direction to the packet that caused it */
1604         ct = nf_ct_get(skb, &ctinfo);
1605         if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1606                 ctinfo = IP_CT_RELATED_REPLY;
1607         else
1608                 ctinfo = IP_CT_RELATED;
1609
1610         /* Attach to new skbuff, and increment count */
1611         nf_ct_set(nskb, ct, ctinfo);
1612         nf_conntrack_get(skb_nfct(nskb));
1613 }
1614
1615 /* Bring out ya dead! */
1616 static struct nf_conn *
1617 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
1618                 void *data, unsigned int *bucket)
1619 {
1620         struct nf_conntrack_tuple_hash *h;
1621         struct nf_conn *ct;
1622         struct hlist_nulls_node *n;
1623         spinlock_t *lockp;
1624
1625         for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
1626                 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1627                 local_bh_disable();
1628                 nf_conntrack_lock(lockp);
1629                 if (*bucket < nf_conntrack_htable_size) {
1630                         hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
1631                                 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1632                                         continue;
1633                                 ct = nf_ct_tuplehash_to_ctrack(h);
1634                                 if (iter(ct, data))
1635                                         goto found;
1636                         }
1637                 }
1638                 spin_unlock(lockp);
1639                 local_bh_enable();
1640                 cond_resched();
1641         }
1642
1643         return NULL;
1644 found:
1645         atomic_inc(&ct->ct_general.use);
1646         spin_unlock(lockp);
1647         local_bh_enable();
1648         return ct;
1649 }
1650
1651 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
1652                                   void *data, u32 portid, int report)
1653 {
1654         unsigned int bucket = 0, sequence;
1655         struct nf_conn *ct;
1656
1657         might_sleep();
1658
1659         for (;;) {
1660                 sequence = read_seqcount_begin(&nf_conntrack_generation);
1661
1662                 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
1663                         /* Time to push up daisies... */
1664
1665                         nf_ct_delete(ct, portid, report);
1666                         nf_ct_put(ct);
1667                         cond_resched();
1668                 }
1669
1670                 if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
1671                         break;
1672                 bucket = 0;
1673         }
1674 }
1675
1676 struct iter_data {
1677         int (*iter)(struct nf_conn *i, void *data);
1678         void *data;
1679         struct net *net;
1680 };
1681
1682 static int iter_net_only(struct nf_conn *i, void *data)
1683 {
1684         struct iter_data *d = data;
1685
1686         if (!net_eq(d->net, nf_ct_net(i)))
1687                 return 0;
1688
1689         return d->iter(i, d->data);
1690 }
1691
1692 static void
1693 __nf_ct_unconfirmed_destroy(struct net *net)
1694 {
1695         int cpu;
1696
1697         for_each_possible_cpu(cpu) {
1698                 struct nf_conntrack_tuple_hash *h;
1699                 struct hlist_nulls_node *n;
1700                 struct ct_pcpu *pcpu;
1701
1702                 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1703
1704                 spin_lock_bh(&pcpu->lock);
1705                 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1706                         struct nf_conn *ct;
1707
1708                         ct = nf_ct_tuplehash_to_ctrack(h);
1709
1710                         /* We cannot call iter() on the unconfirmed list:
1711                          * the owning cpu can reallocate ct->ext at any time.
1712                          */
1713                         set_bit(IPS_DYING_BIT, &ct->status);
1714                 }
1715                 spin_unlock_bh(&pcpu->lock);
1716                 cond_resched();
1717         }
1718 }
1719
1720 void nf_ct_unconfirmed_destroy(struct net *net)
1721 {
1722         might_sleep();
1723
1724         if (atomic_read(&net->ct.count) > 0) {
1725                 __nf_ct_unconfirmed_destroy(net);
1726                 nf_queue_nf_hook_drop(net);
1727                 synchronize_net();
1728         }
1729 }
1730 EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
1731
1732 void nf_ct_iterate_cleanup_net(struct net *net,
1733                                int (*iter)(struct nf_conn *i, void *data),
1734                                void *data, u32 portid, int report)
1735 {
1736         struct iter_data d;
1737
1738         might_sleep();
1739
1740         if (atomic_read(&net->ct.count) == 0)
1741                 return;
1742
1743         d.iter = iter;
1744         d.data = data;
1745         d.net = net;
1746
1747         nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
1748 }
1749 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
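
/*
 * Hedged usage sketch: callers pass a predicate that returns non-zero for
 * entries that should be deleted, e.g. to flush an entire namespace
 * ("example_kill_all" is an illustrative name):
 *
 *	static int example_kill_all(struct nf_conn *ct, void *data)
 *	{
 *		return 1;
 *	}
 *
 *	nf_ct_iterate_cleanup_net(net, example_kill_all, NULL, 0, 0);
 */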
1750
1751 /**
1752  * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
1753  * @iter: callback to invoke for each conntrack
1754  * @data: data to pass to @iter
1755  *
1756  * Like nf_ct_iterate_cleanup, but first marks conntracks on the
1757  * unconfirmed list as dying (so they will not be inserted into the
1758  * main table).
1759  *
1760  * Can only be called from the module exit path.
1761  */
1762 void
1763 nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
1764 {
1765         struct net *net;
1766
1767         down_read(&net_rwsem);
1768         for_each_net(net) {
1769                 if (atomic_read(&net->ct.count) == 0)
1770                         continue;
1771                 __nf_ct_unconfirmed_destroy(net);
1772                 nf_queue_nf_hook_drop(net);
1773         }
1774         up_read(&net_rwsem);
1775
1776         /* Need to wait for the netns cleanup worker to finish, if it's
1777          * running -- it might have deleted a net namespace from
1778          * the global list, so our __nf_ct_unconfirmed_destroy() might
1779          * not have affected all namespaces.
1780          */
1781         net_ns_barrier();
1782
1783         /* A conntrack could have been unlinked from the unconfirmed list
1784          * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
1785          * This makes sure it is inserted into the conntrack table.
1786          */
1787         synchronize_net();
1788
1789         nf_ct_iterate_cleanup(iter, data, 0, 0);
1790 }
1791 EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
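
/*
 * Hedged usage sketch: a hypothetical conntrack extension module could
 * call this from its exit path to remove every entry that still carries
 * its extension ("example_uses_ext" is an illustrative predicate):
 *
 *	static int example_uses_ext(struct nf_conn *ct, void *data)
 *	{
 *		return nf_ct_ext_exist(ct, NF_CT_EXT_NAT);
 *	}
 *
 *	nf_ct_iterate_destroy(example_uses_ext, NULL);
 */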
1792
1793 static int kill_all(struct nf_conn *i, void *data)
1794 {
1795         return net_eq(nf_ct_net(i), data);
1796 }
1797
1798 void nf_ct_free_hashtable(void *hash, unsigned int size)
1799 {
1800         if (is_vmalloc_addr(hash))
1801                 vfree(hash);
1802         else
1803                 free_pages((unsigned long)hash,
1804                            get_order(sizeof(struct hlist_head) * size));
1805 }
1806 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1807
1808 void nf_conntrack_cleanup_start(void)
1809 {
1810         conntrack_gc_work.exiting = true;
1811         RCU_INIT_POINTER(ip_ct_attach, NULL);
1812 }
1813
1814 void nf_conntrack_cleanup_end(void)
1815 {
1816         RCU_INIT_POINTER(nf_ct_destroy, NULL);
1817
1818         cancel_delayed_work_sync(&conntrack_gc_work.dwork);
1819         nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
1820
1821         nf_conntrack_proto_fini();
1822         nf_conntrack_seqadj_fini();
1823         nf_conntrack_labels_fini();
1824         nf_conntrack_helper_fini();
1825         nf_conntrack_timeout_fini();
1826         nf_conntrack_ecache_fini();
1827         nf_conntrack_tstamp_fini();
1828         nf_conntrack_acct_fini();
1829         nf_conntrack_expect_fini();
1830
1831         kmem_cache_destroy(nf_conntrack_cachep);
1832 }
1833
1834 /*
1835  * Mishearing the voices in his head, our hero wonders how he's
1836  * supposed to kill the mall.
1837  */
1838 void nf_conntrack_cleanup_net(struct net *net)
1839 {
1840         LIST_HEAD(single);
1841
1842         list_add(&net->exit_list, &single);
1843         nf_conntrack_cleanup_net_list(&single);
1844 }
1845
1846 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1847 {
1848         int busy;
1849         struct net *net;
1850
1851         /*
1852          * This makes sure all current packets have passed through
1853          * the netfilter framework.  Roll on, two-stage module
1854          * delete...
1855          */
1856         synchronize_net();
1857 i_see_dead_people:
1858         busy = 0;
1859         list_for_each_entry(net, net_exit_list, exit_list) {
1860                 nf_ct_iterate_cleanup(kill_all, net, 0, 0);
1861                 if (atomic_read(&net->ct.count) != 0)
1862                         busy = 1;
1863         }
1864         if (busy) {
1865                 schedule();
1866                 goto i_see_dead_people;
1867         }
1868
1869         list_for_each_entry(net, net_exit_list, exit_list) {
1870                 nf_conntrack_proto_pernet_fini(net);
1871                 nf_conntrack_helper_pernet_fini(net);
1872                 nf_conntrack_ecache_pernet_fini(net);
1873                 nf_conntrack_tstamp_pernet_fini(net);
1874                 nf_conntrack_acct_pernet_fini(net);
1875                 nf_conntrack_expect_pernet_fini(net);
1876                 free_percpu(net->ct.stat);
1877                 free_percpu(net->ct.pcpu_lists);
1878         }
1879 }
1880
1881 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1882 {
1883         struct hlist_nulls_head *hash;
1884         unsigned int nr_slots, i;
1885         size_t sz;
1886
1887         if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1888                 return NULL;
1889
1890         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1891         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1892
1893         if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
1894                 return NULL;
1895
1896         sz = nr_slots * sizeof(struct hlist_nulls_head);
1897         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1898                                         get_order(sz));
1899         if (!hash)
1900                 hash = vzalloc(sz);
1901
1902         if (hash && nulls)
1903                 for (i = 0; i < nr_slots; i++)
1904                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
1905
1906         return hash;
1907 }
1908 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
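
/*
 * Hedged worked example: with 4 KiB pages and 8-byte buckets there are
 * 512 hlist_nulls_heads per page, so a request of *sizep == 1000 is
 * rounded up to 1024 slots (two pages, 8 KiB) before allocation.
 */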
1909
1910 int nf_conntrack_hash_resize(unsigned int hashsize)
1911 {
1912         int i, bucket;
1913         unsigned int old_size;
1914         struct hlist_nulls_head *hash, *old_hash;
1915         struct nf_conntrack_tuple_hash *h;
1916         struct nf_conn *ct;
1917
1918         if (!hashsize)
1919                 return -EINVAL;
1920
1921         hash = nf_ct_alloc_hashtable(&hashsize, 1);
1922         if (!hash)
1923                 return -ENOMEM;
1924
1925         old_size = nf_conntrack_htable_size;
1926         if (old_size == hashsize) {
1927                 nf_ct_free_hashtable(hash, hashsize);
1928                 return 0;
1929         }
1930
1931         local_bh_disable();
1932         nf_conntrack_all_lock();
1933         write_seqcount_begin(&nf_conntrack_generation);
1934
1935         /* Lookups in the old hash might happen in parallel, which means we
1936          * might get false negatives during connection lookup. New connections
1937          * created because of a false negative won't make it into the hash
1938          * though, since that requires taking the locks.
1939          */
1940
1941         for (i = 0; i < nf_conntrack_htable_size; i++) {
1942                 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
1943                         h = hlist_nulls_entry(nf_conntrack_hash[i].first,
1944                                               struct nf_conntrack_tuple_hash, hnnode);
1945                         ct = nf_ct_tuplehash_to_ctrack(h);
1946                         hlist_nulls_del_rcu(&h->hnnode);
1947                         bucket = __hash_conntrack(nf_ct_net(ct),
1948                                                   &h->tuple, hashsize);
1949                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1950                 }
1951         }
1952         old_size = nf_conntrack_htable_size;
1953         old_hash = nf_conntrack_hash;
1954
1955         nf_conntrack_hash = hash;
1956         nf_conntrack_htable_size = hashsize;
1957
1958         write_seqcount_end(&nf_conntrack_generation);
1959         nf_conntrack_all_unlock();
1960         local_bh_enable();
1961
1962         synchronize_net();
1963         nf_ct_free_hashtable(old_hash, old_size);
1964         return 0;
1965 }
1966
1967 int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
1968 {
1969         unsigned int hashsize;
1970         int rc;
1971
1972         if (current->nsproxy->net_ns != &init_net)
1973                 return -EOPNOTSUPP;
1974
1975         /* On boot, we can set this without any fancy locking. */
1976         if (!nf_conntrack_htable_size)
1977                 return param_set_uint(val, kp);
1978
1979         rc = kstrtouint(val, 0, &hashsize);
1980         if (rc)
1981                 return rc;
1982
1983         return nf_conntrack_hash_resize(hashsize);
1984 }
1985 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1986
1987 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1988                   &nf_conntrack_htable_size, 0600);
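
/*
 * Hedged usage note: since "hashsize" is registered with mode 0600, the
 * table can be resized at runtime by writing a new value to
 * /sys/module/nf_conntrack/parameters/hashsize, which lands in
 * nf_conntrack_set_hashsize() and then nf_conntrack_hash_resize() above.
 */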
1989
1990 static __always_inline unsigned int total_extension_size(void)
1991 {
1992         /* remember to add new extensions below */
1993         BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
1994
1995         return sizeof(struct nf_ct_ext) +
1996                sizeof(struct nf_conn_help)
1997 #if IS_ENABLED(CONFIG_NF_NAT)
1998                 + sizeof(struct nf_conn_nat)
1999 #endif
2000                 + sizeof(struct nf_conn_seqadj)
2001                 + sizeof(struct nf_conn_acct)
2002 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2003                 + sizeof(struct nf_conntrack_ecache)
2004 #endif
2005 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
2006                 + sizeof(struct nf_conn_tstamp)
2007 #endif
2008 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2009                 + sizeof(struct nf_conn_timeout)
2010 #endif
2011 #ifdef CONFIG_NF_CONNTRACK_LABELS
2012                 + sizeof(struct nf_conn_labels)
2013 #endif
2014 #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
2015                 + sizeof(struct nf_conn_synproxy)
2016 #endif
2017         ;
2018 }
2019
2020 int nf_conntrack_init_start(void)
2021 {
2022         int max_factor = 8;
2023         int ret = -ENOMEM;
2024         int i;
2025
2026         /* struct nf_ct_ext uses u8 to store offsets/size */
2027         BUILD_BUG_ON(total_extension_size() > 255u);
2028
2029         seqcount_init(&nf_conntrack_generation);
2030
2031         for (i = 0; i < CONNTRACK_LOCKS; i++)
2032                 spin_lock_init(&nf_conntrack_locks[i]);
2033
2034         if (!nf_conntrack_htable_size) {
2035                 /* Idea from tcp.c: use 1/16384 of memory.
2036                  * On i386: 32MB machine has 512 buckets.
2037                  * Machines with more than 1GB have 16384 buckets.
2038                  * Machines with more than 4GB have 65536 buckets.
2039                  */
2040                 nf_conntrack_htable_size
2041                         = (((totalram_pages << PAGE_SHIFT) / 16384)
2042                            / sizeof(struct hlist_head));
2043                 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
2044                         nf_conntrack_htable_size = 65536;
2045                 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
2046                         nf_conntrack_htable_size = 16384;
2047                 if (nf_conntrack_htable_size < 32)
2048                         nf_conntrack_htable_size = 32;
2049
2050                 /* Use a max. factor of four by default to get the same
2051                  * maximum as with the old struct list_heads. When a table
2052                  * size is given we use the old factor of 8 to avoid
2053                  * reducing the maximum number of entries. */
2054                 max_factor = 4;
2055         }
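
        /*
         * Hedged worked example (assuming 4 KiB pages and 8-byte pointers):
         * a 512 MiB machine has totalram_pages == 131072, so the formula
         * above yields 512 MiB / 16384 / 8 = 4096 buckets; the overrides
         * then pin machines above 1 GiB and 4 GiB to 16384 and 65536
         * buckets respectively.
         */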
2056
2057         nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2058         if (!nf_conntrack_hash)
2059                 return -ENOMEM;
2060
2061         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
2062
2063         nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
2064                                                 sizeof(struct nf_conn),
2065                                                 NFCT_INFOMASK + 1,
2066                                                 SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
2067         if (!nf_conntrack_cachep)
2068                 goto err_cachep;
2069
2070         ret = nf_conntrack_expect_init();
2071         if (ret < 0)
2072                 goto err_expect;
2073
2074         ret = nf_conntrack_acct_init();
2075         if (ret < 0)
2076                 goto err_acct;
2077
2078         ret = nf_conntrack_tstamp_init();
2079         if (ret < 0)
2080                 goto err_tstamp;
2081
2082         ret = nf_conntrack_ecache_init();
2083         if (ret < 0)
2084                 goto err_ecache;
2085
2086         ret = nf_conntrack_timeout_init();
2087         if (ret < 0)
2088                 goto err_timeout;
2089
2090         ret = nf_conntrack_helper_init();
2091         if (ret < 0)
2092                 goto err_helper;
2093
2094         ret = nf_conntrack_labels_init();
2095         if (ret < 0)
2096                 goto err_labels;
2097
2098         ret = nf_conntrack_seqadj_init();
2099         if (ret < 0)
2100                 goto err_seqadj;
2101
2102         ret = nf_conntrack_proto_init();
2103         if (ret < 0)
2104                 goto err_proto;
2105
2106         conntrack_gc_work_init(&conntrack_gc_work);
2107         queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
2108
2109         return 0;
2110
2111 err_proto:
2112         nf_conntrack_seqadj_fini();
2113 err_seqadj:
2114         nf_conntrack_labels_fini();
2115 err_labels:
2116         nf_conntrack_helper_fini();
2117 err_helper:
2118         nf_conntrack_timeout_fini();
2119 err_timeout:
2120         nf_conntrack_ecache_fini();
2121 err_ecache:
2122         nf_conntrack_tstamp_fini();
2123 err_tstamp:
2124         nf_conntrack_acct_fini();
2125 err_acct:
2126         nf_conntrack_expect_fini();
2127 err_expect:
2128         kmem_cache_destroy(nf_conntrack_cachep);
2129 err_cachep:
2130         nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
2131         return ret;
2132 }
2133
2134 void nf_conntrack_init_end(void)
2135 {
2136         /* For use by REJECT target */
2137         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
2138         RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
2139 }
2140
2141 /*
2142  * We need to use special "null" values, not used in the hash table.
2143  */
2144 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
2145 #define DYING_NULLS_VAL         ((1<<30)+1)
2146 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
2147
2148 int nf_conntrack_init_net(struct net *net)
2149 {
2150         int ret = -ENOMEM;
2151         int cpu;
2152
2153         BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2154         atomic_set(&net->ct.count, 0);
2155
2156         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2157         if (!net->ct.pcpu_lists)
2158                 goto err_stat;
2159
2160         for_each_possible_cpu(cpu) {
2161                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2162
2163                 spin_lock_init(&pcpu->lock);
2164                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2165                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
2166         }
2167
2168         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2169         if (!net->ct.stat)
2170                 goto err_pcpu_lists;
2171
2172         ret = nf_conntrack_expect_pernet_init(net);
2173         if (ret < 0)
2174                 goto err_expect;
2175         ret = nf_conntrack_acct_pernet_init(net);
2176         if (ret < 0)
2177                 goto err_acct;
2178         ret = nf_conntrack_tstamp_pernet_init(net);
2179         if (ret < 0)
2180                 goto err_tstamp;
2181         ret = nf_conntrack_ecache_pernet_init(net);
2182         if (ret < 0)
2183                 goto err_ecache;
2184         ret = nf_conntrack_helper_pernet_init(net);
2185         if (ret < 0)
2186                 goto err_helper;
2187         ret = nf_conntrack_proto_pernet_init(net);
2188         if (ret < 0)
2189                 goto err_proto;
2190         return 0;
2191
2192 err_proto:
2193         nf_conntrack_helper_pernet_fini(net);
2194 err_helper:
2195         nf_conntrack_ecache_pernet_fini(net);
2196 err_ecache:
2197         nf_conntrack_tstamp_pernet_fini(net);
2198 err_tstamp:
2199         nf_conntrack_acct_pernet_fini(net);
2200 err_acct:
2201         nf_conntrack_expect_pernet_fini(net);
2202 err_expect:
2203         free_percpu(net->ct.stat);
2204 err_pcpu_lists:
2205         free_percpu(net->ct.pcpu_lists);
2206 err_stat:
2207         return ret;
2208 }