// SPDX-License-Identifier: GPL-2.0-only
/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 portid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        WARN_ON(!master_help);
        WARN_ON(timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del_rcu(&exp->lnode);
        master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

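/* Timer callback: the expectation timed out without being matched;
 * unlink it and drop the timer's reference.
 */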
static void nf_ct_expectation_timed_out(struct timer_list *t)
{
        struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);

        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_put(exp);
}

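/* Hash an expectation by its destination tuple, mixing in a once-only
 * random seed and the netns hash so bucket placement is unpredictable.
 */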
static unsigned int nf_ct_expect_dst_hash(const struct net *n,
                                          const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash, seed;

        get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

        seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ seed);

        return reciprocal_scale(hash, nf_ct_expect_hsize);
}

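/* Does @tuple match expectation @i under @i's mask, in the same netns
 * and conntrack zone?
 */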
static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_expect *i,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
               net_eq(net, nf_ct_net(i->master)) &&
               nf_ct_zone_equal_any(i->master, zone);
}

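/* Remove an expectation if its timer can still be cancelled; returns
 * false when the timer has already fired, in which case the timeout
 * handler performs the teardown instead.
 */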
bool nf_ct_remove_expect(struct nf_conntrack_expect *exp)
{
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expect);

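/* Lockless lookup: the caller must hold rcu_read_lock() and take its
 * own reference if the expectation must outlive the RCU section.
 */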
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
                    const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
                if (nf_ct_exp_equal(tuple, i, zone, net))
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
                      const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !refcount_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from
 * the global list and then returned.
 */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
                       const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_exp_equal(tuple, i, zone, net)) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If the master is not in the hash table yet (i.e. the packet
         * hasn't left this machine yet), how could the other end know
         * about the expectation? Hence these are not the droids you
         * are looking for (if the master ct never got confirmed, we'd
         * hold a reference to it and weird things would happen to
         * future packets).
         */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        /* Avoid a race with other CPUs that are about to invoke
         * ->destroy() on exp->master, or nf_ct_delete() via timeout
         * or early_drop().
         *
         * If atomic_inc_not_zero() fails, we know the ct is being
         * destroyed. If it succeeds, the ct cannot disappear
         * underneath us.
         */
        if (unlikely(nf_ct_is_dying(exp->master) ||
                     !atomic_inc_not_zero(&exp->master->ct_general.use)))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                refcount_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }
        /* Undo the exp->master refcnt increase, if del_timer() failed */
        nf_ct_put(exp->master);

        return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                nf_ct_remove_expect(exp);
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* The parts covered by the intersection of the two masks must
         * differ, otherwise the expectations clash.
         */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

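/* Exact equality: same master, tuple, mask, netns and zone. */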
static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_remove_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself.
 */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        refcount_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

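/* Fill in @exp's tuple and mask. A NULL @saddr or @src acts as a
 * wildcard (mask cleared); otherwise the corresponding mask bits are
 * set to all-ones. @daddr and @dst are mandatory.
 */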
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;

#if IS_ENABLED(CONFIG_NF_NAT)
        memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
        memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

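/* Drop a reference; the final put frees the expectation after an RCU
 * grace period so that lockless hash walkers stay safe.
 */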
void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (refcount_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

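/* Called with nf_conntrack_expect_lock held: take two references (hash
 * insert and timer), arm the timeout from the helper's expect policy,
 * and link @exp into the master's list and the global hash.
 */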
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
        unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

        /* two references: one for the hash insert, one for the timer */
        refcount_add(2, &exp->use);

        timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                exp->timeout.expires = jiffies +
                        helper->expect_policy[exp->class].timeout * HZ;
        }
        add_timer(&exp->timeout);

        hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;

        hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
        net->ct.expect_count++;

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;

        hlist_for_each_entry(exp, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last)
                nf_ct_remove_expect(last);
}

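/* Called with nf_conntrack_expect_lock held: check the new expectation
 * against duplicates, clashes and the per-helper and global limits;
 * returns 0 if it may be inserted.
 */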
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *next;
        unsigned int h;
        int ret = 0;

        if (!master_help) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(net, &expect->tuple);
        hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (i->class != expect->class)
                                return -EALREADY;

                        if (nf_ct_remove_expect(i))
                                break;
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will we go over the limit? */
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                net_warn_ratelimited("nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

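/* Validate @expect under nf_conntrack_expect_lock, insert it and
 * report an IPEXP_NEW event on success.
 */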
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_expect_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret < 0)
                goto out;

        nf_ct_expect_insert(expect);

        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return 0;
out:
        spin_unlock_bh(&nf_conntrack_expect_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

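/* Walk the entire expectation table and remove every entry for which
 * @iter returns true.
 */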
void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data),
                                  void *data)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *next;
        unsigned int i;

        spin_lock_bh(&nf_conntrack_expect_lock);

        for (i = 0; i < nf_ct_expect_hsize; i++) {
                hlist_for_each_entry_safe(exp, next,
                                          &nf_ct_expect_hash[i],
                                          hnode) {
                        if (iter(exp, data) && del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect(exp);
                                nf_ct_expect_put(exp);
                        }
                }
        }

        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy);

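/* Like nf_ct_expect_iterate_destroy(), but restricted to expectations
 * in @net, reporting each removal as an IPEXP_DESTROY event.
 */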
void nf_ct_expect_iterate_net(struct net *net,
                              bool (*iter)(struct nf_conntrack_expect *e, void *data),
                              void *data,
                              u32 portid, int report)
{
        struct nf_conntrack_expect *exp;
        const struct hlist_node *next;
        unsigned int i;

        spin_lock_bh(&nf_conntrack_expect_lock);

        for (i = 0; i < nf_ct_expect_hsize; i++) {
                hlist_for_each_entry_safe(exp, next,
                                          &nf_ct_expect_hash[i],
                                          hnode) {
                        if (!net_eq(nf_ct_exp_net(exp), net))
                                continue;

                        if (iter(exp, data) && del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect_report(exp, portid, report);
                                nf_ct_expect_put(exp);
                        }
                }
        }

        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

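/* Find the first non-empty hash bucket; the caller holds
 * rcu_read_lock() (taken in exp_seq_start()).
 */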
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_puts(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    nf_ct_l4proto_find(expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_puts(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name[0])
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

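/* Create /proc/net/nf_conntrack_expect (mode 0440), owned by the root
 * user and group of the owning user namespace when they map.
 */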
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;
        kuid_t root_uid;
        kgid_t root_gid;

        proc = proc_create_net("nf_conntrack_expect", 0440, net->proc_net,
                        &exp_seq_ops, sizeof(struct ct_expect_iter_state));
        if (!proc)
                return -ENOMEM;

        root_uid = make_kuid(net->user_ns, 0);
        root_gid = make_kgid(net->user_ns, 0);
        if (uid_valid(root_uid) && gid_valid(root_gid))
                proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
        net->ct.expect_count = 0;
        return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
        exp_proc_remove(net);
}

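/* Global init: size the expectation hash from the conntrack table size
 * unless overridden via the expect_hashsize module parameter, and
 * derive nf_ct_expect_max from it.
 */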
int nf_conntrack_expect_init(void)
{
        if (!nf_ct_expect_hsize) {
                nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
                if (!nf_ct_expect_hsize)
                        nf_ct_expect_hsize = 1;
        }
        nf_ct_expect_max = nf_ct_expect_hsize * 4;
        nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                sizeof(struct nf_conntrack_expect),
                                0, 0, NULL);
        if (!nf_ct_expect_cachep)
                return -ENOMEM;

        nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (!nf_ct_expect_hash) {
                kmem_cache_destroy(nf_ct_expect_cachep);
                return -ENOMEM;
        }

        return 0;
}

void nf_conntrack_expect_fini(void)
{
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
        kvfree(nf_ct_expect_hash);
}