1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/types.h>
3 #include <linux/netfilter.h>
4 #include <linux/slab.h>
5 #include <linux/module.h>
6 #include <linux/skbuff.h>
7 #include <linux/proc_fs.h>
8 #include <linux/seq_file.h>
9 #include <linux/percpu.h>
10 #include <linux/netdevice.h>
11 #include <linux/security.h>
12 #include <net/net_namespace.h>
14 #include <linux/sysctl.h>
17 #include <net/netfilter/nf_conntrack.h>
18 #include <net/netfilter/nf_conntrack_core.h>
19 #include <net/netfilter/nf_conntrack_l4proto.h>
20 #include <net/netfilter/nf_conntrack_expect.h>
21 #include <net/netfilter/nf_conntrack_helper.h>
22 #include <net/netfilter/nf_conntrack_acct.h>
23 #include <net/netfilter/nf_conntrack_zones.h>
24 #include <net/netfilter/nf_conntrack_timestamp.h>
25 #include <linux/rculist_nulls.h>
/* Per-netns subsystem id; registered below via nf_conntrack_net_ops.id. */
27 unsigned int nf_conntrack_net_id __read_mostly;
29 #ifdef CONFIG_NF_CONNTRACK_PROCFS
/*
 * print_tuple - format one conntrack tuple for the /proc/net/nf_conntrack
 * listing.  The L3 half (src/dst address) is selected on tuple->src.l3num,
 * the L4 half (ports, ICMP type/code/id, GRE keys) on l4proto->l4proto.
 * NOTE(review): the case labels and default branches are not visible in
 * this chunk of the file.
 */
31 print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
32 const struct nf_conntrack_l4proto *l4proto)
34 switch (tuple->src.l3num) {
36 seq_printf(s, "src=%pI4 dst=%pI4 ",
37 &tuple->src.u3.ip, &tuple->dst.u3.ip);
40 seq_printf(s, "src=%pI6 dst=%pI6 ",
41 tuple->src.u3.ip6, tuple->dst.u3.ip6);
/* L4 portion: field names/keys depend on the transport protocol. */
47 switch (l4proto->l4proto) {
/* ICMP: type/code live in the dst union, the echo id in src. */
49 seq_printf(s, "type=%u code=%u id=%u ",
50 tuple->dst.u.icmp.type,
51 tuple->dst.u.icmp.code,
52 ntohs(tuple->src.u.icmp.id));
/* TCP ports */
55 seq_printf(s, "sport=%hu dport=%hu ",
56 ntohs(tuple->src.u.tcp.port),
57 ntohs(tuple->dst.u.tcp.port));
/* UDP and UDP-Lite share the same port layout. */
59 case IPPROTO_UDPLITE: /* fallthrough */
61 seq_printf(s, "sport=%hu dport=%hu ",
62 ntohs(tuple->src.u.udp.port),
63 ntohs(tuple->dst.u.udp.port));
/* DCCP ports */
67 seq_printf(s, "sport=%hu dport=%hu ",
68 ntohs(tuple->src.u.dccp.port),
69 ntohs(tuple->dst.u.dccp.port));
/* SCTP ports */
72 seq_printf(s, "sport=%hu dport=%hu ",
73 ntohs(tuple->src.u.sctp.port),
74 ntohs(tuple->dst.u.sctp.port));
/* second ICMP-style block (case label not visible in this chunk) */
77 seq_printf(s, "type=%u code=%u id=%u ",
78 tuple->dst.u.icmp.type,
79 tuple->dst.u.icmp.code,
80 ntohs(tuple->src.u.icmp.id));
/* GRE keys */
83 seq_printf(s, "srckey=0x%x dstkey=0x%x ",
84 ntohs(tuple->src.u.gre.key),
85 ntohs(tuple->dst.u.gre.key));
91 EXPORT_SYMBOL_GPL(print_tuple);
/*
 * Iterator state for the /proc/net/nf_conntrack seq_file walk: the
 * netns-aware seq private data plus a snapshot of the conntrack hash
 * table (pointer and size, taken in ct_seq_start()).  Code below also
 * uses st->bucket and st->time_now; those members are not visible in
 * this chunk.
 */
93 struct ct_iter_state {
94 struct seq_net_private p;
95 struct hlist_nulls_head *hash;
96 unsigned int htable_size;
/*
 * Return the first node of the table walk: scan buckets starting at
 * st->bucket, reading each chain head under RCU, until a non-empty
 * chain is found (loop header is partially elided in this chunk).
 */
101 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
103 struct ct_iter_state *st = seq->private;
104 struct hlist_nulls_node *n;
107 st->bucket < st->htable_size;
110 hlist_nulls_first_rcu(&st->hash[st->bucket]));
/*
 * Advance to the next node under RCU.  On an hlist_nulls list the chain
 * terminator encodes a bucket number: if it matches st->bucket the
 * chain ended normally and we move on to the next bucket; otherwise we
 * presumably raced with a table resize/rehash and re-read the current
 * bucket (the restart path is not visible in this chunk — confirm).
 */
117 static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
118 struct hlist_nulls_node *head)
120 struct ct_iter_state *st = seq->private;
122 head = rcu_dereference(hlist_nulls_next_rcu(head));
123 while (is_a_nulls(head)) {
124 if (likely(get_nulls_value(head) == st->bucket)) {
/* end of table reached once the last bucket is exhausted */
125 if (++st->bucket >= st->htable_size)
128 head = rcu_dereference(
129 hlist_nulls_first_rcu(&st->hash[st->bucket]));
/*
 * Position the walk at entry number 'pos': start from the first node
 * and step forward 'pos' times.  Returns NULL when pos is beyond the
 * end of the table.
 */
134 static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
136 struct hlist_nulls_node *head = ct_get_first(seq);
138 while (pos && (head = ct_get_next(seq, head)))
141 return pos ? NULL : head;
/*
 * seq_file ->start: record the wall-clock time used by
 * ct_show_delta_time(), snapshot the live hash table into the iterator
 * state, and position at *pos.  NOTE(review): the RCU read-side lock
 * one would expect here is in lines elided from this chunk — confirm.
 */
144 static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
147 struct ct_iter_state *st = seq->private;
149 st->time_now = ktime_get_real_ns();
152 nf_conntrack_get_ht(&st->hash, &st->htable_size);
153 return ct_get_idx(seq, *pos);
/* seq_file ->next: v is the current hash node; advance one entry. */
156 static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
159 return ct_get_next(s, v);
/* seq_file ->stop: end of walk (body not visible in this chunk). */
162 static void ct_seq_stop(struct seq_file *s, void *v)
168 #ifdef CONFIG_NF_CONNTRACK_SECMARK
/*
 * Print "secctx=<context> " for the conntrack's secmark: translate the
 * secid to a context string via the LSM, print it, then release the
 * buffer.  (Error handling on 'ret' is in lines elided here.)
 */
169 static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
175 ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
179 seq_printf(s, "secctx=%s ", secctx);
181 security_release_secctx(secctx, len);
/* No-op stub when SECMARK support is compiled out. */
184 static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
189 #ifdef CONFIG_NF_CONNTRACK_ZONES
/*
 * Print the conntrack zone id, but only when the zone's direction
 * matches the direction the caller is currently printing.  The key
 * name distinguishes default, original and reply zones.
 */
190 static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
193 const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
/* zone applies to a different direction than this line segment */
195 if (zone->dir != dir)
198 case NF_CT_DEFAULT_ZONE_DIR:
199 seq_printf(s, "zone=%u ", zone->id);
201 case NF_CT_ZONE_DIR_ORIG:
202 seq_printf(s, "zone-orig=%u ", zone->id);
204 case NF_CT_ZONE_DIR_REPL:
205 seq_printf(s, "zone-reply=%u ", zone->id);
/* No-op stub when zone support is compiled out. */
212 static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
218 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
/*
 * Print "delta-time=<seconds> ": the connection's age, computed as the
 * difference between the dump start time (st->time_now, set in
 * ct_seq_start()) and the timestamp extension's start time.
 */
219 static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
221 struct ct_iter_state *st = s->private;
222 struct nf_conn_tstamp *tstamp;
225 tstamp = nf_conn_tstamp_find(ct);
227 delta_time = st->time_now - tstamp->start;
/* convert nanoseconds to whole seconds */
229 delta_time = div_s64(delta_time, NSEC_PER_SEC);
233 seq_printf(s, "delta-time=%llu ",
234 (unsigned long long)delta_time);
/* No-op stub when timestamp support is compiled out. */
240 ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
/* Human-readable name for an L3 protocol family (default case elided). */
245 static const char* l3proto_name(u16 proto)
248 case AF_INET: return "ipv4";
249 case AF_INET6: return "ipv6";
/* Human-readable name for an L4 protocol number (default case elided). */
255 static const char* l4proto_name(u16 proto)
258 case IPPROTO_ICMP: return "icmp";
259 case IPPROTO_TCP: return "tcp";
260 case IPPROTO_UDP: return "udp";
261 case IPPROTO_DCCP: return "dccp";
262 case IPPROTO_GRE: return "gre";
263 case IPPROTO_SCTP: return "sctp";
264 case IPPROTO_UDPLITE: return "udplite";
270 /* return 0 on success, 1 in case of error */
/*
 * seq_file ->show: emit one conntrack entry as a single line.  Only the
 * ORIGINAL-direction hash entry of conntracks belonging to this netns
 * is printed.  A reference is taken for the duration of the print and
 * entries that are due for garbage collection are skipped (the release
 * paths are in lines elided from this chunk).
 */
271 static int ct_seq_show(struct seq_file *s, void *v)
273 struct nf_conntrack_tuple_hash *hash = v;
274 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
275 const struct nf_conntrack_l4proto *l4proto;
276 struct net *net = seq_file_net(s);
/* skip entries whose refcount already hit zero (being destroyed) */
280 if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
283 if (nf_ct_should_gc(ct)) {
288 /* we only want to print DIR_ORIGINAL */
289 if (NF_CT_DIRECTION(hash))
292 if (!net_eq(nf_ct_net(ct), net))
295 l4proto = __nf_ct_l4proto_find(nf_ct_protonum(ct));
/* "<l3name> <l3num> <l4name> <l4num> " column prefix */
299 seq_printf(s, "%-8s %u %-8s %u ",
300 l3proto_name(nf_ct_l3num(ct)), nf_ct_l3num(ct),
301 l4proto_name(l4proto->l4proto), nf_ct_protonum(ct));
/* offloaded flows don't age out normally, so no timeout column */
303 if (!test_bit(IPS_OFFLOAD_BIT, &ct->status))
304 seq_printf(s, "%ld ", nf_ct_expires(ct) / HZ);
/* optional per-protocol state, e.g. TCP state name */
306 if (l4proto->print_conntrack)
307 l4proto->print_conntrack(s, ct);
309 print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
312 ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
314 if (seq_has_overflowed(s))
317 if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
320 if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
321 seq_puts(s, "[UNREPLIED] ");
323 print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l4proto);
325 ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
327 if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
330 if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
331 seq_puts(s, "[OFFLOAD] ");
332 else if (test_bit(IPS_ASSURED_BIT, &ct->status))
333 seq_puts(s, "[ASSURED] ")
335 if (seq_has_overflowed(s))
338 #if defined(CONFIG_NF_CONNTRACK_MARK)
339 seq_printf(s, "mark=%u ", ct->mark);
342 ct_show_secctx(s, ct);
343 ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
344 ct_show_delta_time(s, ct);
/* print the refcount including the one we hold during this dump */
346 seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
348 if (seq_has_overflowed(s))
357 static const struct seq_operations ct_seq_ops = {
358 .start = ct_seq_start,
/*
 * ->start for /proc/net/stat/nf_conntrack: position 0 yields the header
 * token; positions >= 1 map to the (pos-1)'th possible CPU's per-cpu
 * statistics block.  Impossible CPU ids are skipped.
 */
364 static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
366 struct net *net = seq_file_net(seq);
370 return SEQ_START_TOKEN;
372 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
373 if (!cpu_possible(cpu))
376 return per_cpu_ptr(net->ct.stat, cpu);
/*
 * ->next: advance to the next possible CPU's stats block; returns NULL
 * past the last CPU (the *pos update is in lines elided here).
 */
382 static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
384 struct net *net = seq_file_net(seq);
387 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
388 if (!cpu_possible(cpu))
391 return per_cpu_ptr(net->ct.stat, cpu);
/* ->stop: nothing to release for the per-cpu stats walk */
397 static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
/*
 * ->show: print the column header on the start token, otherwise one row
 * of hex per-cpu counters; the first column is the global conntrack
 * entry count (the argument list of the second seq_printf is elided).
 */
401 static int ct_cpu_seq_show(struct seq_file *seq, void *v)
403 struct net *net = seq_file_net(seq);
404 unsigned int nr_conntracks = atomic_read(&net->ct.count);
405 const struct ip_conntrack_stat *st = v;
407 if (v == SEQ_START_TOKEN) {
408 seq_puts(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
412 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
413 "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
436 static const struct seq_operations ct_cpu_seq_ops = {
437 .start = ct_cpu_seq_start,
438 .next = ct_cpu_seq_next,
439 .stop = ct_cpu_seq_stop,
440 .show = ct_cpu_seq_show,
/*
 * Create the per-netns proc entries: /proc/net/nf_conntrack (0440, and
 * chowned to the netns-mapped root uid/gid when those map cleanly) and
 * /proc/net/stat/nf_conntrack (0444).  On failure of the second entry
 * the first one is removed again via the out_stat_nf_conntrack label.
 */
443 static int nf_conntrack_standalone_init_proc(struct net *net)
445 struct proc_dir_entry *pde;
449 pde = proc_create_net("nf_conntrack", 0440, net->proc_net, &ct_seq_ops,
450 sizeof(struct ct_iter_state));
452 goto out_nf_conntrack;
/* give the entry to the netns owner's root, not global root */
454 root_uid = make_kuid(net->user_ns, 0);
455 root_gid = make_kgid(net->user_ns, 0);
456 if (uid_valid(root_uid) && gid_valid(root_gid))
457 proc_set_user(pde, root_uid, root_gid);
459 pde = proc_create_net("nf_conntrack", 0444, net->proc_net_stat,
460 &ct_cpu_seq_ops, sizeof(struct seq_net_private));
462 goto out_stat_nf_conntrack;
465 out_stat_nf_conntrack:
466 remove_proc_entry("nf_conntrack", net->proc_net);
/* Remove both proc entries created by nf_conntrack_standalone_init_proc(). */
471 static void nf_conntrack_standalone_fini_proc(struct net *net)
473 remove_proc_entry("nf_conntrack", net->proc_net_stat);
474 remove_proc_entry("nf_conntrack", net->proc_net);
/* Stubs used when CONFIG_NF_CONNTRACK_PROCFS is not set. */
477 static int nf_conntrack_standalone_init_proc(struct net *net)
482 static void nf_conntrack_standalone_fini_proc(struct net *net)
485 #endif /* CONFIG_NF_CONNTRACK_PROCFS */
490 /* Log invalid packets of a given protocol */
/* Range clamp (0..255) for the nf_conntrack_log_invalid sysctl below. */
491 static int log_invalid_proto_min __read_mostly;
492 static int log_invalid_proto_max __read_mostly = 255;
494 /* size the user *wants to set */
495 static unsigned int nf_conntrack_htable_size_user __read_mostly;
/*
 * Sysctl handler for nf_conntrack_buckets: read the value as an int,
 * and on write try to resize the hash table.  The resize may not honor
 * the exact request, so the user-visible variable is rewritten with the
 * size conntrack actually chose.
 */
498 nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
499 void __user *buffer, size_t *lenp, loff_t *ppos)
503 ret = proc_dointvec(table, write, buffer, lenp, ppos);
504 if (ret < 0 || !write)
507 /* update ret, we might not be able to satisfy request */
508 ret = nf_conntrack_hash_resize(nf_conntrack_htable_size_user);
510 /* update it to the actual value used by conntrack */
511 nf_conntrack_htable_size_user = nf_conntrack_htable_size;
/* Header for the legacy net.nf_conntrack_max table registered at init. */
515 static struct ctl_table_header *nf_ct_netfilter_header;
/*
 * Template for the net.netfilter.nf_conntrack_* sysctls.  Per-netns
 * copies are kmemdup'd and have their .data pointers rewired in
 * nf_conntrack_standalone_init_sysctl() — the entry *positions* in this
 * array are therefore significant (see the table[N].data assignments
 * there).  Mode fields are in lines not visible in this chunk.
 */
517 static struct ctl_table nf_ct_sysctl_table[] = {
519 .procname = "nf_conntrack_max",
520 .data = &nf_conntrack_max,
521 .maxlen = sizeof(int),
523 .proc_handler = proc_dointvec,
526 .procname = "nf_conntrack_count",
527 .data = &init_net.ct.count,
528 .maxlen = sizeof(int),
530 .proc_handler = proc_dointvec,
533 .procname = "nf_conntrack_buckets",
534 .data = &nf_conntrack_htable_size_user,
535 .maxlen = sizeof(unsigned int),
/* custom handler: resizing needs more than a plain int write */
537 .proc_handler = nf_conntrack_hash_sysctl,
540 .procname = "nf_conntrack_checksum",
541 .data = &init_net.ct.sysctl_checksum,
542 .maxlen = sizeof(unsigned int),
544 .proc_handler = proc_dointvec,
547 .procname = "nf_conntrack_log_invalid",
548 .data = &init_net.ct.sysctl_log_invalid,
549 .maxlen = sizeof(unsigned int),
/* clamped to log_invalid_proto_min..max (0..255) */
551 .proc_handler = proc_dointvec_minmax,
552 .extra1 = &log_invalid_proto_min,
553 .extra2 = &log_invalid_proto_max,
556 .procname = "nf_conntrack_expect_max",
557 .data = &nf_ct_expect_max,
558 .maxlen = sizeof(int),
560 .proc_handler = proc_dointvec,
/*
 * Legacy top-level net.nf_conntrack_max sysctl (same backing variable
 * as the netfilter-namespaced one above); registered once for init_net
 * in nf_conntrack_standalone_init().
 */
565 static struct ctl_table nf_ct_netfilter_table[] = {
567 .procname = "nf_conntrack_max",
568 .data = &nf_conntrack_max,
569 .maxlen = sizeof(int),
571 .proc_handler = proc_dointvec,
/*
 * Register the per-netns copy of nf_ct_sysctl_table.  The copy's .data
 * pointers are rewired by array position: [1] count, [3] checksum,
 * [4] log_invalid point into this netns' state.  Non-init netns get a
 * read-only buckets entry; for netns owned by a non-init user namespace
 * the first procname is NULLed, which truncates the exported table at
 * that entry.
 */
576 static int nf_conntrack_standalone_init_sysctl(struct net *net)
578 struct ctl_table *table;
580 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
585 table[1].data = &net->ct.count;
586 table[3].data = &net->ct.sysctl_checksum;
587 table[4].data = &net->ct.sysctl_log_invalid;
589 /* Don't export sysctls to unprivileged users */
590 if (net->user_ns != &init_user_ns)
591 table[0].procname = NULL;
/* only the initial netns may resize the global hash table */
593 if (!net_eq(&init_net, net))
594 table[2].mode = 0444;
596 net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
597 if (!net->ct.sysctl_header)
598 goto out_unregister_netfilter;
602 out_unregister_netfilter:
/*
 * Unregister the per-netns sysctl table; the kmemdup'd table is
 * recovered via ctl_table_arg (its kfree is in lines elided here).
 */
608 static void nf_conntrack_standalone_fini_sysctl(struct net *net)
610 struct ctl_table *table;
612 table = net->ct.sysctl_header->ctl_table_arg;
613 unregister_net_sysctl_table(net->ct.sysctl_header);
/* Stubs used when CONFIG_SYSCTL is not set. */
617 static int nf_conntrack_standalone_init_sysctl(struct net *net)
622 static void nf_conntrack_standalone_fini_sysctl(struct net *net)
625 #endif /* CONFIG_SYSCTL */
/*
 * Per-netns init: core conntrack state, then the proc entries, then the
 * sysctls — with checksum validation enabled and invalid-packet logging
 * disabled by default.  The trailing labels unwind in reverse order on
 * failure.
 */
627 static int nf_conntrack_pernet_init(struct net *net)
631 ret = nf_conntrack_init_net(net);
635 ret = nf_conntrack_standalone_init_proc(net);
639 net->ct.sysctl_checksum = 1;
640 net->ct.sysctl_log_invalid = 0;
641 ret = nf_conntrack_standalone_init_sysctl(net);
/* error unwind: proc first, then core state */
648 nf_conntrack_standalone_fini_proc(net);
650 nf_conntrack_cleanup_net(net);
/*
 * Batched per-netns teardown: drop each netns' sysctls and proc entries
 * first, then clean up conntrack state for the whole list in one pass.
 */
655 static void nf_conntrack_pernet_exit(struct list_head *net_exit_list)
659 list_for_each_entry(net, net_exit_list, exit_list) {
660 nf_conntrack_standalone_fini_sysctl(net);
661 nf_conntrack_standalone_fini_proc(net);
663 nf_conntrack_cleanup_net_list(net_exit_list);
/*
 * Pernet registration: per-netns struct nf_conntrack_net is allocated
 * by the pernet core (.size) and looked up via nf_conntrack_net_id.
 */
666 static struct pernet_operations nf_conntrack_net_ops = {
667 .init = nf_conntrack_pernet_init,
668 .exit_batch = nf_conntrack_pernet_exit,
669 .id = &nf_conntrack_net_id,
670 .size = sizeof(struct nf_conntrack_net),
/*
 * Module init: begin core conntrack bring-up, build-time-check the
 * skb->_nfct pointer/info mask layout, register the legacy
 * net.nf_conntrack_max sysctl for init_net, seed the user-visible hash
 * size, register the pernet ops, then finish core init.  The trailing
 * statements unwind the registrations in reverse on failure.
 */
673 static int __init nf_conntrack_standalone_init(void)
675 int ret = nf_conntrack_init_start();
679 BUILD_BUG_ON(SKB_NFCT_PTRMASK != NFCT_PTRMASK);
680 BUILD_BUG_ON(NFCT_INFOMASK <= IP_CT_NUMBER);
683 nf_ct_netfilter_header =
684 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
685 if (!nf_ct_netfilter_header) {
686 pr_err("nf_conntrack: can't register to sysctl.\n");
691 nf_conntrack_htable_size_user = nf_conntrack_htable_size;
694 ret = register_pernet_subsys(&nf_conntrack_net_ops);
698 nf_conntrack_init_end();
/* error unwind */
703 unregister_net_sysctl_table(nf_ct_netfilter_header);
706 nf_conntrack_cleanup_end();
/*
 * Module exit: mirror of init — start core teardown, unregister the
 * pernet ops and the legacy sysctl, then finish core teardown.
 */
711 static void __exit nf_conntrack_standalone_fini(void)
713 nf_conntrack_cleanup_start();
714 unregister_pernet_subsys(&nf_conntrack_net_ops);
716 unregister_net_sysctl_table(nf_ct_netfilter_header);
718 nf_conntrack_cleanup_end();
721 module_init(nf_conntrack_standalone_init);
722 module_exit(nf_conntrack_standalone_fini);