linux.git: net/netfilter/nf_flow_table_core.c
netfilter: nf_flow_table: conntrack picks up expired flows

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

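/*
 * Each offloaded flow is allocated together with a back-pointer to its
 * conntrack entry, so the conntrack reference can be dropped (and the
 * entry freed via RCU) once the flow leaves the flow table.
 */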
struct flow_offload_entry {
        struct flow_offload     flow;
        struct nf_conn          *ct;
        struct rcu_head         rcu_head;
};

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

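/*
 * Fill one direction of the flow tuple from the conntrack tuple and the
 * route supplied by the caller: addresses, ports, L3/L4 protocol, the MTU
 * of the output route, and the input interface taken from the opposite
 * direction's dst.
 */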
static void
flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
                      struct nf_flow_route *route,
                      enum flow_offload_tuple_dir dir)
{
        struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
        struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
        struct dst_entry *other_dst = route->tuple[!dir].dst;
        struct dst_entry *dst = route->tuple[dir].dst;

        ft->dir = dir;

        switch (ctt->src.l3num) {
        case NFPROTO_IPV4:
                ft->src_v4 = ctt->src.u3.in;
                ft->dst_v4 = ctt->dst.u3.in;
                ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
                break;
        case NFPROTO_IPV6:
                ft->src_v6 = ctt->src.u3.in6;
                ft->dst_v6 = ctt->dst.u3.in6;
                ft->mtu = ip6_dst_mtu_forward(dst);
                break;
        }

        ft->l3proto = ctt->src.l3num;
        ft->l4proto = ctt->dst.protonum;
        ft->src_port = ctt->src.u.tcp.port;
        ft->dst_port = ctt->dst.u.tcp.port;

        ft->iifidx = other_dst->dev->ifindex;
        ft->dst_cache = dst;
}

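/*
 * Allocate a flow table entry for @ct. Takes a reference on the conntrack
 * entry and on both routes; returns NULL if the conntrack entry is dying,
 * if the allocation fails or if either dst cannot be held. The SNAT/DNAT
 * flags are copied from the conntrack status so the datapath knows whether
 * headers need to be mangled.
 */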
struct flow_offload *
flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
{
        struct flow_offload_entry *entry;
        struct flow_offload *flow;

        if (unlikely(nf_ct_is_dying(ct) ||
            !atomic_inc_not_zero(&ct->ct_general.use)))
                return NULL;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                goto err_ct_refcnt;

        flow = &entry->flow;

        if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
                goto err_dst_cache_original;

        if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
                goto err_dst_cache_reply;

        entry->ct = ct;

        flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);

        if (ct->status & IPS_SRC_NAT)
                flow->flags |= FLOW_OFFLOAD_SNAT;
        if (ct->status & IPS_DST_NAT)
                flow->flags |= FLOW_OFFLOAD_DNAT;

        return flow;

err_dst_cache_reply:
        dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
err_dst_cache_original:
        kfree(entry);
err_ct_refcnt:
        nf_ct_put(ct);

        return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

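/*
 * When a flow is handed back from the flow table to conntrack, reset the
 * TCP state to ESTABLISHED and clear the tracked window state so conntrack
 * re-learns it; it did not see the packets that went through the fast path.
 */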
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
        tcp->state = TCP_CONNTRACK_ESTABLISHED;
        tcp->seen[0].td_maxwin = 0;
        tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)

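/*
 * Hand a flow back to conntrack: fix up the TCP state and give the
 * conntrack entry a fresh pickup timeout (120s for TCP, 30s for UDP) so it
 * is not reaped immediately after offload ends. Other protocols are left
 * untouched.
 */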
static void flow_offload_fixup_ct(struct nf_conn *ct)
{
        const struct nf_conntrack_l4proto *l4proto;
        unsigned int timeout;
        int l4num;

        l4num = nf_ct_protonum(ct);
        if (l4num == IPPROTO_TCP)
                flow_offload_fixup_tcp(&ct->proto.tcp);

        l4proto = nf_ct_l4proto_find(l4num);
        if (!l4proto)
                return;

        if (l4num == IPPROTO_TCP)
                timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
        else if (l4num == IPPROTO_UDP)
                timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
        else
                return;

        ct->timeout = nfct_time_stamp + timeout;
}

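/*
 * Release a flow: drop both cached routes, delete the conntrack entry if
 * the flow is dying, drop the conntrack reference taken at allocation time
 * and free the entry after an RCU grace period.
 */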
void flow_offload_free(struct flow_offload *flow)
{
        struct flow_offload_entry *e;

        dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
        dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
        e = container_of(flow, struct flow_offload_entry, flow);
        if (flow->flags & FLOW_OFFLOAD_DYING)
                nf_ct_delete(e->ct, 0, 0);
        nf_ct_put(e->ct);
        kfree_rcu(e, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

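/*
 * rhashtable callbacks. Only the fields of struct flow_offload_tuple up to
 * (but not including) 'dir' take part in hashing and comparison, so both
 * directions of a flow are keyed purely by their packet-visible tuple.
 */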
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
        const struct flow_offload_tuple *tuple = data;

        return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
        const struct flow_offload_tuple_rhash *tuplehash = data;

        return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
                                 const void *ptr)
{
        const struct flow_offload_tuple *tuple = arg->key;
        const struct flow_offload_tuple_rhash *x = ptr;

        if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
                return 1;

        return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
        .head_offset            = offsetof(struct flow_offload_tuple_rhash, node),
        .hashfn                 = flow_offload_hash,
        .obj_hashfn             = flow_offload_hash_obj,
        .obj_cmpfn              = flow_offload_hash_cmp,
        .automatic_shrinking    = true,
};

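/*
 * Insert both directions of the flow into the flow table. If the reply
 * direction cannot be inserted, the original direction is backed out so the
 * table never holds a half-inserted flow. The timeout starts at the current
 * jiffies value and is checked by nf_flow_has_expired().
 */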
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
        int err;

        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
                                     nf_flow_offload_rhash_params);
        if (err < 0)
                return err;

        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[1].node,
                                     nf_flow_offload_rhash_params);
        if (err < 0) {
                rhashtable_remove_fast(&flow_table->rhashtable,
                                       &flow->tuplehash[0].node,
                                       nf_flow_offload_rhash_params);
                return err;
        }

        flow->timeout = (u32)jiffies;
        return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

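/*
 * A flow has expired when its timeout lies in the past. The subtraction is
 * done on u32 jiffies and evaluated as signed so the comparison survives
 * jiffies wrap-around.
 */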
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
        return (__s32)(flow->timeout - (u32)jiffies) <= 0;
}

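/*
 * Remove both directions from the flow table and clear IPS_OFFLOAD on the
 * conntrack entry. If the flow aged out (rather than being torn down
 * explicitly), conntrack picks it up again with a fresh pickup timeout
 * before the flow is freed.
 */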
static void flow_offload_del(struct nf_flowtable *flow_table,
                             struct flow_offload *flow)
{
        struct flow_offload_entry *e;

        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
                               nf_flow_offload_rhash_params);
        rhashtable_remove_fast(&flow_table->rhashtable,
                               &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
                               nf_flow_offload_rhash_params);

        e = container_of(flow, struct flow_offload_entry, flow);
        clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);

        if (nf_flow_has_expired(flow))
                flow_offload_fixup_ct(e->ct);

        flow_offload_free(flow);
}

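/*
 * Mark the flow for teardown and hand the connection back to conntrack
 * immediately; the garbage collector removes the entry from the table on
 * its next run.
 */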
void flow_offload_teardown(struct flow_offload *flow)
{
        struct flow_offload_entry *e;

        flow->flags |= FLOW_OFFLOAD_TEARDOWN;

        e = container_of(flow, struct flow_offload_entry, flow);
        flow_offload_fixup_ct(e->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

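/*
 * Look up a flow by tuple. Entries that are dying, being torn down, or
 * whose conntrack entry is dying are skipped, so the caller never
 * fast-paths packets for a connection that is on its way out.
 */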
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
                    struct flow_offload_tuple *tuple)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct flow_offload *flow;
        struct flow_offload_entry *e;
        int dir;

        tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
                                      nf_flow_offload_rhash_params);
        if (!tuplehash)
                return NULL;

        dir = tuplehash->tuple.dir;
        flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
        if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
                return NULL;

        e = container_of(flow, struct flow_offload_entry, flow);
        if (unlikely(nf_ct_is_dying(e->ct)))
                return NULL;

        return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

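/*
 * Walk the flow table and invoke @iter once per flow. Both directions of a
 * flow hash into the table, so reply-direction entries are skipped to avoid
 * visiting the same flow twice. -EAGAIN from the rhashtable walker means a
 * resize is in progress and the walk simply continues.
 */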
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
                      void (*iter)(struct flow_offload *flow, void *data),
                      void *data)
{
        struct flow_offload_tuple_rhash *tuplehash;
        struct rhashtable_iter hti;
        struct flow_offload *flow;
        int err = 0;

        rhashtable_walk_enter(&flow_table->rhashtable, &hti);
        rhashtable_walk_start(&hti);

        while ((tuplehash = rhashtable_walk_next(&hti))) {
                if (IS_ERR(tuplehash)) {
                        if (PTR_ERR(tuplehash) != -EAGAIN) {
                                err = PTR_ERR(tuplehash);
                                break;
                        }
                        continue;
                }
                if (tuplehash->tuple.dir)
                        continue;

                flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

                iter(flow, data);
        }
        rhashtable_walk_stop(&hti);
        rhashtable_walk_exit(&hti);

        return err;
}

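/*
 * Garbage collection: remove flows that have expired, whose conntrack entry
 * is dying, or that have been marked dying or torn down. The work item
 * requeues itself every second on the power-efficient workqueue.
 */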
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
        struct nf_flowtable *flow_table = data;
        struct flow_offload_entry *e;

        e = container_of(flow, struct flow_offload_entry, flow);
        if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
            (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
                flow_offload_del(flow_table, flow);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
        struct nf_flowtable *flow_table;

        flow_table = container_of(work, struct nf_flowtable, gc_work.work);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
        queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

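/*
 * Layer-4 checksum fixups for port rewriting. The port bytes themselves are
 * rewritten by nf_flow_snat_port()/nf_flow_dnat_port() below; these helpers
 * only update the TCP/UDP checksum. A zero UDP checksum means "no checksum"
 * and is left alone unless the skb carries CHECKSUM_PARTIAL.
 */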
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
                                __be16 port, __be16 new_port)
{
        struct tcphdr *tcph;

        if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
            skb_try_make_writable(skb, thoff + sizeof(*tcph)))
                return -1;

        tcph = (void *)(skb_network_header(skb) + thoff);
        inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);

        return 0;
}

static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
                                __be16 port, __be16 new_port)
{
        struct udphdr *udph;

        if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
            skb_try_make_writable(skb, thoff + sizeof(*udph)))
                return -1;

        udph = (void *)(skb_network_header(skb) + thoff);
        if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                inet_proto_csum_replace2(&udph->check, skb, port,
                                         new_port, true);
                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        return 0;
}

static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
                            u8 protocol, __be16 port, __be16 new_port)
{
        switch (protocol) {
        case IPPROTO_TCP:
                if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
                        return NF_DROP;
                break;
        case IPPROTO_UDP:
                if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
                        return NF_DROP;
                break;
        }

        return 0;
}

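/*
 * Rewrite the source (SNAT) or destination (DNAT) port of a packet that
 * matched an offloaded flow, using the port recorded in the opposite
 * direction's tuple, then update the L4 checksum via nf_flow_nat_port().
 */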
int nf_flow_snat_port(const struct flow_offload *flow,
                      struct sk_buff *skb, unsigned int thoff,
                      u8 protocol, enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
            skb_try_make_writable(skb, thoff + sizeof(*hdr)))
                return -1;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
                hdr->source = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
                hdr->dest = new_port;
                break;
        default:
                return -1;
        }

        return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

int nf_flow_dnat_port(const struct flow_offload *flow,
                      struct sk_buff *skb, unsigned int thoff,
                      u8 protocol, enum flow_offload_tuple_dir dir)
{
        struct flow_ports *hdr;
        __be16 port, new_port;

        if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
            skb_try_make_writable(skb, thoff + sizeof(*hdr)))
                return -1;

        hdr = (void *)(skb_network_header(skb) + thoff);

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = hdr->dest;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
                hdr->dest = new_port;
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = hdr->source;
                new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
                hdr->source = new_port;
                break;
        default:
                return -1;
        }

        return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

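/*
 * Initialise a flow table: set up the rhashtable, start the periodic
 * garbage collection work and register the table on the global list used
 * for device cleanup.
 */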
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
        int err;

        INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);

        err = rhashtable_init(&flowtable->rhashtable,
                              &nf_flow_offload_rhash_params);
        if (err < 0)
                return err;

        queue_delayed_work(system_power_efficient_wq,
                           &flowtable->gc_work, HZ);

        mutex_lock(&flowtable_lock);
        list_add(&flowtable->list, &flowtables);
        mutex_unlock(&flowtable_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

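/*
 * Device and table cleanup. With a NULL device every flow is torn down;
 * otherwise only flows in the device's netns that use the device as input
 * interface in either direction are marked dying. nf_flow_table_cleanup()
 * applies this to every registered flow table for the given device.
 */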
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
        struct net_device *dev = data;
        struct flow_offload_entry *e;

        e = container_of(flow, struct flow_offload_entry, flow);

        if (!dev) {
                flow_offload_teardown(flow);
                return;
        }
        if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
            (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
             flow->tuplehash[1].tuple.iifidx == dev->ifindex))
                flow_offload_dead(flow);
}

static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
                                          struct net_device *dev)
{
        nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
        flush_delayed_work(&flowtable->gc_work);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
        struct nf_flowtable *flowtable;

        mutex_lock(&flowtable_lock);
        list_for_each_entry(flowtable, &flowtables, list)
                nf_flow_table_iterate_cleanup(flowtable, dev);
        mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

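/*
 * Destroy a flow table: unregister it, stop the GC work, tear down all
 * remaining flows, run one final GC pass to free them and release the
 * rhashtable.
 */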
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
        mutex_lock(&flowtable_lock);
        list_del(&flow_table->list);
        mutex_unlock(&flowtable_lock);
        cancel_delayed_work_sync(&flow_table->gc_work);
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
        rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");