#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/tc_act/tc_csum.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static struct work_struct nf_flow_offload_work;
static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
static LIST_HEAD(flow_offload_pending_list);
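/*
 * Offload requests are not serviced inline: they are queued on a
 * pending list under a spinlock and handled from a workqueue, one
 * struct flow_offload_work per request.
 */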
struct flow_offload_work {
        struct list_head        list;
        enum flow_cls_command   cmd;
        int                     priority;
        struct nf_flowtable     *flowtable;
        struct flow_offload     *flow;
};

struct nf_flow_key {
        struct flow_dissector_key_control       control;
        struct flow_dissector_key_basic         basic;
        union {
                struct flow_dissector_key_ipv4_addrs    ipv4;
        };
        struct flow_dissector_key_tcp           tcp;
        struct flow_dissector_key_ports         tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct nf_flow_match {
        struct flow_dissector   dissector;
        struct nf_flow_key      key;
        struct nf_flow_key      mask;
};

struct nf_flow_rule {
        struct nf_flow_match    match;
        struct flow_rule        *rule;
};
#define NF_FLOW_DISSECTOR(__match, __type, __field)     \
        (__match)->dissector.offset[__type] =           \
                offsetof(struct nf_flow_key, __field)
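/*
 * Translate a flow tuple into a flow dissector key/mask pair. Only
 * IPv4 with TCP or UDP can be expressed here; other layer 3/4
 * protocols are rejected with -EOPNOTSUPP.
 */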
static int nf_flow_rule_match(struct nf_flow_match *match,
                              const struct flow_offload_tuple *tuple)
{
        struct nf_flow_key *mask = &match->mask;
        struct nf_flow_key *key = &match->key;

        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);

        switch (tuple->l3proto) {
        case AF_INET:
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                key->basic.n_proto = htons(ETH_P_IP);
                key->ipv4.src = tuple->src_v4.s_addr;
                mask->ipv4.src = 0xffffffff;
                key->ipv4.dst = tuple->dst_v4.s_addr;
                mask->ipv4.dst = 0xffffffff;
                break;
        default:
                return -EOPNOTSUPP;
        }
        mask->basic.n_proto = 0xffff;

        switch (tuple->l4proto) {
        case IPPROTO_TCP:
                /* Match only packets with RST and FIN clear. */
                mask->tcp.flags = TCP_FLAG_RST | TCP_FLAG_FIN;
                match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
                break;
        case IPPROTO_UDP:
                break;
        default:
                return -EOPNOTSUPP;
        }

        key->basic.ip_proto = tuple->l4proto;
        mask->basic.ip_proto = 0xff;

        key->tp.src = tuple->src_port;
        mask->tp.src = 0xffff;
        key->tp.dst = tuple->dst_port;
        mask->tp.dst = 0xffff;

        match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
                                      BIT(FLOW_DISSECTOR_KEY_BASIC) |
                                      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
                                      BIT(FLOW_DISSECTOR_KEY_PORTS);
        return 0;
}
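/*
 * Mangle actions work on 32-bit masked words, as in the tc pedit
 * action; fields wider than 32 bits (MAC and IPv6 addresses) are
 * split across several consecutive entries.
 */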
static void flow_offload_mangle(struct flow_action_entry *entry,
                                enum flow_action_mangle_base htype,
                                u32 offset, u8 *value, u8 *mask)
{
        entry->id = FLOW_ACTION_MANGLE;
        entry->mangle.htype = htype;
        entry->mangle.offset = offset;
        memcpy(&entry->mangle.mask, mask, sizeof(u32));
        memcpy(&entry->mangle.val, value, sizeof(u32));
}
static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
        int i = flow_rule->rule->action.num_entries++;

        return &flow_rule->rule->action.entries[i];
}
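/*
 * Rewrite the ethernet source address to that of the output device.
 * Note the reverse direction (!dir): the input interface of the
 * opposite tuple is the interface this direction transmits through.
 */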
static int flow_offload_eth_src(struct net *net,
                                const struct flow_offload *flow,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
        struct net_device *dev;
        u32 mask, val;
        u16 val16;

        dev = dev_get_by_index(net, tuple->iifidx);
        if (!dev)
                return -ENOENT;

        mask = ~0xffff0000;
        memcpy(&val16, dev->dev_addr, 2);
        val = val16 << 16;
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            (u8 *)&val, (u8 *)&mask);

        mask = ~0xffffffff;
        memcpy(&val, dev->dev_addr + 2, 4);
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
                            (u8 *)&val, (u8 *)&mask);
        dev_put(dev);

        return 0;
}
static int flow_offload_eth_dst(struct net *net,
                                const struct flow_offload *flow,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple;
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
        struct neighbour *n;
        u32 mask, val;
        u16 val16;

        n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4);
        if (!n)
                return -ENOENT;

        mask = ~0xffffffff;
        memcpy(&val, n->ha, 4);
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
                            (u8 *)&val, (u8 *)&mask);

        mask = ~0x0000ffff;
        memcpy(&val16, n->ha + 4, 2);
        val = val16;
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            (u8 *)&val, (u8 *)&mask);
        neigh_release(n);

        return 0;
}
static void flow_offload_ipv4_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffffffff);
        __be32 addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
                offset = offsetof(struct iphdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
                offset = offsetof(struct iphdr, daddr);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
                            (u8 *)&addr, (u8 *)&mask);
}
static void flow_offload_ipv4_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffffffff);
        __be32 addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
                offset = offsetof(struct iphdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
                offset = offsetof(struct iphdr, saddr);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
                            (u8 *)&addr, (u8 *)&mask);
}
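/*
 * An IPv6 address does not fit in one mangle entry: emit one 32-bit
 * masked write per word of the address.
 */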
static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
                                     unsigned int offset,
                                     u8 *addr, u8 *mask)
{
        struct flow_action_entry *entry;
        int i;

        for (i = 0; i < sizeof(struct in6_addr); i += sizeof(u32)) {
                entry = flow_action_entry_next(flow_rule);
                flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
                                    offset + i, &addr[i], mask);
        }
}
static void flow_offload_ipv6_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        u32 mask = ~htonl(0xffffffff);
        const u8 *addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        default:
                return;
        }

        flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
}
static void flow_offload_ipv6_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        u32 mask = ~htonl(0xffffffff);
        const u8 *addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        default:
                return;
        }

        flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask);
}
static int flow_offload_l4proto(const struct flow_offload *flow)
{
        u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
        u8 type = 0;

        switch (protonum) {
        case IPPROTO_TCP:
                type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
                break;
        case IPPROTO_UDP:
                type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
                break;
        default:
                break;
        }

        return type;
}
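/*
 * Source and destination ports share the first 32-bit word of the
 * TCP/UDP header, so both are mangled at offset 0 with a mask that
 * selects the relevant half of the word.
 */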
static void flow_offload_port_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffff0000);
        __be16 port;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
                offset = 0; /* offsetof(struct tcphdr, source); */
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
                offset = 0; /* offsetof(struct tcphdr, dest); */
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            (u8 *)&port, (u8 *)&mask);
}
static void flow_offload_port_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffff);
        __be16 port;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
                offset = 0; /* offsetof(struct tcphdr, dest); */
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
                offset = 0; /* offsetof(struct tcphdr, source); */
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            (u8 *)&port, (u8 *)&mask);
}
static void flow_offload_ipv4_checksum(struct net *net,
                                       const struct flow_offload *flow,
                                       struct nf_flow_rule *flow_rule)
{
        u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);

        entry->id = FLOW_ACTION_CSUM;
        entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;

        switch (protonum) {
        case IPPROTO_TCP:
                entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
                break;
        case IPPROTO_UDP:
                entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
                break;
        }
}
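/* Deliver the packet to the output device of the cached route. */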
static void flow_offload_redirect(const struct flow_offload *flow,
                                  enum flow_offload_tuple_dir dir,
                                  struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        struct rtable *rt;

        rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
        entry->id = FLOW_ACTION_REDIRECT;
        entry->dev = rt->dst.dev;
        dev_hold(rt->dst.dev);
}
int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
                            enum flow_offload_tuple_dir dir,
                            struct nf_flow_rule *flow_rule)
{
        if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
            flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
                return -1;

        if (flow->flags & FLOW_OFFLOAD_SNAT) {
                flow_offload_ipv4_snat(net, flow, dir, flow_rule);
                flow_offload_port_snat(net, flow, dir, flow_rule);
        }
        if (flow->flags & FLOW_OFFLOAD_DNAT) {
                flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
                flow_offload_port_dnat(net, flow, dir, flow_rule);
        }
        if (flow->flags & FLOW_OFFLOAD_SNAT ||
            flow->flags & FLOW_OFFLOAD_DNAT)
                flow_offload_ipv4_checksum(net, flow, flow_rule);

        flow_offload_redirect(flow, dir, flow_rule);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);
int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
                            enum flow_offload_tuple_dir dir,
                            struct nf_flow_rule *flow_rule)
{
        if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
            flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
                return -1;

        if (flow->flags & FLOW_OFFLOAD_SNAT) {
                flow_offload_ipv6_snat(net, flow, dir, flow_rule);
                flow_offload_port_snat(net, flow, dir, flow_rule);
        }
        if (flow->flags & FLOW_OFFLOAD_DNAT) {
                flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
                flow_offload_port_dnat(net, flow, dir, flow_rule);
        }

        flow_offload_redirect(flow, dir, flow_rule);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);
#define NF_FLOW_RULE_ACTION_MAX 16
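/*
 * Build the flow_rule (match plus action list) for one direction of a
 * flow. The flowtable type's ->action() callback emits the actions,
 * bounded by NF_FLOW_RULE_ACTION_MAX entries.
 */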
static struct nf_flow_rule *
nf_flow_offload_rule_alloc(struct net *net,
                           const struct flow_offload_work *offload,
                           enum flow_offload_tuple_dir dir)
{
        const struct nf_flowtable *flowtable = offload->flowtable;
        const struct flow_offload *flow = offload->flow;
        const struct flow_offload_tuple *tuple;
        struct nf_flow_rule *flow_rule;
        int err;

        flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
        if (!flow_rule)
                goto err_flow;

        flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
        if (!flow_rule->rule)
                goto err_flow_rule;

        flow_rule->rule->match.dissector = &flow_rule->match.dissector;
        flow_rule->rule->match.mask = &flow_rule->match.mask;
        flow_rule->rule->match.key = &flow_rule->match.key;

        tuple = &flow->tuplehash[dir].tuple;
        err = nf_flow_rule_match(&flow_rule->match, tuple);
        if (err < 0)
                goto err_flow_match;

        flow_rule->rule->action.num_entries = 0;
        if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
                goto err_flow_match;

        return flow_rule;

err_flow_match:
        kfree(flow_rule->rule);
err_flow_rule:
        kfree(flow_rule);
err_flow:
        return NULL;
}
static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry;
        int i;

        for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
                entry = &flow_rule->rule->action.entries[i];
                if (entry->id != FLOW_ACTION_REDIRECT)
                        continue;

                /* Drop the reference taken by flow_offload_redirect(). */
                dev_put(entry->dev);
        }
        kfree(flow_rule->rule);
        kfree(flow_rule);
}
static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
{
        int i;

        for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
                __nf_flow_offload_destroy(flow_rule[i]);
}
static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
                                 struct nf_flow_rule *flow_rule[])
{
        struct net *net = read_pnet(&offload->flowtable->net);

        flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
                                                  FLOW_OFFLOAD_DIR_ORIGINAL);
        if (!flow_rule[0])
                return -ENOMEM;

        flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
                                                  FLOW_OFFLOAD_DIR_REPLY);
        if (!flow_rule[1]) {
                __nf_flow_offload_destroy(flow_rule[0]);
                return -ENOMEM;
        }

        return 0;
}
static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
                                 __be16 proto, int priority,
                                 enum flow_cls_command cmd,
                                 const struct flow_offload_tuple *tuple,
                                 struct netlink_ext_ack *extack)
{
        cls_flow->common.protocol = proto;
        cls_flow->common.prio = priority;
        cls_flow->common.extack = extack;
        cls_flow->command = cmd;
        cls_flow->cookie = (unsigned long)tuple;
}
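/*
 * Offer one direction of a flow to every driver bound to the
 * flowtable block. The tuple address doubles as the rule cookie, so
 * later destroy and stats requests can name the same rule.
 */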
static int flow_offload_tuple_add(struct flow_offload_work *offload,
                                  struct nf_flow_rule *flow_rule,
                                  enum flow_offload_tuple_dir dir)
{
        struct nf_flowtable *flowtable = offload->flowtable;
        struct flow_cls_offload cls_flow = {};
        struct flow_block_cb *block_cb;
        struct netlink_ext_ack extack;
        __be16 proto = ETH_P_ALL;
        int err, i = 0;

        nf_flow_offload_init(&cls_flow, proto, offload->priority,
                             FLOW_CLS_REPLACE,
                             &offload->flow->tuplehash[dir].tuple, &extack);
        cls_flow.rule = flow_rule->rule;

        list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
                err = block_cb->cb(TC_SETUP_FT, &cls_flow,
                                   block_cb->cb_priv);
                if (err < 0)
                        continue;

                i++;
        }

        return i;
}
static void flow_offload_tuple_del(struct flow_offload_work *offload,
                                   enum flow_offload_tuple_dir dir)
{
        struct nf_flowtable *flowtable = offload->flowtable;
        struct flow_cls_offload cls_flow = {};
        struct flow_block_cb *block_cb;
        struct netlink_ext_ack extack;
        __be16 proto = ETH_P_ALL;

        nf_flow_offload_init(&cls_flow, proto, offload->priority,
                             FLOW_CLS_DESTROY,
                             &offload->flow->tuplehash[dir].tuple, &extack);

        list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
                block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);

        offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
}
static int flow_offload_rule_add(struct flow_offload_work *offload,
                                 struct nf_flow_rule *flow_rule[])
{
        int ok_count = 0;

        ok_count += flow_offload_tuple_add(offload, flow_rule[0],
                                           FLOW_OFFLOAD_DIR_ORIGINAL);
        ok_count += flow_offload_tuple_add(offload, flow_rule[1],
                                           FLOW_OFFLOAD_DIR_REPLY);
        if (ok_count == 0)
                return -ENOENT;

        return 0;
}
static int flow_offload_work_add(struct flow_offload_work *offload)
{
        struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
        int err;

        err = nf_flow_offload_alloc(offload, flow_rule);
        if (err < 0)
                return -ENOMEM;

        err = flow_offload_rule_add(offload, flow_rule);

        nf_flow_offload_destroy(flow_rule);

        return err;
}
static void flow_offload_work_del(struct flow_offload_work *offload)
{
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
}
static void flow_offload_tuple_stats(struct flow_offload_work *offload,
                                     enum flow_offload_tuple_dir dir,
                                     struct flow_stats *stats)
{
        struct nf_flowtable *flowtable = offload->flowtable;
        struct flow_cls_offload cls_flow = {};
        struct flow_block_cb *block_cb;
        struct netlink_ext_ack extack;
        __be16 proto = ETH_P_ALL;

        nf_flow_offload_init(&cls_flow, proto, offload->priority,
                             FLOW_CLS_STATS,
                             &offload->flow->tuplehash[dir].tuple, &extack);

        list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
                block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv);
        memcpy(stats, &cls_flow.stats, sizeof(*stats));
}
static void flow_offload_work_stats(struct flow_offload_work *offload)
{
        struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
        u64 lastused;

        flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
        flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);

        /* Refresh the flow timeout from the most recent hardware activity. */
        lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
        offload->flow->timeout = max_t(u64, offload->flow->timeout,
                                       lastused + NF_FLOW_TIMEOUT);
}
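/*
 * Workqueue handler: detach the pending list under the lock, then
 * service each request without holding it.
 */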
static void flow_offload_work_handler(struct work_struct *work)
{
        struct flow_offload_work *offload, *next;
        LIST_HEAD(offload_pending_list);
        int ret;

        spin_lock_bh(&flow_offload_pending_list_lock);
        list_replace_init(&flow_offload_pending_list, &offload_pending_list);
        spin_unlock_bh(&flow_offload_pending_list_lock);

        list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
                switch (offload->cmd) {
                case FLOW_CLS_REPLACE:
                        ret = flow_offload_work_add(offload);
                        if (ret < 0)
                                offload->flow->flags &= ~FLOW_OFFLOAD_HW;
                        break;
                case FLOW_CLS_DESTROY:
                        flow_offload_work_del(offload);
                        break;
                case FLOW_CLS_STATS:
                        flow_offload_work_stats(offload);
                        break;
                default:
                        WARN_ON_ONCE(1);
                }
                list_del(&offload->list);
                kfree(offload);
        }
}
static void flow_offload_queue_work(struct flow_offload_work *offload)
{
        spin_lock_bh(&flow_offload_pending_list_lock);
        list_add_tail(&offload->list, &flow_offload_pending_list);
        spin_unlock_bh(&flow_offload_pending_list_lock);

        schedule_work(&nf_flow_offload_work);
}
void nf_flow_offload_add(struct nf_flowtable *flowtable,
                         struct flow_offload *flow)
{
        struct flow_offload_work *offload;

        offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
        if (!offload)
                return;

        offload->cmd = FLOW_CLS_REPLACE;
        offload->flow = flow;
        offload->priority = flowtable->priority;
        offload->flowtable = flowtable;
        flow->flags |= FLOW_OFFLOAD_HW;

        flow_offload_queue_work(offload);
}
void nf_flow_offload_del(struct nf_flowtable *flowtable,
                         struct flow_offload *flow)
{
        struct flow_offload_work *offload;

        offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
        if (!offload)
                return;

        offload->cmd = FLOW_CLS_DESTROY;
        offload->flow = flow;
        offload->flow->flags |= FLOW_OFFLOAD_HW_DYING;
        offload->flowtable = flowtable;

        flow_offload_queue_work(offload);
}
void nf_flow_offload_stats(struct nf_flowtable *flowtable,
                           struct flow_offload *flow)
{
        struct flow_offload_work *offload;
        s64 delta;

        /* Only poll the driver once the flow has consumed a tenth of
         * its timeout, and never for a flow already being torn down.
         */
        delta = flow->timeout - jiffies;
        if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
            flow->flags & FLOW_OFFLOAD_HW_DYING)
                return;

        offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
        if (!offload)
                return;

        offload->cmd = FLOW_CLS_STATS;
        offload->flow = flow;
        offload->flowtable = flowtable;

        flow_offload_queue_work(offload);
}
void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
        if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)
                flush_work(&nf_flow_offload_work);
}
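/*
 * Splice the driver callbacks gathered during TC_SETUP_BLOCK into the
 * flowtable's block list on bind, and release them on unbind.
 */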
static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
                                     struct flow_block_offload *bo,
                                     enum flow_block_command cmd)
{
        struct flow_block_cb *block_cb, *next;
        int err = 0;

        switch (cmd) {
        case FLOW_BLOCK_BIND:
                list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
                break;
        case FLOW_BLOCK_UNBIND:
                list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                        list_del(&block_cb->list);
                        flow_block_cb_free(block_cb);
                }
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }

        return err;
}
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
                                struct net_device *dev,
                                enum flow_block_command cmd)
{
        struct netlink_ext_ack extack = {};
        struct flow_block_offload bo = {};
        int err;

        if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
                return 0;

        if (!dev->netdev_ops->ndo_setup_tc)
                return -EOPNOTSUPP;

        bo.net = dev_net(dev);
        bo.block = &flowtable->flow_block;
        bo.command = cmd;
        bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        bo.extack = &extack;
        INIT_LIST_HEAD(&bo.cb_list);

        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
        if (err < 0)
                return err;

        return nf_flow_table_block_setup(flowtable, &bo, cmd);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
int nf_flow_table_offload_init(void)
{
        INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);

        return 0;
}
void nf_flow_table_offload_exit(void)
{
        struct flow_offload_work *offload, *next;
        LIST_HEAD(offload_pending_list);

        cancel_work_sync(&nf_flow_offload_work);

        list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
                list_del(&offload->list);
                kfree(offload);
        }
}