#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/tc_act/tc_csum.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
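
/*
 * Hardware offload path for the netfilter flowtable. Offload requests
 * may be issued from packet path context, so they are queued on a
 * pending list under a spinlock and handed to the drivers from a
 * workqueue.
 */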
static struct work_struct nf_flow_offload_work;
static DEFINE_SPINLOCK(flow_offload_pending_list_lock);
static LIST_HEAD(flow_offload_pending_list);

struct flow_offload_work {
	struct list_head	list;
	enum flow_cls_command	cmd;
	int			priority;
	struct nf_flowtable	*flowtable;
	struct flow_offload	*flow;
};

struct nf_flow_key {
	struct flow_dissector_key_control		control;
	struct flow_dissector_key_basic			basic;
	union {
		struct flow_dissector_key_ipv4_addrs	ipv4;
		struct flow_dissector_key_ipv6_addrs	ipv6;
	};
	struct flow_dissector_key_tcp			tcp;
	struct flow_dissector_key_ports			tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct nf_flow_match {
	struct flow_dissector	dissector;
	struct nf_flow_key	key;
	struct nf_flow_key	mask;
};

struct nf_flow_rule {
	struct nf_flow_match	match;
	struct flow_rule	*rule;
};
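
/* Record, for each flow dissector key id, the offset of the matching
 * field inside struct nf_flow_key. */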
#define NF_FLOW_DISSECTOR(__match, __type, __field)	\
	(__match)->dissector.offset[__type] =		\
		offsetof(struct nf_flow_key, __field)
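
/* Translate one flowtable tuple into a flow_dissector key/mask pair:
 * L3 addresses, L4 protocol and ports are matched exactly. For TCP,
 * the RST and FIN flags are masked in with a zero key, so teardown
 * packets miss the hardware rule and fall back to the software path. */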
static int nf_flow_rule_match(struct nf_flow_match *match,
			      const struct flow_offload_tuple *tuple)
{
	struct nf_flow_key *mask = &match->mask;
	struct nf_flow_key *key = &match->key;

	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);

	switch (tuple->l3proto) {
	case AF_INET:
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		key->basic.n_proto = htons(ETH_P_IP);
		key->ipv4.src = tuple->src_v4.s_addr;
		mask->ipv4.src = 0xffffffff;
		key->ipv4.dst = tuple->dst_v4.s_addr;
		mask->ipv4.dst = 0xffffffff;
		break;
	case AF_INET6:
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		key->basic.n_proto = htons(ETH_P_IPV6);
		key->ipv6.src = tuple->src_v6;
		memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
		key->ipv6.dst = tuple->dst_v6;
		memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
		break;
	default:
		return -EOPNOTSUPP;
	}
	match->dissector.used_keys |= BIT(key->control.addr_type);
	mask->basic.n_proto = 0xffff;

	switch (tuple->l4proto) {
	case IPPROTO_TCP:
		key->tcp.flags = 0;
		mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
		break;
	case IPPROTO_UDP:
		break;
	default:
		return -EOPNOTSUPP;
	}

	key->basic.ip_proto = tuple->l4proto;
	mask->basic.ip_proto = 0xff;

	key->tp.src = tuple->src_port;
	mask->tp.src = 0xffff;
	key->tp.dst = tuple->dst_port;
	mask->tp.dst = 0xffff;

	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) |
				      BIT(FLOW_DISSECTOR_KEY_BASIC) |
				      BIT(FLOW_DISSECTOR_KEY_PORTS);
	return 0;
}
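
/* Packet rewrites are expressed as masked 32-bit writes, the same
 * primitive tc pedit uses: bit positions cleared in the mask are
 * overwritten with the corresponding bits of the value. */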
static void flow_offload_mangle(struct flow_action_entry *entry,
				enum flow_action_mangle_base htype, u32 offset,
				const __be32 *value, const __be32 *mask)
{
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.offset = offset;
	memcpy(&entry->mangle.mask, mask, sizeof(u32));
	memcpy(&entry->mangle.val, value, sizeof(u32));
}

static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
	int i = flow_rule->rule->action.num_entries++;

	return &flow_rule->rule->action.entries[i];
}
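
/* Rewrite the ethernet header of forwarded packets: the source MAC
 * comes from the output device, the destination MAC from the neighbour
 * entry of the next hop. A 6-byte MAC straddles two 32-bit words, so
 * each address takes two mangle actions. */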
static int flow_offload_eth_src(struct net *net,
				const struct flow_offload *flow,
				enum flow_offload_tuple_dir dir,
				struct nf_flow_rule *flow_rule)
{
	const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
	struct net_device *dev;
	u32 mask, val;
	u16 val16;

	dev = dev_get_by_index(net, tuple->iifidx);
	if (!dev)
		return -ENOENT;

	mask = ~0xffff0000;
	memcpy(&val16, dev->dev_addr, 2);
	val = val16 << 16;
	flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
			    &val, &mask);

	mask = ~0xffffffff;
	memcpy(&val, dev->dev_addr + 2, 4);
	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
			    &val, &mask);
	dev_put(dev);

	return 0;
}

static int flow_offload_eth_dst(struct net *net,
				const struct flow_offload *flow,
				enum flow_offload_tuple_dir dir,
				struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
	const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
	const struct dst_entry *dst_cache;
	unsigned char ha[ETH_ALEN];
	struct neighbour *n;
	u32 mask, val;
	u8 nud_state;
	u16 val16;

	dst_cache = flow->tuplehash[dir].tuple.dst_cache;
	n = dst_neigh_lookup(dst_cache, daddr);
	if (!n)
		return -ENOENT;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(ha, n->ha);
	read_unlock_bh(&n->lock);

	if (!(nud_state & NUD_VALID)) {
		neigh_release(n);
		return -ENOENT;
	}

	mask = ~0xffffffff;
	memcpy(&val, ha, 4);
	flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
			    &val, &mask);

	mask = ~0x0000ffff;
	memcpy(&val16, ha + 4, 2);
	val = val16;
	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
			    &val, &mask);
	neigh_release(n);

	return 0;
}
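
/* For SNAT the original direction rewrites the source address and the
 * reply direction rewrites the destination; the new address is taken
 * from the opposite tuple, which already carries the translated
 * endpoints. DNAT mirrors this in reverse. */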
static void flow_offload_ipv4_snat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	u32 mask = ~htonl(0xffffffff);
	__be32 addr;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		offset = offsetof(struct iphdr, saddr);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		offset = offsetof(struct iphdr, daddr);
		break;
	default:
		return;
	}

	flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
			    &addr, &mask);
}

static void flow_offload_ipv4_dnat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	u32 mask = ~htonl(0xffffffff);
	__be32 addr;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		offset = offsetof(struct iphdr, daddr);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		offset = offsetof(struct iphdr, saddr);
		break;
	default:
		return;
	}

	flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
			    &addr, &mask);
}
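
/* IPv6 addresses are rewritten with the same 32-bit mangle primitive,
 * spread over the four words of the 128-bit address. */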
static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
				     unsigned int offset,
				     const __be32 *addr, const __be32 *mask)
{
	struct flow_action_entry *entry;
	int i, j;

	/* i walks the byte offset in the header, j the word index in
	 * addr[]; one mangle action per 32-bit word. */
	for (i = 0, j = 0; i < sizeof(struct in6_addr); i += sizeof(u32), j++) {
		entry = flow_action_entry_next(flow_rule);
		flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
				    offset + i, &addr[j], mask);
	}
}

static void flow_offload_ipv6_snat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	u32 mask = ~htonl(0xffffffff);
	const __be32 *addr;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
		offset = offsetof(struct ipv6hdr, saddr);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
		offset = offsetof(struct ipv6hdr, daddr);
		break;
	default:
		return;
	}

	flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}

static void flow_offload_ipv6_dnat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	u32 mask = ~htonl(0xffffffff);
	const __be32 *addr;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
		offset = offsetof(struct ipv6hdr, daddr);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
		offset = offsetof(struct ipv6hdr, saddr);
		break;
	default:
		return;
	}

	flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}

static int flow_offload_l4proto(const struct flow_offload *flow)
{
	u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
	u8 type = 0;

	switch (protonum) {
	case IPPROTO_TCP:
		type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
		break;
	case IPPROTO_UDP:
		type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
		break;
	default:
		break;
	}

	return type;
}
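
/* Source and destination port share the first 32-bit word of both the
 * TCP and the UDP header, so the offset is always 0 and the mask
 * selects either the upper (source) or the lower (destination) half of
 * that word. */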
static void flow_offload_port_snat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	u32 mask, port;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
		offset = 0; /* offsetof(struct tcphdr, source); */
		port = htonl(port << 16);
		mask = ~htonl(0xffff0000);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
		offset = 0; /* offsetof(struct tcphdr, dest); */
		port = htonl(port);
		mask = ~htonl(0xffff);
		break;
	default:
		return;
	}

	flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
			    &port, &mask);
}

static void flow_offload_port_dnat(struct net *net,
				   const struct flow_offload *flow,
				   enum flow_offload_tuple_dir dir,
				   struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	u32 mask, port;
	u32 offset;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
		offset = 0; /* offsetof(struct tcphdr, dest); */
		port = htonl(port);
		mask = ~htonl(0xffff);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
		offset = 0; /* offsetof(struct tcphdr, source); */
		port = htonl(port << 16);
		mask = ~htonl(0xffff0000);
		break;
	default:
		return;
	}

	flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
			    &port, &mask);
}

static void flow_offload_ipv4_checksum(struct net *net,
				       const struct flow_offload *flow,
				       struct nf_flow_rule *flow_rule)
{
	u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);

	entry->id = FLOW_ACTION_CSUM;
	entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;

	switch (protonum) {
	case IPPROTO_TCP:
		entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
		break;
	case IPPROTO_UDP:
		entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
		break;
	}
}

static void flow_offload_redirect(const struct flow_offload *flow,
				  enum flow_offload_tuple_dir dir,
				  struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
	struct rtable *rt;

	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
	entry->id = FLOW_ACTION_REDIRECT;
	entry->dev = rt->dst.dev;
	dev_hold(rt->dst.dev);
}
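
/* Build the action list for one direction of a flow: ethernet header
 * rewrite, optional NAT mangles (plus a checksum fixup on IPv4), and
 * finally a redirect to the output device. */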
int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule)
{
	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
		return -1;

	if (flow->flags & FLOW_OFFLOAD_SNAT) {
		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
		flow_offload_port_snat(net, flow, dir, flow_rule);
	}
	if (flow->flags & FLOW_OFFLOAD_DNAT) {
		flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
		flow_offload_port_dnat(net, flow, dir, flow_rule);
	}
	if (flow->flags & FLOW_OFFLOAD_SNAT ||
	    flow->flags & FLOW_OFFLOAD_DNAT)
		flow_offload_ipv4_checksum(net, flow, flow_rule);

	flow_offload_redirect(flow, dir, flow_rule);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);

int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule)
{
	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
		return -1;

	if (flow->flags & FLOW_OFFLOAD_SNAT) {
		flow_offload_ipv6_snat(net, flow, dir, flow_rule);
		flow_offload_port_snat(net, flow, dir, flow_rule);
	}
	if (flow->flags & FLOW_OFFLOAD_DNAT) {
		flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
		flow_offload_port_dnat(net, flow, dir, flow_rule);
	}

	flow_offload_redirect(flow, dir, flow_rule);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);

#define NF_FLOW_RULE_ACTION_MAX	16
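
/* 16 entries appears to be sized for the largest action list built
 * above: IPv6 SNAT + DNAT takes 4 ethernet mangles, 8 address mangles,
 * 2 port mangles and 1 redirect, i.e. 15 actions. */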
static struct nf_flow_rule *
nf_flow_offload_rule_alloc(struct net *net,
			   const struct flow_offload_work *offload,
			   enum flow_offload_tuple_dir dir)
{
	const struct nf_flowtable *flowtable = offload->flowtable;
	const struct flow_offload *flow = offload->flow;
	const struct flow_offload_tuple *tuple;
	struct nf_flow_rule *flow_rule;
	int err = -ENOMEM;

	flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
	if (!flow_rule)
		goto err_flow;

	flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
	if (!flow_rule->rule)
		goto err_flow_rule;

	flow_rule->rule->match.dissector = &flow_rule->match.dissector;
	flow_rule->rule->match.mask = &flow_rule->match.mask;
	flow_rule->rule->match.key = &flow_rule->match.key;

	tuple = &flow->tuplehash[dir].tuple;
	err = nf_flow_rule_match(&flow_rule->match, tuple);
	if (err < 0)
		goto err_flow_match;

	flow_rule->rule->action.num_entries = 0;
	if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
		goto err_flow_match;

	return flow_rule;

err_flow_match:
	kfree(flow_rule->rule);
err_flow_rule:
	kfree(flow_rule);
err_flow:
	return NULL;
}

static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
{
	struct flow_action_entry *entry;
	int i;

	/* Release the device references taken by flow_offload_redirect(). */
	for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
		entry = &flow_rule->rule->action.entries[i];
		if (entry->id != FLOW_ACTION_REDIRECT)
			continue;
		dev_put(entry->dev);
	}
	kfree(flow_rule->rule);
	kfree(flow_rule);
}

static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
{
	int i;

	for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
		__nf_flow_offload_destroy(flow_rule[i]);
}

static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
				 struct nf_flow_rule *flow_rule[])
{
	struct net *net = read_pnet(&offload->flowtable->net);

	flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
						  FLOW_OFFLOAD_DIR_ORIGINAL);
	if (!flow_rule[0])
		return -ENOMEM;

	flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
						  FLOW_OFFLOAD_DIR_REPLY);
	if (!flow_rule[1]) {
		__nf_flow_offload_destroy(flow_rule[0]);
		return -ENOMEM;
	}

	return 0;
}

static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
				 __be16 proto, int priority,
				 enum flow_cls_command cmd,
				 const struct flow_offload_tuple *tuple,
				 struct netlink_ext_ack *extack)
{
	cls_flow->common.protocol = proto;
	cls_flow->common.prio = priority;
	cls_flow->common.extack = extack;
	cls_flow->command = cmd;
	cls_flow->cookie = (unsigned long)tuple;
}
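
/* Offer one direction of the flow to every driver callback bound to
 * this flowtable's block; the return value is the number of callbacks
 * that accepted the rule. */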
static int flow_offload_tuple_add(struct flow_offload_work *offload,
				  struct nf_flow_rule *flow_rule,
				  enum flow_offload_tuple_dir dir)
{
	struct nf_flowtable *flowtable = offload->flowtable;
	struct flow_cls_offload cls_flow = {};
	struct flow_block_cb *block_cb;
	struct netlink_ext_ack extack;
	__be16 proto = ETH_P_ALL;
	int err, i = 0;

	nf_flow_offload_init(&cls_flow, proto, offload->priority,
			     FLOW_CLS_REPLACE,
			     &offload->flow->tuplehash[dir].tuple, &extack);
	cls_flow.rule = flow_rule->rule;

	list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) {
		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
				   block_cb->cb_priv);
		if (err < 0)
			continue;

		i++;
	}

	return i;
}

static void flow_offload_tuple_del(struct flow_offload_work *offload,
				   enum flow_offload_tuple_dir dir)
{
	struct nf_flowtable *flowtable = offload->flowtable;
	struct flow_cls_offload cls_flow = {};
	struct flow_block_cb *block_cb;
	struct netlink_ext_ack extack;
	__be16 proto = ETH_P_ALL;

	nf_flow_offload_init(&cls_flow, proto, offload->priority,
			     FLOW_CLS_DESTROY,
			     &offload->flow->tuplehash[dir].tuple, &extack);

	list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
		block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);

	offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD;
}

static int flow_offload_rule_add(struct flow_offload_work *offload,
				 struct nf_flow_rule *flow_rule[])
{
	int ok_count = 0;

	ok_count += flow_offload_tuple_add(offload, flow_rule[0],
					   FLOW_OFFLOAD_DIR_ORIGINAL);
	ok_count += flow_offload_tuple_add(offload, flow_rule[1],
					   FLOW_OFFLOAD_DIR_REPLY);
	if (ok_count == 0)
		return -ENOENT;

	return 0;
}

static int flow_offload_work_add(struct flow_offload_work *offload)
{
	struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
	int err;

	err = nf_flow_offload_alloc(offload, flow_rule);
	if (err < 0)
		return -ENOMEM;

	err = flow_offload_rule_add(offload, flow_rule);

	nf_flow_offload_destroy(flow_rule);

	return err;
}

static void flow_offload_work_del(struct flow_offload_work *offload)
{
	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
}

static void flow_offload_tuple_stats(struct flow_offload_work *offload,
				     enum flow_offload_tuple_dir dir,
				     struct flow_stats *stats)
{
	struct nf_flowtable *flowtable = offload->flowtable;
	struct flow_cls_offload cls_flow = {};
	struct flow_block_cb *block_cb;
	struct netlink_ext_ack extack;
	__be16 proto = ETH_P_ALL;

	nf_flow_offload_init(&cls_flow, proto, offload->priority,
			     FLOW_CLS_STATS,
			     &offload->flow->tuplehash[dir].tuple, &extack);

	list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list)
		block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv);
	memcpy(stats, &cls_flow.stats, sizeof(*stats));
}

static void flow_offload_work_stats(struct flow_offload_work *offload)
{
	struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
	u64 lastused;

	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);

	lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
	offload->flow->timeout = max_t(u64, offload->flow->timeout,
				       lastused + NF_FLOW_TIMEOUT);
}
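
/* Work handler: detach the whole pending list under the lock, then
 * dispatch each queued request to the add/del/stats handler. */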
static void flow_offload_work_handler(struct work_struct *work)
{
	struct flow_offload_work *offload, *next;
	LIST_HEAD(offload_pending_list);
	int ret;

	spin_lock_bh(&flow_offload_pending_list_lock);
	list_replace_init(&flow_offload_pending_list, &offload_pending_list);
	spin_unlock_bh(&flow_offload_pending_list_lock);

	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
		switch (offload->cmd) {
		case FLOW_CLS_REPLACE:
			ret = flow_offload_work_add(offload);
			if (ret < 0)
				offload->flow->flags &= ~FLOW_OFFLOAD_HW;
			break;
		case FLOW_CLS_DESTROY:
			flow_offload_work_del(offload);
			break;
		case FLOW_CLS_STATS:
			flow_offload_work_stats(offload);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		list_del(&offload->list);
		kfree(offload);
	}
}

static void flow_offload_queue_work(struct flow_offload_work *offload)
{
	spin_lock_bh(&flow_offload_pending_list_lock);
	list_add_tail(&offload->list, &flow_offload_pending_list);
	spin_unlock_bh(&flow_offload_pending_list_lock);

	schedule_work(&nf_flow_offload_work);
}

void nf_flow_offload_add(struct nf_flowtable *flowtable,
			 struct flow_offload *flow)
{
	struct flow_offload_work *offload;

	offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
	if (!offload)
		return;

	offload->cmd = FLOW_CLS_REPLACE;
	offload->flow = flow;
	offload->priority = flowtable->priority;
	offload->flowtable = flowtable;
	flow->flags |= FLOW_OFFLOAD_HW;

	flow_offload_queue_work(offload);
}

void nf_flow_offload_del(struct nf_flowtable *flowtable,
			 struct flow_offload *flow)
{
	struct flow_offload_work *offload;

	offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
	if (!offload)
		return;

	offload->cmd = FLOW_CLS_DESTROY;
	offload->flow = flow;
	offload->flow->flags |= FLOW_OFFLOAD_HW_DYING;
	offload->flowtable = flowtable;

	flow_offload_queue_work(offload);
}

void nf_flow_offload_stats(struct nf_flowtable *flowtable,
			   struct flow_offload *flow)
{
	struct flow_offload_work *offload;
	s64 delta;

	/* Skip the refresh if the flow timeout was updated recently or
	 * the flow is already being torn down. */
	delta = nf_flow_timeout_delta(flow->timeout);
	if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) ||
	    flow->flags & FLOW_OFFLOAD_HW_DYING)
		return;

	offload = kzalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
	if (!offload)
		return;

	offload->cmd = FLOW_CLS_STATS;
	offload->flow = flow;
	offload->flowtable = flowtable;

	flow_offload_queue_work(offload);
}

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
	if (flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD)
		flush_work(&nf_flow_offload_work);
}

static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
				     struct flow_block_offload *bo,
				     enum flow_block_command cmd)
{
	struct flow_block_cb *block_cb, *next;
	int err = 0;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
		break;
	case FLOW_BLOCK_UNBIND:
		list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
			list_del(&block_cb->list);
			flow_block_cb_free(block_cb);
		}
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}
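
/* Bind or unbind the flowtable block on a device via
 * ndo_setup_tc(TC_SETUP_FT); the driver returns its callbacks in
 * bo.cb_list, which nf_flow_table_block_setup() then splices into the
 * flowtable's own callback list. */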
int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
				struct net_device *dev,
				enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};
	int err;

	if (!(flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD))
		return 0;

	if (!dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	bo.net = dev_net(dev);
	bo.block = &flowtable->flow_block;
	bo.command = cmd;
	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.extack = &extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo);
	if (err < 0)
		return err;

	return nf_flow_table_block_setup(flowtable, &bo, cmd);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);

int nf_flow_table_offload_init(void)
{
	INIT_WORK(&nf_flow_offload_work, flow_offload_work_handler);

	return 0;
}

void nf_flow_table_offload_exit(void)
{
	struct flow_offload_work *offload, *next;
	LIST_HEAD(offload_pending_list);

	cancel_work_sync(&nf_flow_offload_work);

	list_for_each_entry_safe(offload, next, &offload_pending_list, list) {
		list_del(&offload->list);
		kfree(offload);
	}
}