1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/init.h>
3 #include <linux/module.h>
4 #include <linux/netfilter.h>
5 #include <net/flow_offload.h>
6 #include <net/netfilter/nf_tables.h>
7 #include <net/netfilter/nf_tables_offload.h>
8 #include <net/pkt_cls.h>
/*
 * Allocate a nft_flow_rule and an embedded flow_rule sized for
 * @num_actions actions, then point the flow_rule's match descriptor,
 * mask and key at the storage owned by the nft_flow_rule itself, so
 * match data lives and dies with the containing object.
 *
 * NOTE(review): the NULL checks / error unwind for the two allocations
 * and the final return are elided in this excerpt -- confirm against
 * the full file before relying on the error path.
 */
10 static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
12 struct nft_flow_rule *flow;
14 flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
18 flow->rule = flow_rule_alloc(num_actions);
/* Wire the generic flow_rule match vectors to our own match storage. */
24 flow->rule->match.dissector = &flow->match.dissector;
25 flow->rule->match.mask = &flow->match.mask;
26 flow->rule->match.key = &flow->match.key;
/*
 * Translate an nf_tables rule into a flow_rule suitable for hardware
 * offload.  Two passes over the rule's expressions:
 *
 *   1. count the expressions flagged NFT_OFFLOAD_F_ACTION so the
 *      flow_rule action array can be sized up front;
 *   2. invoke each expression's ->offload() callback to populate the
 *      match and action entries, tracking L3/L4 dependencies in @ctx.
 *
 * Returns the new nft_flow_rule, or ERR_PTR(-ENOMEM) if allocation
 * fails.  On a per-expression offload error the partially built flow
 * rule is destroyed (error return line elided in this view).
 */
31 struct nft_flow_rule *nft_flow_rule_create(const struct nft_rule *rule)
33 struct nft_offload_ctx ctx = {
/* Start with no recorded protocol dependency; expressions set it. */
35 .type = NFT_OFFLOAD_DEP_UNSPEC,
38 struct nft_flow_rule *flow;
39 int num_actions = 0, err;
40 struct nft_expr *expr;
/* Pass 1: count offloadable actions to size the flow_rule. */
42 expr = nft_expr_first(rule);
43 while (expr->ops && expr != nft_expr_last(rule)) {
44 if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
47 expr = nft_expr_next(expr);
50 flow = nft_flow_rule_alloc(num_actions);
52 return ERR_PTR(-ENOMEM);
/* Pass 2: let every expression fill in its match/action parts. */
54 expr = nft_expr_first(rule);
55 while (expr->ops && expr != nft_expr_last(rule)) {
/* Expressions without an ->offload() callback cannot be offloaded.
 * NOTE(review): the error taken in that case is elided here.
 */
56 if (!expr->ops->offload) {
60 err = expr->ops->offload(&ctx, flow, expr);
64 expr = nft_expr_next(expr);
/* Record the L3 protocol the expressions declared a dependency on. */
66 flow->proto = ctx.dep.l3num;
70 nft_flow_rule_destroy(flow);
/*
 * Release a flow rule built by nft_flow_rule_create().
 * (Function body elided in this excerpt.)
 */
75 void nft_flow_rule_destroy(struct nft_flow_rule *flow)
/*
 * Record in @ctx the kind of protocol dependency (network/transport)
 * the currently offloaded expression establishes; consumed later by
 * nft_offload_update_dependency().  (Body elided in this excerpt.)
 */
81 void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
82 enum nft_offload_dep_type type)
/*
 * Store the concrete protocol value for a previously declared
 * dependency: a 16-bit L3 protocol number for NETWORK, an 8-bit L4
 * protocol number for TRANSPORT.  The WARN_ONs catch callers passing
 * a mismatched length.  The dependency type is reset to UNSPEC once
 * consumed.
 *
 * NOTE(review): the break statements and any default case of the
 * switch are elided in this excerpt -- confirm against the full file.
 */
87 void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
88 const void *data, u32 len)
90 switch (ctx->dep.type) {
91 case NFT_OFFLOAD_DEP_NETWORK:
92 WARN_ON(len != sizeof(__u16));
93 memcpy(&ctx->dep.l3num, data, sizeof(__u16));
95 case NFT_OFFLOAD_DEP_TRANSPORT:
96 WARN_ON(len != sizeof(__u8));
97 memcpy(&ctx->dep.protonum, data, sizeof(__u8));
/* Dependency has been materialized; clear it for the next expression. */
102 ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
/*
 * Fill the common part of a flow_cls offload request: the match
 * protocol and the extack used for error reporting back to the driver.
 *
 * NOTE(review): the @proto parameter declaration line (between the two
 * visible parameter lines) is elided in this excerpt.
 */
105 static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
107 struct netlink_ext_ack *extack)
109 common->protocol = proto;
110 common->extack = extack;
/*
 * Invoke every driver callback registered on the basechain's flow
 * block with the given setup @type and payload @type_data.
 *
 * NOTE(review): how a non-zero callback return is handled (early exit
 * vs. continue) and the final return are elided in this excerpt.
 */
113 static int nft_setup_cb_call(struct nft_base_chain *basechain,
114 enum tc_setup_type type, void *type_data)
116 struct flow_block_cb *block_cb;
119 list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
120 err = block_cb->cb(type, type_data, block_cb->cb_priv);
/*
 * Push a single rule to hardware: build a flow_cls_offload request for
 * @command (FLOW_CLS_REPLACE or FLOW_CLS_DESTROY) and hand it to the
 * drivers bound to the rule's base chain.  The rule pointer doubles as
 * the offload cookie so replace/destroy requests can be correlated.
 *
 * NOTE(review): the non-base-chain error return and the condition
 * guarding the cls_flow.rule assignment (likely command-dependent,
 * since @flow is NULL on destroy) are elided in this excerpt.
 */
127 static int nft_flow_offload_rule(struct nft_trans *trans,
128 enum flow_cls_command command)
130 struct nft_flow_rule *flow = nft_trans_flow_rule(trans);
131 struct nft_rule *rule = nft_trans_rule(trans);
132 struct flow_cls_offload cls_flow = {};
133 struct nft_base_chain *basechain;
134 struct netlink_ext_ack extack;
/* Match all ethertypes; the flow rule itself narrows the protocol. */
135 __be16 proto = ETH_P_ALL;
137 if (!nft_is_base_chain(trans->ctx.chain))
140 basechain = nft_base_chain(trans->ctx.chain);
145 nft_flow_offload_common_init(&cls_flow.common, proto, &extack);
146 cls_flow.command = command;
/* The rule pointer is the stable identity drivers key offloads on. */
147 cls_flow.cookie = (unsigned long) rule;
149 cls_flow.rule = flow->rule;
151 return nft_setup_cb_call(basechain, TC_SETUP_CLSFLOWER, &cls_flow);
/*
 * Adopt the callbacks the driver queued on @bo into the basechain's
 * flow block, completing the bind.  (Return statement elided in this
 * excerpt.)
 */
154 static int nft_flow_offload_bind(struct flow_block_offload *bo,
155 struct nft_base_chain *basechain)
157 list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
/*
 * Tear down an offload binding: unlink and free every callback the
 * driver registered on @bo.  Uses the _safe iterator because each
 * entry is freed while walking.  (Return statement elided in this
 * excerpt.)
 */
161 static int nft_flow_offload_unbind(struct flow_block_offload *bo,
162 struct nft_base_chain *basechain)
164 struct flow_block_cb *block_cb, *next;
166 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
167 list_del(&block_cb->list);
168 flow_block_cb_free(block_cb);
/*
 * Dispatch a flow block command to the matching bind/unbind handler.
 *
 * NOTE(review): the break statements, any default case, and the final
 * return of @err are elided in this excerpt.
 */
174 static int nft_block_setup(struct nft_base_chain *basechain,
175 struct flow_block_offload *bo,
176 enum flow_block_command cmd)
181 case FLOW_BLOCK_BIND:
182 err = nft_flow_offload_bind(bo, basechain);
184 case FLOW_BLOCK_UNBIND:
185 err = nft_flow_offload_unbind(bo, basechain);
/*
 * Direct offload path for devices implementing ->ndo_setup_tc(): build
 * a flow_block_offload request for the chain's ingress block, let the
 * driver populate the callback list, then commit it via
 * nft_block_setup().
 *
 * NOTE(review): assignments of bo.command/bo.extack and the error
 * check on the ndo_setup_tc() return are elided in this excerpt.
 */
195 static int nft_block_offload_cmd(struct nft_base_chain *chain,
196 struct net_device *dev,
197 enum flow_block_command cmd)
199 struct netlink_ext_ack extack = {};
200 struct flow_block_offload bo = {};
203 bo.net = dev_net(dev);
204 bo.block = &chain->flow_block;
/* Offload attaches at the clsact ingress hook point. */
206 bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
208 INIT_LIST_HEAD(&bo.cb_list);
210 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
214 return nft_block_setup(chain, &bo, cmd);
/*
 * Indirect offload path driven by a specific driver callback @cb
 * (e.g. from a tunnel device registration): build the ingress block
 * request, invoke the callback directly, then commit the resulting
 * callback list.
 *
 * NOTE(review): a parameter line (between @cb and @cmd, presumably the
 * callback's private data) and the bo.command/bo.extack assignments
 * are elided in this excerpt.
 */
217 static void nft_indr_block_ing_cmd(struct net_device *dev,
218 struct nft_base_chain *chain,
219 flow_indr_block_bind_cb_t *cb,
221 enum flow_block_command cmd)
223 struct netlink_ext_ack extack = {};
224 struct flow_block_offload bo = {};
229 bo.net = dev_net(dev);
230 bo.block = &chain->flow_block;
232 bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
234 INIT_LIST_HEAD(&bo.cb_list);
/* Hand the request straight to the driver-supplied callback. */
236 cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);
238 nft_block_setup(chain, &bo, cmd);
/*
 * Indirect offload path for devices without ->ndo_setup_tc(): announce
 * the block to all registered indirect-block drivers via
 * flow_indr_block_call().  If no driver registered a callback the
 * chain cannot be offloaded.
 *
 * NOTE(review): the bo.command/bo.extack assignments and the error
 * return taken when cb_list is empty (likely -EOPNOTSUPP) are elided
 * in this excerpt.
 */
241 static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
242 struct net_device *dev,
243 enum flow_block_command cmd)
245 struct flow_block_offload bo = {};
246 struct netlink_ext_ack extack = {};
248 bo.net = dev_net(dev);
249 bo.block = &chain->flow_block;
251 bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
253 INIT_LIST_HEAD(&bo.cb_list);
255 flow_indr_block_call(dev, &bo, cmd);
/* No indirect driver picked up the block: nothing to set up. */
257 if (list_empty(&bo.cb_list))
260 return nft_block_setup(chain, &bo, cmd);
263 #define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
/*
 * Bind or unbind a whole base chain to/from hardware.  Validates that
 * the chain is a base chain with an acceptable policy, then selects
 * the direct (->ndo_setup_tc) or indirect offload path depending on
 * the device's capabilities.
 *
 * NOTE(review): the error returns for the non-base-chain and
 * rejected-policy cases, and any check on @dev, are elided in this
 * excerpt.
 */
265 static int nft_flow_offload_chain(struct nft_trans *trans,
266 enum flow_block_command cmd)
268 struct nft_chain *chain = trans->ctx.chain;
269 struct nft_base_chain *basechain;
270 struct net_device *dev;
272 if (!nft_is_base_chain(chain))
275 basechain = nft_base_chain(chain);
276 dev = basechain->ops.dev;
280 /* Only default policy to accept is supported for now. */
281 if (cmd == FLOW_BLOCK_BIND &&
282 nft_trans_chain_policy(trans) != -1 &&
283 nft_trans_chain_policy(trans) != NF_ACCEPT)
/* Prefer the direct driver hook; fall back to indirect registration. */
286 if (dev->netdev_ops->ndo_setup_tc)
287 return nft_block_offload_cmd(basechain, dev, cmd);
289 return nft_indr_block_offload_cmd(basechain, dev, cmd);
/*
 * Walk the pending nf_tables transaction list at commit time and push
 * every hardware-offload-relevant change (chain bind/unbind, rule
 * add/delete) to the drivers.  Only NFPROTO_NETDEV family chains
 * flagged NFT_CHAIN_HW_OFFLOAD are considered.
 *
 * NOTE(review): the break statements, continue/skip paths, per-case
 * error propagation and the final return are elided in this excerpt.
 */
292 int nft_flow_rule_offload_commit(struct net *net)
294 struct nft_trans *trans;
297 list_for_each_entry(trans, &net->nft.commit_list, list) {
/* Hardware offload only applies to the netdev family. */
298 if (trans->ctx.family != NFPROTO_NETDEV)
301 switch (trans->msg_type) {
302 case NFT_MSG_NEWCHAIN:
303 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
306 err = nft_flow_offload_chain(trans, FLOW_BLOCK_BIND);
308 case NFT_MSG_DELCHAIN:
309 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
312 err = nft_flow_offload_chain(trans, FLOW_BLOCK_UNBIND);
314 case NFT_MSG_NEWRULE:
315 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
/* Only plain appends are offloadable; replace/insert are not. */
318 if (trans->ctx.flags & NLM_F_REPLACE ||
319 !(trans->ctx.flags & NLM_F_APPEND))
322 err = nft_flow_offload_rule(trans, FLOW_CLS_REPLACE);
/* The translated flow rule is no longer needed once pushed. */
323 nft_flow_rule_destroy(nft_trans_flow_rule(trans));
325 case NFT_MSG_DELRULE:
326 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
329 err = nft_flow_offload_rule(trans, FLOW_CLS_DESTROY);
340 void nft_indr_block_get_and_ing_cmd(struct net_device *dev,
341 flow_indr_block_bind_cb_t *cb,
343 enum flow_block_command command)
345 struct net *net = dev_net(dev);
346 const struct nft_table *table;
347 const struct nft_chain *chain;
349 list_for_each_entry_rcu(table, &net->nft.tables, list) {
350 if (table->family != NFPROTO_NETDEV)
353 list_for_each_entry_rcu(chain, &table->chains, list) {
354 if (nft_is_base_chain(chain)) {
355 struct nft_base_chain *basechain;
357 basechain = nft_base_chain(chain);
358 if (!strncmp(basechain->dev_name, dev->name,
360 nft_indr_block_ing_cmd(dev, basechain,