// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"
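
/*
 * The last slot of the Flexible RX Parser table is reserved by tc_init() for
 * an "all pass" entry, so frames that match none of the user-programmed
 * filters are still accepted instead of being dropped by the parser.
 */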
static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
        memset(entry, 0, sizeof(*entry));
        entry->in_use = true;
        entry->is_last = true;
        entry->is_frag = false;
        entry->prio = ~0x0;
        entry->handle = 0;
        entry->val.match_data = 0x0;
        entry->val.match_en = 0x0;
        entry->val.af = 1;
        entry->val.dma_ch_no = 0x0;
}
static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
                                             struct tc_cls_u32_offload *cls,
                                             bool free)
        struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
        u32 loc = cls->knode.handle;

        for (i = 0; i < priv->tc_entries_max; i++) {
                entry = &priv->tc_entries[i];
                if (!entry->in_use && !first && free)
                        first = entry;
                if ((entry->handle == loc) && !free && !entry->is_frag)
                        dup = entry;

        memset(&first->val, 0, sizeof(first->val));
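
/*
 * tc_find_entry() is used in two modes: with @free set it returns the first
 * unused slot in the parser table (resetting its HW values), otherwise it
 * looks up the entry that was previously programmed for the u32 knode handle
 * carried in @cls.
 */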
static int tc_fill_actions(struct stmmac_tc_entry *entry,
                           struct stmmac_tc_entry *frag,
                           struct tc_cls_u32_offload *cls)
        struct stmmac_tc_entry *action_entry = entry;
        const struct tc_action *act;
        struct tcf_exts *exts;

        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;

        tcf_exts_for_each_action(i, act, exts) {
                if (is_tcf_gact_ok(act)) {
                        action_entry->val.af = 1;
                if (is_tcf_gact_shot(act)) {
                        action_entry->val.rf = 1;
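
/*
 * Only the gact "ok" (accept) and "drop" actions can be offloaded; they map
 * onto the parser entry's accept-frame (af) and reject-frame (rf) flags. When
 * a match is split over two entries the action is applied to the fragment,
 * i.e. to the entry that completes the match.
 */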
static int tc_fill_entry(struct stmmac_priv *priv,
                         struct tc_cls_u32_offload *cls)
        struct stmmac_tc_entry *entry, *frag = NULL;
        struct tc_u32_sel *sel = cls->knode.sel;
        u32 off, data, mask, real_off, rem;
        u32 prio = cls->common.prio << 16;

        /* Only 1 match per entry */
        if (sel->nkeys <= 0 || sel->nkeys > 1)
                return -EINVAL;

        off = sel->keys[0].off << sel->offshift;
        data = sel->keys[0].val;
        mask = sel->keys[0].mask;

        switch (ntohs(cls->common.protocol)) {

        if (off > priv->tc_off_max)
                return -EINVAL;

        entry = tc_find_entry(priv, cls, true);

        frag = tc_find_entry(priv, cls, true);

        entry->frag_ptr = frag;
        entry->val.match_en = (mask << (rem * 8)) &
                              GENMASK(31, rem * 8);
        entry->val.match_data = (data << (rem * 8)) &
                                GENMASK(31, rem * 8);
        entry->val.frame_offset = real_off;

        frag->val.match_en = (mask >> (rem * 8)) &
                             GENMASK(rem * 8 - 1, 0);
        frag->val.match_data = (data >> (rem * 8)) &
                               GENMASK(rem * 8 - 1, 0);
        frag->val.frame_offset = real_off + 1;
        frag->is_frag = true;

        entry->frag_ptr = NULL;
        entry->val.match_en = mask;
        entry->val.match_data = data;
        entry->val.frame_offset = real_off;

        ret = tc_fill_actions(entry, frag, cls);

        frag->in_use = false;
        entry->in_use = false;
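
/*
 * The parser compares one 32-bit word per entry, so a match whose byte offset
 * is not word aligned has to be split: mask/data are shifted up into the
 * first entry and the overflow goes into a second "fragment" entry at the
 * next word. With illustrative values only: a match at byte offset 6 gives
 * real_off = 1 and rem = 2, so the first entry covers frame bytes 6-7 via
 * (mask << 16) and the fragment covers bytes 8-9 via (mask >> 16) at
 * frame_offset 2.
 */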
static void tc_unfill_entry(struct stmmac_priv *priv,
                            struct tc_cls_u32_offload *cls)
        struct stmmac_tc_entry *entry;

        entry = tc_find_entry(priv, cls, false);

        entry->in_use = false;
        if (entry->frag_ptr) {
                entry = entry->frag_ptr;
                entry->is_frag = false;
                entry->in_use = false;
static int tc_config_knode(struct stmmac_priv *priv,
                           struct tc_cls_u32_offload *cls)
        ret = tc_fill_entry(priv, cls);

        ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
                                priv->tc_entries_max);

        tc_unfill_entry(priv, cls);
static int tc_delete_knode(struct stmmac_priv *priv,
                           struct tc_cls_u32_offload *cls)
        /* Set entry and fragments as not used */
        tc_unfill_entry(priv, cls);

        ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
                                priv->tc_entries_max);
static int tc_setup_cls_u32(struct stmmac_priv *priv,
                            struct tc_cls_u32_offload *cls)
        switch (cls->command) {
        case TC_CLSU32_REPLACE_KNODE:
                tc_unfill_entry(priv, cls);
                /* Fall through: re-program the entry like a new knode */
        case TC_CLSU32_NEW_KNODE:
                return tc_config_knode(priv, cls);
        case TC_CLSU32_DELETE_KNODE:
                return tc_delete_knode(priv, cls);
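
/*
 * From user space this path is typically exercised with cls_u32 on the
 * ingress qdisc, e.g. (illustrative command only, assuming hw-tc-offload is
 * enabled on the interface):
 *
 *      tc qdisc add dev eth0 ingress
 *      tc filter add dev eth0 ingress protocol ip u32 \
 *              match u32 0x00110000 0x00ff0000 at 8 action drop
 *
 * i.e. a single 32-bit match (here the IPv4 protocol byte, UDP) plus a gact
 * drop action, which is exactly the shape tc_fill_entry() and
 * tc_fill_actions() accept.
 */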
static int tc_init(struct stmmac_priv *priv)
        struct dma_features *dma_cap = &priv->dma_cap;

        if (dma_cap->l3l4fnum) {
                priv->flow_entries_max = dma_cap->l3l4fnum;
                priv->flow_entries = devm_kcalloc(priv->device,
                                                  priv->flow_entries_max,
                                                  sizeof(*priv->flow_entries),
                                                  GFP_KERNEL);
                if (!priv->flow_entries)
                        return -ENOMEM;

                for (i = 0; i < priv->flow_entries_max; i++)
                        priv->flow_entries[i].idx = i;

                dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
                         priv->flow_entries_max);
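
                /*
                 * Each flow entry maps 1:1 onto one of the MAC's L3/L4
                 * filters; idx is the hardware filter index later passed to
                 * stmmac_config_l3_filter() and stmmac_config_l4_filter().
                 */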
        /* Fail silently as we can still use remaining features, e.g. CBS */
        if (!dma_cap->frpsel)
                return 0;

        switch (dma_cap->frpbs) {
                priv->tc_off_max = 64;
                priv->tc_off_max = 128;
                priv->tc_off_max = 256;

        switch (dma_cap->frpes) {

        /* Reserve one last filter which lets all pass */
        priv->tc_entries_max = count;
        priv->tc_entries = devm_kcalloc(priv->device,
                                        count, sizeof(*priv->tc_entries), GFP_KERNEL);
        if (!priv->tc_entries)
                return -ENOMEM;

        tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

        dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
                 priv->tc_entries_max, priv->tc_off_max);
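
/*
 * CBS (IEEE 802.1Qav credit-based shaper) offload. Queue 0 is kept as the
 * best-effort queue, so only queues 1..n-1 may be switched to AVB mode, and
 * only when the MAC advertises AV support and runs at 100 Mb/s or 1 Gb/s.
 */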
static int tc_setup_cbs(struct stmmac_priv *priv,
                        struct tc_cbs_qopt_offload *qopt)
        u32 tx_queues_count = priv->plat->tx_queues_to_use;
        u32 queue = qopt->queue;

        /* Queue 0 is not AVB capable */
        if (queue <= 0 || queue >= tx_queues_count)
                return -EINVAL;
        if (!priv->dma_cap.av)
                return -EOPNOTSUPP;
        if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
                return -EOPNOTSUPP;

        mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
        if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
                ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);

                priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
        } else if (!qopt->enable) {
                return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);

        /* Port Transmit Rate and Speed Divider */
        ptr = (priv->speed == SPEED_100) ? 4 : 8;
        speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;
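
        /*
         * A sketch of the scaling, assuming the dwmac4/dwmac5 CBS register
         * layout: idleslope/sendslope arrive from the cbs qdisc in kbit/s and
         * are scaled by 1024 * ptr / speed_div, where ptr is the port
         * transmit rate factor (4 at 100 Mb/s, 8 at 1 Gb/s) and speed_div
         * normalises against the line rate; hicredit/locredit arrive in bytes
         * and are converted to bits and scaled by 1024.
         */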
        /* Final adjustments for HW */
        value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
        priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

        value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
        priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

        value = qopt->hicredit * 1024ll * 8;
        priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

        value = qopt->locredit * 1024ll * 8;
        priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

        ret = stmmac_config_cbs(priv, priv->hw,
                                priv->plat->tx_queues_cfg[queue].send_slope,
                                priv->plat->tx_queues_cfg[queue].idle_slope,
                                priv->plat->tx_queues_cfg[queue].high_credit,
                                priv->plat->tx_queues_cfg[queue].low_credit,
                                queue);

        dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
                 queue, qopt->sendslope, qopt->idleslope,
                 qopt->hicredit, qopt->locredit);
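
/*
 * A typical way to drive this from user space (illustrative values only,
 * assuming an mqprio root qdisc with handle 100: so that 100:2 maps to the
 * second hardware queue):
 *
 *      tc qdisc replace dev eth0 parent 100:2 cbs \
 *              idleslope 20000 sendslope -980000 \
 *              hicredit 30 locredit -1470 offload 1
 *
 * which reserves roughly 20 Mbit/s on a 1 Gb/s link and lets the MAC do the
 * shaping instead of the software fallback.
 */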
static int tc_parse_flow_actions(struct stmmac_priv *priv,
                                 struct flow_action *action,
                                 struct stmmac_flow_entry *entry)
        struct flow_action_entry *act;

        if (!flow_action_has_entries(action))
                return -EINVAL;

        flow_action_for_each(i, act, action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
                        entry->action |= STMMAC_FLOW_ACTION_DROP;

        /* Nothing to do, maybe inverse filter ? */
static int tc_add_basic_flow(struct stmmac_priv *priv,
                             struct flow_cls_offload *cls,
                             struct stmmac_flow_entry *entry)
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_basic match;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
                return -EINVAL;

        flow_rule_match_basic(rule, &match);
        entry->ip_proto = match.key->ip_proto;
static int tc_add_ip4_flow(struct stmmac_priv *priv,
                           struct flow_cls_offload *cls,
                           struct stmmac_flow_entry *entry)
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
        struct flow_match_ipv4_addrs match;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
                return -EINVAL;

        flow_rule_match_ipv4_addrs(rule, &match);
        hw_match = ntohl(match.key->src) & ntohl(match.mask->src);

        ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
                                      false, true, inv, hw_match);

        hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);

        ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
                                      false, false, inv, hw_match);
static int tc_add_ports_flow(struct stmmac_priv *priv,
                             struct flow_cls_offload *cls,
                             struct stmmac_flow_entry *entry)
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
        struct flow_match_ports match;

        /* Nothing to do here */
        if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
                return -EINVAL;

        switch (entry->ip_proto) {

        flow_rule_match_ports(rule, &match);

        hw_match = ntohs(match.key->src) & ntohs(match.mask->src);

        ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
                                      is_udp, true, inv, hw_match);

        hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);

        ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
                                      is_udp, false, inv, hw_match);
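
/*
 * The flower path above is deliberately narrow: only IPv4 source/destination
 * address matches (L3) and TCP/UDP source/destination port matches (L4) can
 * be pushed to the MAC's perfect filters, and the only offloadable action is
 * drop, which is what the inv flag passed to the filter helpers reflects.
 */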
static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
                                              struct flow_cls_offload *cls,
                                              bool get_free)
        for (i = 0; i < priv->flow_entries_max; i++) {
                struct stmmac_flow_entry *entry = &priv->flow_entries[i];

                if (entry->cookie == cls->cookie)
                        return entry;
                if (get_free && !entry->in_use)
                        return entry;
static struct {
        int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
                  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
        { .fn = tc_add_basic_flow },
        { .fn = tc_add_ip4_flow },
        { .fn = tc_add_ports_flow },
};
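
/*
 * Each parser is tried in turn on the same flower rule; a parser that finds
 * nothing relevant to it returns an error without touching the hardware, and
 * the entry is only committed (marked in_use, cookie recorded) once at least
 * one parser has programmed something.
 */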
static int tc_add_flow(struct stmmac_priv *priv,
                       struct flow_cls_offload *cls)
        struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);

        entry = tc_find_flow(priv, cls, true);

        ret = tc_parse_flow_actions(priv, &rule->action, entry);

        for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
                ret = tc_flow_parsers[i].fn(priv, cls, entry);
                if (!ret)
                        entry->in_use = true;

        entry->cookie = cls->cookie;
static int tc_del_flow(struct stmmac_priv *priv,
                       struct flow_cls_offload *cls)
        struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);

        if (!entry || !entry->in_use)
                return -ENOENT;

        ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
                                      false, false, false, 0);

        ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
                                      false, false, false, 0);

        entry->in_use = false;
        entry->is_l4 = false;
static int tc_setup_cls(struct stmmac_priv *priv,
                        struct flow_cls_offload *cls)
        switch (cls->command) {
        case FLOW_CLS_REPLACE:
                ret = tc_add_flow(priv, cls);
                break;
        case FLOW_CLS_DESTROY:
                ret = tc_del_flow(priv, cls);
                break;
const struct stmmac_tc_ops dwmac510_tc_ops = {
        .init = tc_init,
        .setup_cls_u32 = tc_setup_cls_u32,
        .setup_cbs = tc_setup_cbs,
        .setup_cls = tc_setup_cls,
};
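
/*
 * These callbacks are reached through the stmmac HW interface from the
 * driver's ndo_setup_tc() path in stmmac_main.c, which dispatches
 * TC_SETUP_QDISC_CBS and the classifier block callbacks to the ops above.
 */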