// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_mqprio.h"
#include "sched.h"
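/* Sanity-check an mqprio offload request before committing any resources:
 * only full TC hardware offload in channel mode with a bandwidth rate
 * shaper is accepted, and the requested traffic classes, queue ranges,
 * and rates must fit the hardware scheduler classes, available EOTIDs,
 * and link speed.
 */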
static int cxgb4_mqprio_validate(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	u64 min_rate = 0, max_rate = 0, max_link_rate;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 qcount = 0, qoffset = 0;
	u32 link_ok, speed, mtu;
	int ret;
	u8 i;

	if (!mqprio->qopt.num_tc)
		return 0;

	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) {
		netdev_err(dev, "Only full TC hardware offload is supported\n");
		return -EINVAL;
	} else if (mqprio->mode != TC_MQPRIO_MODE_CHANNEL) {
		netdev_err(dev, "Only channel mode offload is supported\n");
		return -EINVAL;
	} else if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
		netdev_err(dev, "Only bandwidth rate shaper is supported\n");
		return -EINVAL;
	} else if (mqprio->qopt.num_tc > adap->params.nsched_cls) {
		netdev_err(dev,
			   "Only %u traffic classes supported by hardware\n",
			   adap->params.nsched_cls);
		return -ERANGE;
	}

	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
	if (ret) {
		netdev_err(dev, "Failed to get link speed, ret: %d\n", ret);
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = max_t(u16, mqprio->qopt.offset[i], qoffset);
		qcount += mqprio->qopt.count[i];

		/* Convert from bytes per second to bits per second */
		min_rate += (mqprio->min_rate[i] * 8);
		max_rate += (mqprio->max_rate[i] * 8);
	}

	if (qoffset >= adap->tids.neotids || qcount > adap->tids.neotids)
		return -ENOMEM;

	if (min_rate > max_link_rate || max_rate > max_link_rate) {
		netdev_err(dev,
			   "Total Min/Max (%llu/%llu) Rate > supported (%llu)\n",
			   min_rate, max_rate, max_link_rate);
		return -EINVAL;
	}

	return 0;
}
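/* Initialize a software ETHOFLD Tx queue: allocate its descriptor ring,
 * bind it to the given EOTID and hardware Tx queue, and arm the tasklet
 * used to resume transmission once credits are replenished.
 */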
static int cxgb4_init_eosw_txq(struct net_device *dev,
			       struct sge_eosw_txq *eosw_txq,
			       u32 eotid, u32 hwqid)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_desc *ring;

	memset(eosw_txq, 0, sizeof(*eosw_txq));

	ring = kcalloc(CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM,
		       sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	eosw_txq->desc = ring;
	eosw_txq->ndesc = CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM;
	spin_lock_init(&eosw_txq->lock);
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
	eosw_txq->eotid = eotid;
	eosw_txq->hwtid = adap->tids.eotid_base + eosw_txq->eotid;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->hwqid = hwqid;
	eosw_txq->netdev = dev;
	tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
		     (unsigned long)eosw_txq);
	return 0;
}
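/* Drain and reset a software ETHOFLD Tx queue back to its initial
 * CLOSED state, releasing any descriptors still in flight.
 */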
static void cxgb4_clean_eosw_txq(struct net_device *dev,
				 struct sge_eosw_txq *eosw_txq)
{
	struct adapter *adap = netdev2adap(dev);

	cxgb4_eosw_txq_free_desc(adap, eosw_txq, eosw_txq->ndesc);
	eosw_txq->pidx = 0;
	eosw_txq->last_pidx = 0;
	eosw_txq->cidx = 0;
	eosw_txq->last_cidx = 0;
	eosw_txq->flowc_idx = 0;
	eosw_txq->inuse = 0;
	eosw_txq->cred = adap->params.ofldq_wr_cred;
	eosw_txq->ncompl = 0;
	eosw_txq->last_compl = 0;
	eosw_txq->state = CXGB4_EO_STATE_CLOSED;
}
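/* Fully tear down a software ETHOFLD Tx queue and kill its resume tasklet. */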
static void cxgb4_free_eosw_txq(struct net_device *dev,
				struct sge_eosw_txq *eosw_txq)
{
	spin_lock_bh(&eosw_txq->lock);
	cxgb4_clean_eosw_txq(dev, eosw_txq);
	kfree(eosw_txq->desc);
	spin_unlock_bh(&eosw_txq->lock);
	tasklet_kill(&eosw_txq->qresume_tsk);
}
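/* Allocate the per-port ETHOFLD hardware queues: one Rxq (for receiving
 * Tx completions) and one Txq per queue set, plus their MSI-X vectors
 * and IRQ affinity. The eohw_rxq/eohw_txq arrays are shared by all
 * ports and allocated once, tracked by tc_mqprio->refcnt.
 */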
static int cxgb4_mqprio_alloc_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	int ret, msix = 0;
	u32 i;

	/* Allocate ETHOFLD hardware queue structures if not done already */
	if (!refcount_read(&adap->tc_mqprio->refcnt)) {
		adap->sge.eohw_rxq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_ofld_rxq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_rxq)
			return -ENOMEM;

		adap->sge.eohw_txq = kcalloc(adap->sge.eoqsets,
					     sizeof(struct sge_eohw_txq),
					     GFP_KERNEL);
		if (!adap->sge.eohw_txq) {
			kfree(adap->sge.eohw_rxq);
			return -ENOMEM;
		}
	}

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)adap->sge.intrq.abs_id + 1);

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Allocate Rxqs for receiving ETHOFLD Tx completions */
		if (msix >= 0) {
			msix = cxgb4_get_msix_idx_from_bmap(adap);
			if (msix < 0) {
				ret = msix;
				goto out_free_queues;
			}

			eorxq->msix = &adap->msix_info[msix];
			snprintf(eorxq->msix->desc,
				 sizeof(eorxq->msix->desc),
				 "%s-eorxq%d", dev->name, i);
		}

		init_rspq(adap, &eorxq->rspq,
			  CXGB4_EOHW_RXQ_DEFAULT_INTR_USEC,
			  CXGB4_EOHW_RXQ_DEFAULT_PKT_CNT,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_NUM,
			  CXGB4_EOHW_RXQ_DEFAULT_DESC_SIZE);

		eorxq->fl.size = CXGB4_EOHW_FLQ_DEFAULT_DESC_NUM;

		ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,
				       dev, msix, &eorxq->fl,
				       cxgb4_ethofld_rx_handler,
				       NULL, 0);
		if (ret)
			goto out_free_queues;

		/* Allocate ETHOFLD hardware Txqs */
		eotxq->q.size = CXGB4_EOHW_TXQ_DEFAULT_DESC_NUM;
		ret = t4_sge_alloc_ethofld_txq(adap, eotxq, dev,
					       eorxq->rspq.cntxt_id);
		if (ret)
			goto out_free_queues;

		/* Allocate IRQs, set IRQ affinity, and start Rx */
		if (adap->flags & CXGB4_USING_MSIX) {
			ret = request_irq(eorxq->msix->vec, t4_sge_intr_msix, 0,
					  eorxq->msix->desc, &eorxq->rspq);
			if (ret)
				goto out_free_msix;

			cxgb4_set_msix_aff(adap, eorxq->msix->vec,
					   &eorxq->msix->aff_mask, i);
		}

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_enable_rx(adap, &eorxq->rspq);
	}

	refcount_inc(&adap->tc_mqprio->refcnt);
	return 0;

out_free_msix:
	while (i-- > 0) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
		}
	}

out_free_queues:
	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		if (eorxq->rspq.desc)
			free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		if (eorxq->msix)
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	kfree(adap->sge.eohw_txq);
	kfree(adap->sge.eohw_rxq);

	return ret;
}
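/* Release the ETHOFLD hardware queues and IRQs for this port; the shared
 * queue structure arrays are freed only when the last port drops its
 * reference.
 */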
static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_ofld_rxq *eorxq;
	struct sge_eohw_txq *eotxq;
	u32 i;

	/* Return if no ETHOFLD structures have been allocated yet */
	if (!refcount_read(&adap->tc_mqprio->refcnt))
		return;

	/* Return if no hardware queues have been allocated */
	if (!adap->sge.eohw_rxq[pi->first_qset].rspq.desc)
		return;

	for (i = 0; i < pi->nqsets; i++) {
		eorxq = &adap->sge.eohw_rxq[pi->first_qset + i];
		eotxq = &adap->sge.eohw_txq[pi->first_qset + i];

		/* Device removal path will already disable NAPI
		 * before unregistering netdevice. So, only disable
		 * NAPI if we're not in device removal path
		 */
		if (!(adap->flags & CXGB4_SHUTTING_DOWN))
			cxgb4_quiesce_rx(&eorxq->rspq);

		if (adap->flags & CXGB4_USING_MSIX) {
			cxgb4_clear_msix_aff(eorxq->msix->vec,
					     eorxq->msix->aff_mask);
			free_irq(eorxq->msix->vec, &eorxq->rspq);
			cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
		}

		free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
		t4_sge_free_ethofld_txq(adap, eotxq);
	}

	/* Free up ETHOFLD structures if there are no users */
	if (refcount_dec_and_test(&adap->tc_mqprio->refcnt)) {
		kfree(adap->sge.eohw_txq);
		kfree(adap->sge.eohw_rxq);
	}
}
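/* Allocate one hardware scheduler class per traffic class and program
 * its flow-level rate limiting parameters from the mqprio min/max rates.
 */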
static int cxgb4_mqprio_alloc_tc(struct net_device *dev,
				 struct tc_mqprio_qopt_offload *mqprio)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CL_RL,
		.u.params.mode = SCHED_CLASS_MODE_FLOW,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sched_class *e;
	int ret;
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	p.u.params.channel = pi->tx_chan;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		/* Convert from bytes per second to Kbps */
		p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000);
		p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000);

		e = cxgb4_sched_class_alloc(dev, &p);
		if (!e) {
			ret = -ENOMEM;
			goto out_err;
		}

		tc_port_mqprio->tc_hwtc_map[i] = e->idx;
	}

	return 0;

out_err:
	while (i--)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);

	return ret;
}
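/* Return all hardware scheduler classes allocated for this port's mqprio
 * traffic classes.
 */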
static void cxgb4_mqprio_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u8 i;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++)
		cxgb4_sched_class_free(dev, tc_port_mqprio->tc_hwtc_map[i]);
}
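/* Bind an EOSW Tx queue's EOTID to a hardware traffic class by sending a
 * FLOWC work request, then wait for the firmware completion that confirms
 * the binding took effect.
 */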
static int cxgb4_mqprio_class_bind(struct net_device *dev,
				   struct sge_eosw_txq *eosw_txq,
				   u8 tc)
{
	struct ch_sched_flowc fe;
	int ret;

	init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;

	ret = cxgb4_sched_class_bind(dev, &fe, SCHED_FLOWC);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&eosw_txq->completion,
					  CXGB4_FLOWC_WAIT_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return 0;
}
static void cxgb4_mqprio_class_unbind(struct net_device *dev,
				      struct sge_eosw_txq *eosw_txq,
				      u8 tc)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_sched_flowc fe;

	/* If we're shutting down, interrupts are disabled and no completions
	 * come back. So, skip waiting for completions in this scenario.
	 */
	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		init_completion(&eosw_txq->completion);

	fe.tid = eosw_txq->eotid;
	fe.class = tc;
	cxgb4_sched_class_unbind(dev, &fe, SCHED_FLOWC);

	if (!(adap->flags & CXGB4_SHUTTING_DOWN))
		wait_for_completion_timeout(&eosw_txq->completion,
					    CXGB4_FLOWC_WAIT_TIMEOUT);
}
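/* Enable the mqprio offload: allocate hardware queues, assign a free
 * EOTID and software Tx queue to every offloaded queue, bind each queue
 * to its traffic class, and publish the tc-to-queue mapping to the
 * network stack. Any failure unwinds everything done so far.
 */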
static int cxgb4_mqprio_enable_offload(struct net_device *dev,
				       struct tc_mqprio_qopt_offload *mqprio)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	u32 qoffset, qcount, tot_qcount, qid, hwqid;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	int eotid, ret;
	u32 i, j;
	u8 hwtc;

	ret = cxgb4_mqprio_alloc_hw_resources(dev);
	if (ret)
		return -ENOMEM;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eotid = cxgb4_get_free_eotid(&adap->tids);
			if (eotid < 0) {
				ret = -ENOMEM;
				goto out_free_eotids;
			}

			qid = qoffset + j;
			hwqid = pi->first_qset + (eotid % pi->nqsets);
			eosw_txq = &tc_port_mqprio->eosw_txq[qid];
			ret = cxgb4_init_eosw_txq(dev, eosw_txq,
						  eotid, hwqid);
			if (ret)
				goto out_free_eotids;

			cxgb4_alloc_eotid(&adap->tids, eotid, eosw_txq);

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			ret = cxgb4_mqprio_class_bind(dev, eosw_txq, hwtc);
			if (ret)
				goto out_free_eotids;
		}
	}

	memcpy(&tc_port_mqprio->mqprio, mqprio,
	       sizeof(struct tc_mqprio_qopt_offload));

	/* Inform the stack about the configured tc params.
	 *
	 * Set the correct queue map. If no queue count has been
	 * specified, then send the traffic through default NIC
	 * queues instead of the ETHOFLD queues.
	 */
	ret = netdev_set_num_tc(dev, mqprio->qopt.num_tc);
	if (ret)
		goto out_free_eotids;

	tot_qcount = pi->nqsets;
	for (i = 0; i < mqprio->qopt.num_tc; i++) {
		qcount = mqprio->qopt.count[i];
		if (qcount) {
			qoffset = mqprio->qopt.offset[i] + pi->nqsets;
		} else {
			qcount = pi->nqsets;
			qoffset = 0;
		}

		ret = netdev_set_tc_queue(dev, i, qcount, qoffset);
		if (ret)
			goto out_reset_tc;

		tot_qcount += mqprio->qopt.count[i];
	}

	ret = netif_set_real_num_tx_queues(dev, tot_qcount);
	if (ret)
		goto out_reset_tc;

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_ACTIVE;
	return 0;

out_reset_tc:
	netdev_reset_tc(dev);
	i = mqprio->qopt.num_tc;

out_free_eotids:
	while (i-- > 0) {
		qoffset = mqprio->qopt.offset[i];
		qcount = mqprio->qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);
	return ret;
}
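/* Tear down an active mqprio offload: reset the stack's tc mapping,
 * unbind and free every EOSW Tx queue and its EOTID, then release the
 * hardware queues and scheduler classes.
 */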
static void cxgb4_mqprio_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct sge_eosw_txq *eosw_txq;
	u32 qoffset, qcount;
	u32 i, j;
	u8 hwtc;

	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
	if (tc_port_mqprio->state != CXGB4_MQPRIO_STATE_ACTIVE)
		return;

	netdev_reset_tc(dev);
	netif_set_real_num_tx_queues(dev, pi->nqsets);

	for (i = 0; i < tc_port_mqprio->mqprio.qopt.num_tc; i++) {
		qoffset = tc_port_mqprio->mqprio.qopt.offset[i];
		qcount = tc_port_mqprio->mqprio.qopt.count[i];
		for (j = 0; j < qcount; j++) {
			eosw_txq = &tc_port_mqprio->eosw_txq[qoffset + j];

			hwtc = tc_port_mqprio->tc_hwtc_map[i];
			cxgb4_mqprio_class_unbind(dev, eosw_txq, hwtc);

			cxgb4_free_eotid(&adap->tids, eosw_txq->eotid);
			cxgb4_free_eosw_txq(dev, eosw_txq);
		}
	}

	cxgb4_mqprio_free_hw_resources(dev);

	/* Free up the traffic classes */
	cxgb4_mqprio_free_tc(dev);

	memset(&tc_port_mqprio->mqprio, 0,
	       sizeof(struct tc_mqprio_qopt_offload));

	tc_port_mqprio->state = CXGB4_MQPRIO_STATE_DISABLED;
}
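/* Entry point for TC_SETUP_QDISC_MQPRIO from ndo_setup_tc. A request
 * with num_tc == 0 clears any existing configuration.
 *
 * Illustrative iproute2 usage (interface name and rates are examples
 * only):
 *
 *   tc qdisc replace dev eth0 root mqprio num_tc 2 \
 *	map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *	shaper bw_rlimit min_rate 1Gbit 2Gbit max_rate 3Gbit 4Gbit
 */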
int cxgb4_setup_tc_mqprio(struct net_device *dev,
			  struct tc_mqprio_qopt_offload *mqprio)
{
	bool needs_bring_up = false;
	int ret;

	ret = cxgb4_mqprio_validate(dev, mqprio);
	if (ret)
		return ret;

	/* To configure tc params, the current allocated EOTIDs must
	 * be freed up. However, they can't be freed up if there's
	 * traffic running on the interface. So, ensure the interface is
	 * down before configuring tc params.
	 */
	if (netif_running(dev)) {
		cxgb_close(dev);
		needs_bring_up = true;
	}

	cxgb4_mqprio_disable_offload(dev);

	/* If requested for clear, then just return since resources are
	 * already freed up by now.
	 */
	if (!mqprio->qopt.num_tc)
		goto out;

	/* Allocate free available traffic classes and configure
	 * their rate parameters.
	 */
	ret = cxgb4_mqprio_alloc_tc(dev, mqprio);
	if (ret)
		goto out;

	ret = cxgb4_mqprio_enable_offload(dev, mqprio);
	if (ret) {
		cxgb4_mqprio_free_tc(dev);
		goto out;
	}

out:
	if (needs_bring_up)
		cxgb_open(dev);

	return ret;
}
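/* Allocate the adapter-wide mqprio state: per-port structures and one
 * software Tx queue per available EOTID on each port.
 */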
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
	struct cxgb4_tc_mqprio *tc_mqprio;
	struct sge_eosw_txq *eosw_txq;
	int ret = 0;
	u8 i;

	tc_mqprio = kzalloc(sizeof(*tc_mqprio), GFP_KERNEL);
	if (!tc_mqprio)
		return -ENOMEM;

	tc_port_mqprio = kcalloc(adap->params.nports, sizeof(*tc_port_mqprio),
				 GFP_KERNEL);
	if (!tc_port_mqprio) {
		ret = -ENOMEM;
		goto out_free_mqprio;
	}

	tc_mqprio->port_mqprio = tc_port_mqprio;
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		eosw_txq = kcalloc(adap->tids.neotids, sizeof(*eosw_txq),
				   GFP_KERNEL);
		if (!eosw_txq) {
			ret = -ENOMEM;
			goto out_free_ports;
		}
		port_mqprio->eosw_txq = eosw_txq;
	}

	adap->tc_mqprio = tc_mqprio;
	refcount_set(&adap->tc_mqprio->refcnt, 0);
	return 0;

out_free_ports:
	for (i = 0; i < adap->params.nports; i++) {
		port_mqprio = &tc_mqprio->port_mqprio[i];
		kfree(port_mqprio->eosw_txq);
	}
	kfree(tc_port_mqprio);

out_free_mqprio:
	kfree(tc_mqprio);
	return ret;
}
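/* Disable any active offload on every port and free all mqprio state;
 * called on driver teardown.
 */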
void cxgb4_cleanup_tc_mqprio(struct adapter *adap)
{
	struct cxgb4_tc_port_mqprio *port_mqprio;
	u8 i;

	if (adap->tc_mqprio) {
		if (adap->tc_mqprio->port_mqprio) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_mqprio_disable_offload(dev);
				port_mqprio = &adap->tc_mqprio->port_mqprio[i];
				kfree(port_mqprio->eosw_txq);
			}
			kfree(adap->tc_mqprio->port_mqprio);
		}
		kfree(adap->tc_mqprio);
	}
}