// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <net/ip.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"

#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))

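/* Peek at the CQE at the current CQ head. A CQE whose type is still
 * NIX_XQE_TYPE_INVALID has not been filled by hardware yet, so NULL is
 * returned. On a valid CQE the head index is advanced and wrapped with
 * (cqe_cnt - 1), which assumes the CQ ring size is a power of two.
 */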
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
        struct nix_cqe_hdr_s *cqe_hdr;

        cqe_hdr = (struct nix_cqe_hdr_s *)CQE_ADDR(cq, cq->cq_head);
        if (cqe_hdr->cqe_type == NIX_XQE_TYPE_INVALID)
                return NULL;

        cq->cq_head++;
        cq->cq_head &= (cq->cqe_cnt - 1);

        return cqe_hdr;
}

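/* The SG subdescriptor's seg1/seg2/seg3 length fields share one 64-bit
 * word with the subdescriptor control bits. Viewed as an array of u16s,
 * that word's field order is reversed on big-endian CPUs, so frag_num()
 * mirrors the index within each group of four 16-bit slots.
 */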
static unsigned int frag_num(unsigned int i)
{
#ifdef __BIG_ENDIAN
        return (i & ~3) + 3 - (i & 3);
#else
        return i;
#endif
}

static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
                                        struct sk_buff *skb, int seg, int *len)
{
        const skb_frag_t *frag;
        struct page *page;
        int offset;

        /* First segment is always skb->data */
        if (!seg) {
                page = virt_to_page(skb->data);
                offset = offset_in_page(skb->data);
                *len = skb_headlen(skb);
        } else {
                frag = &skb_shinfo(skb)->frags[seg - 1];
                page = skb_frag_page(frag);
                offset = skb_frag_off(frag);
                *len = skb_frag_size(frag);
        }
        return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE);
}

static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
{
        int seg;

        for (seg = 0; seg < sg->num_segs; seg++) {
                otx2_dma_unmap_page(pfvf, sg->dma_addr[seg],
                                    sg->size[seg], DMA_TO_DEVICE);
        }
        sg->num_segs = 0;
}

static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
                                 struct otx2_cq_queue *cq,
                                 struct otx2_snd_queue *sq,
                                 struct nix_cqe_tx_s *cqe,
                                 int budget, int *tx_pkts, int *tx_bytes)
{
        struct nix_send_comp_s *snd_comp = &cqe->comp;
        struct sk_buff *skb = NULL;
        struct sg_list *sg;

        if (unlikely(snd_comp->status))
                net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
                                    pfvf->netdev->name, cq->cint_idx,
                                    snd_comp->status);

        sg = &sq->sg[snd_comp->sqe_id];
        skb = (struct sk_buff *)sg->skb;
        if (unlikely(!skb))
                return;

        *tx_bytes += skb->len;
        (*tx_pkts)++;
        otx2_dma_unmap_skb_frags(pfvf, sg);
        napi_consume_skb(skb, budget);
        sg->skb = (u64)NULL;
}

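/* Attach a received buffer to the skb as a page fragment. The CQE carries
 * the buffer's IOVA, so it is translated back to a kernel virtual address
 * through the IOMMU domain before the page can be referenced, and the DMA
 * mapping (made OTX2_HEAD_ROOM bytes before the packet start) is released.
 */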
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
                              u64 iova, int len)
{
        struct page *page;
        void *va;

        va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
        page = virt_to_page(va);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        va - page_address(page), len, pfvf->rbsize);

        otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
                            pfvf->rbsize, DMA_FROM_DEVICE);
}

static void otx2_set_rxhash(struct otx2_nic *pfvf,
                            struct nix_cqe_rx_s *cqe, struct sk_buff *skb)
{
        enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
        struct otx2_rss_info *rss;
        u32 hash = 0;

        if (!(pfvf->netdev->features & NETIF_F_RXHASH))
                return;

        rss = &pfvf->hw.rss_info;
        if (rss->flowkey_cfg) {
                if (rss->flowkey_cfg &
                    ~(NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6))
                        hash_type = PKT_HASH_TYPE_L4;
                else
                        hash_type = PKT_HASH_TYPE_L3;
                hash = cqe->hdr.flow_tag;
        }
        skb_set_hash(skb, hash, hash_type);
}

static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
                                  struct nix_cqe_rx_s *cqe, int qidx)
{
        struct otx2_drv_stats *stats = &pfvf->hw.drv_stats;
        struct nix_rx_parse_s *parse = &cqe->parse;

        if (parse->errlev == NPC_ERRLVL_RE) {
                switch (parse->errcode) {
                case ERRCODE_FCS:
                case ERRCODE_FCS_RCV:
                        atomic_inc(&stats->rx_fcs_errs);
                        break;
                case ERRCODE_UNDERSIZE:
                        atomic_inc(&stats->rx_undersize_errs);
                        break;
                case ERRCODE_OVERSIZE:
                        atomic_inc(&stats->rx_oversize_errs);
                        break;
                case ERRCODE_OL2_LEN_MISMATCH:
                        atomic_inc(&stats->rx_len_errs);
                        break;
                default:
                        atomic_inc(&stats->rx_other_errs);
                        break;
                }
        } else if (parse->errlev == NPC_ERRLVL_NIX) {
                switch (parse->errcode) {
                case ERRCODE_OL3_LEN:
                case ERRCODE_OL4_LEN:
                case ERRCODE_IL3_LEN:
                case ERRCODE_IL4_LEN:
                        atomic_inc(&stats->rx_len_errs);
                        break;
                case ERRCODE_OL4_CSUM:
                case ERRCODE_IL4_CSUM:
                        atomic_inc(&stats->rx_csum_errs);
                        break;
                default:
                        atomic_inc(&stats->rx_other_errs);
                        break;
                }
        } else {
                atomic_inc(&stats->rx_other_errs);
                /* For now ignore all the NPC parser errors and
                 * pass the packets to stack.
                 */
                return false;
        }

        /* If RXALL is enabled pass on packets to stack. */
        if (cqe->sg.segs && (pfvf->netdev->features & NETIF_F_RXALL))
                return false;

        /* Free buffer back to pool */
        if (cqe->sg.segs)
                otx2_aura_freeptr(pfvf, qidx, cqe->sg.seg_addr & ~0x07ULL);
        return true;
}

static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
                                 struct napi_struct *napi,
                                 struct otx2_cq_queue *cq,
                                 struct nix_cqe_rx_s *cqe)
{
        struct nix_rx_parse_s *parse = &cqe->parse;
        struct sk_buff *skb = NULL;

        if (unlikely(parse->errlev || parse->errcode)) {
                if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
                        return;
        }

        skb = napi_get_frags(napi);
        if (unlikely(!skb))
                return;

        otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
        cq->pool_ptrs++;

        otx2_set_rxhash(pfvf, cqe, skb);

        skb_record_rx_queue(skb, cq->cq_idx);
        if (pfvf->netdev->features & NETIF_F_RXCSUM)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_frags(napi);
}

static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
                                struct napi_struct *napi,
                                struct otx2_cq_queue *cq, int budget)
{
        struct nix_cqe_rx_s *cqe;
        int processed_cqe = 0;
        s64 bufptr;

        while (likely(processed_cqe < budget)) {
                cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
                if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
                    !cqe->sg.seg_addr) {
                        if (!processed_cqe)
                                return 0;
                        break;
                }
                cq->cq_head++;
                cq->cq_head &= (cq->cqe_cnt - 1);

                otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);

                cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
                cqe->sg.seg_addr = 0x00;
                processed_cqe++;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);

        if (unlikely(!cq->pool_ptrs))
                return 0;

        /* Refill pool with new buffers */
        while (cq->pool_ptrs) {
                bufptr = otx2_alloc_rbuf(pfvf, cq->rbpool, GFP_ATOMIC);
                if (unlikely(bufptr <= 0)) {
                        struct refill_work *work;
                        struct delayed_work *dwork;

                        work = &pfvf->refill_wrk[cq->cq_idx];
                        dwork = &work->pool_refill_work;
                        /* Schedule a task if no other task is running */
                        if (!cq->refill_task_sched) {
                                cq->refill_task_sched = true;
                                schedule_delayed_work(dwork,
                                                      msecs_to_jiffies(100));
                        }
                        break;
                }
                otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
                cq->pool_ptrs--;
        }

        return processed_cqe;
}

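/* TX completion path: each TX CQE's sqe_id indexes the sg_list that was
 * saved when the SQE was queued, which is enough to unmap and free the
 * skb. Completed bytes/packets are reported to BQL via
 * netdev_tx_completed_queue() and the queue is woken if it was stopped.
 */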
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
                                struct otx2_cq_queue *cq, int budget)
{
        int tx_pkts = 0, tx_bytes = 0;
        struct nix_cqe_tx_s *cqe;
        int processed_cqe = 0;

        while (likely(processed_cqe < budget)) {
                cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
                if (unlikely(!cqe)) {
                        if (!processed_cqe)
                                return 0;
                        break;
                }
                otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
                                     cqe, budget, &tx_pkts, &tx_bytes);

                cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
                processed_cqe++;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);

        if (likely(tx_pkts)) {
                struct netdev_queue *txq;

                txq = netdev_get_tx_queue(pfvf->netdev, cq->cint_idx);
                netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
                /* Check if queue was stopped earlier due to ring full */
                smp_mb();
                if (netif_tx_queue_stopped(txq) &&
                    netif_carrier_ok(pfvf->netdev))
                        netif_tx_wake_queue(txq);
        }
        return 0;
}

int otx2_napi_handler(struct napi_struct *napi, int budget)
{
        struct otx2_cq_poll *cq_poll;
        int workdone = 0, cq_idx, i;
        struct otx2_cq_queue *cq;
        struct otx2_qset *qset;
        struct otx2_nic *pfvf;

        cq_poll = container_of(napi, struct otx2_cq_poll, napi);
        pfvf = (struct otx2_nic *)cq_poll->dev;
        qset = &pfvf->qset;

        for (i = CQS_PER_CINT - 1; i >= 0; i--) {
                cq_idx = cq_poll->cq_ids[i];
                if (unlikely(cq_idx == CINT_INVALID_CQ))
                        continue;
                cq = &qset->cq[cq_idx];
                if (cq->cq_type == CQ_RX) {
                        /* If the RQ refill WQ task is running, skip napi
                         * scheduler for this queue.
                         */
                        if (cq->refill_task_sched)
                                continue;
                        workdone += otx2_rx_napi_handler(pfvf, napi,
                                                         cq, budget);
                } else {
                        workdone += otx2_tx_napi_handler(pfvf, cq, budget);
                }
        }

        /* Clear the IRQ */
        otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));

        if (workdone < budget && napi_complete_done(napi, workdone)) {
                /* If interface is going down, don't re-enable IRQ */
                if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
                        return workdone;

                /* Re-enable interrupts */
                otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
                             BIT_ULL(0));
        }
        return workdone;
}

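/* Flush a fully built SQE to hardware. The descriptor is copied into the
 * per-SQ LMT (large store) region and otx2_lmt_flush() issues the store to
 * the NIX LF doorbell address; a zero status indicates the LMTST did not
 * complete (e.g. it was interrupted), so the copy and flush are retried
 * until the store goes through.
 */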
static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
{
        u64 status;

        /* Packet data stores should finish before SQE is flushed to HW */
        dma_wmb();
        do {
                memcpy(sq->lmt_addr, sq->sqe_base, size);
                status = otx2_lmt_flush(sq->io_addr);
        } while (status == 0);

        sq->head++;
        sq->head &= (sq->sqe_cnt - 1);
}

#define MAX_SEGS_PER_SG	3
/* Add SQE scatter/gather subdescriptor structure */
static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                            struct sk_buff *skb, int num_segs, int *offset)
{
        struct nix_sqe_sg_s *sg = NULL;
        u64 dma_addr, *iova = NULL;
        u16 *sg_lens = NULL;
        int seg, len;

        sq->sg[sq->head].num_segs = 0;

        for (seg = 0; seg < num_segs; seg++) {
                if ((seg % MAX_SEGS_PER_SG) == 0) {
                        sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
                        sg->ld_type = NIX_SEND_LDTYPE_LDD;
                        sg->subdc = NIX_SUBDC_SG;
                        sg->segs = 0;
                        sg_lens = (void *)sg;
                        iova = (void *)sg + sizeof(*sg);
                        /* Next subdc always starts at a 16byte boundary.
                         * So if this SG will hold 2 or 3 segments, reserve
                         * space for 3 IOVA pointers; otherwise just one.
                         */
                        if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1))
                                *offset += sizeof(*sg) + (3 * sizeof(u64));
                        else
                                *offset += sizeof(*sg) + sizeof(u64);
                }
                dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len);
                if (dma_mapping_error(pfvf->dev, dma_addr))
                        return false;

                sg_lens[frag_num(seg % MAX_SEGS_PER_SG)] = len;
                sg->segs++;
                *iova++ = dma_addr;

                /* Save DMA mapping info for later unmapping */
                sq->sg[sq->head].dma_addr[seg] = dma_addr;
                sq->sg[sq->head].size[seg] = len;
                sq->sg[sq->head].num_segs++;
        }

        sq->sg[sq->head].skb = (u64)skb;
        return true;
}

/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                             struct nix_sqe_hdr_s *sqe_hdr,
                             struct sk_buff *skb, u16 qidx)
{
        int proto = 0;

        /* Check if SQE was framed before, if yes then no need to
         * set these constants again and again.
         */
        if (!sqe_hdr->total) {
                /* Don't free Tx buffers to Aura */
                sqe_hdr->df = 1;
                sqe_hdr->aura = sq->aura_id;
                /* Post a CQE Tx after pkt transmission */
                sqe_hdr->pnc = 1;
                sqe_hdr->sq = qidx;
        }
        sqe_hdr->total = skb->len;
        /* Set SQE identifier which will be used later for freeing SKB */
        sqe_hdr->sqe_id = sq->head;

        /* Offload TCP/UDP checksum to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                sqe_hdr->ol3ptr = skb_network_offset(skb);
                sqe_hdr->ol4ptr = skb_transport_offset(skb);
                /* get vlan protocol Ethertype */
                if (eth_type_vlan(skb->protocol))
                        skb->protocol = vlan_get_protocol(skb);

                if (skb->protocol == htons(ETH_P_IP)) {
                        proto = ip_hdr(skb)->protocol;
                        /* In case of TSO, HW needs this to be explicitly set.
                         * So set this always, instead of adding a check.
                         */
                        sqe_hdr->ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        proto = ipv6_hdr(skb)->nexthdr;
                        sqe_hdr->ol3type = NIX_SENDL3TYPE_IP6;
                }

                if (proto == IPPROTO_TCP)
                        sqe_hdr->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
                else if (proto == IPPROTO_UDP)
                        sqe_hdr->ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
        }
}

bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
                        struct sk_buff *skb, u16 qidx)
{
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
        struct otx2_nic *pfvf = netdev_priv(netdev);
        int offset, num_segs, free_sqe;
        struct nix_sqe_hdr_s *sqe_hdr;

        /* Check if there is room for new SQE.
         * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
         * will give free SQE count.
         */
        free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;

        if (!free_sqe || free_sqe < sq->sqe_thresh)
                return false;

        num_segs = skb_shinfo(skb)->nr_frags + 1;

        /* If SKB doesn't fit in a single SQE, linearize it.
         * TODO: Consider adding JUMP descriptor instead.
         */
        if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) {
                if (__skb_linearize(skb)) {
                        dev_kfree_skb_any(skb);
                        return true;
                }
                num_segs = skb_shinfo(skb)->nr_frags + 1;
        }

        /* Set SQE's SEND_HDR.
         * Do not clear the first 64bit as it contains constant info.
         */
        memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
        sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
        otx2_sqe_add_hdr(pfvf, sq, sqe_hdr, skb, qidx);
        offset = sizeof(*sqe_hdr);

        /* Add SG subdesc with data frags */
        if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) {
                otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]);
                return false;
        }

        sqe_hdr->sizem1 = (offset / 16) - 1;

        netdev_tx_sent_queue(txq, skb->len);

        /* Flush SQE to HW */
        otx2_sqe_flush(sq, offset);

        return true;
}

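/* The two cleanup helpers below drain whatever CQEs are still pending when
 * the interface is torn down: RX buffers are unmapped and their pages
 * returned to the allocator, pending TX skbs are unmapped and freed, and in
 * both cases the processed CQE count is written to the CQ doorbell.
 */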
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
        struct nix_cqe_rx_s *cqe;
        int processed_cqe = 0;
        u64 iova, pa;

        while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
                if (!cqe->sg.subdc)
                        continue;
                iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
                pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
                otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE);
                put_page(virt_to_page(phys_to_virt(pa)));
                processed_cqe++;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);
}

void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
{
        struct sk_buff *skb = NULL;
        struct otx2_snd_queue *sq;
        struct nix_cqe_tx_s *cqe;
        int processed_cqe = 0;
        struct sg_list *sg;

        sq = &pfvf->qset.sq[cq->cint_idx];

        while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
                sg = &sq->sg[cqe->comp.sqe_id];
                skb = (struct sk_buff *)sg->skb;
                if (skb) {
                        otx2_dma_unmap_skb_frags(pfvf, sg);
                        dev_kfree_skb_any(skb);
                        sg->skb = (u64)NULL;
                }
                processed_cqe++;
        }

        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);
}

int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
{
        struct msg_req *msg;
        int err;

        otx2_mbox_lock(&pfvf->mbox);
        if (enable)
                msg = otx2_mbox_alloc_msg_nix_lf_start_rx(&pfvf->mbox);
        else
                msg = otx2_mbox_alloc_msg_nix_lf_stop_rx(&pfvf->mbox);
        if (!msg) {
                otx2_mbox_unlock(&pfvf->mbox);
                return -ENOMEM;
        }

        err = otx2_sync_mbox_msg(&pfvf->mbox);
        otx2_mbox_unlock(&pfvf->mbox);
        return err;
}