// SPDX-License-Identifier: GPL-2.0-only
/* aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

12 #include "aq_hw_utils.h"
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
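/* Unmap an Rx page from the device and drop the ring's reference to it;
 * the page may live on if the stack still holds references via skb frags.
 */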
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

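/* Allocate a page of the requested order and DMA-map it for Rx.
 * On success the page, mapping address and order are recorded in @rxpage;
 * on mapping failure the page is freed again and an error is returned.
 */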
static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

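/* Prepare the page backing one Rx descriptor. An existing page is flipped
 * to the next AQ_CFG_RX_FRAME_MAX slice while space remains, reused from
 * offset zero once the ring holds the only reference, or released and
 * replaced when other users still hold it (tracked via the pg_flips,
 * pg_reuses and pg_losts counters).
 */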
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
			    (PAGE_SIZE << order)) {
				self->stats.rx.pg_flips++;
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				self->stats.rx.pg_losts++;
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			self->stats.rx.pg_reuses++;
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		return ret;
	}

	return 0;
}

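/* Common allocation path for Tx and Rx rings: the software buffer ring is
 * kcalloc'ed and the hardware descriptor ring is taken from coherent DMA
 * memory; on failure the partially built ring is freed and NULL is returned.
 */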
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);

	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);

	return self;
}

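/* Allocate the descriptor-only ring used for hardware-timestamped (PTP) Rx:
 * it carries no software buffer ring, only coherent descriptor memory.
 */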
struct aq_ring_s *
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, unsigned int size, unsigned int dx_size)
{
	struct device *dev = aq_nic_get_dev(aq_nic);
	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;

	memset(self, 0, sizeof(*self));

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = size;
	self->dx_size = dx_size;

	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
					   GFP_KERNEL);
	if (!self->dx_ring) {
		aq_ring_free(self);
		return NULL;
	}

	return self;
}

int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;

	return 0;
}

static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

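/* Flow control for the Tx subqueue: stop it when the ring can no longer
 * accept a maximally fragmented skb, wake it once enough descriptors free up.
 */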
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}

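/* Reclaim completed Tx descriptors, at most AQ_CFG_TX_CLEAN_BUDGET per call:
 * head buffers are unmapped with dma_unmap_single(), fragments with
 * dma_unmap_page(), and the skb is freed on the EOP descriptor. Returns
 * true if the clean loop stopped before the budget ran out.
 */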
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);

		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

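/* Translate hardware checksum offload bits into skb checksum state: errors
 * fall back to CHECKSUM_NONE, otherwise each validated layer (IP, then
 * TCP/UDP) bumps the checksum-unnecessary level.
 */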
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		++self->stats.rx.errors;
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

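/* Rx NAPI poll body for one ring: walk descriptors from sw_head towards
 * hw_head within the given budget. Multi-descriptor (RSC) packets are
 * gathered first; packets that fit a single buffer are wrapped with
 * build_skb(), larger ones get a copied header plus page fragments. VLAN,
 * checksum and RSS hash offload results are applied before the skb is
 * passed to napi_gro_receive().
 */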
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;
			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error || buff->is_cso_err) {
				buff_ = buff;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				++self->stats.rx.errors;
				continue;
			}
		}

		if (buff->is_error) {
			++self->stats.rx.errors;
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
						aq_buf_vaddr(&buff->rxdata),
						buff->len);
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {
			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
						aq_buf_vaddr(&buff->rxdata),
						buff->len);

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
				hdr_len = eth_get_headlen(skb->dev,
						aq_buf_vaddr(&buff->rxdata),
						AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
			       ALIGN(hdr_len, sizeof(long)));

			if (buff->len - hdr_len > 0) {
				skb_add_rx_frag(skb, 0, buff->rxdata.page,
						buff->rxdata.pg_off + hdr_len,
						buff->len - hdr_len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff->rxdata.page);
			}

			if (!buff->is_eop) {
				buff_ = buff;
				i = 1U;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					dma_sync_single_range_for_cpu(
							aq_nic_get_dev(self->aq_nic),
							buff_->rxdata.daddr,
							buff_->rxdata.pg_off,
							buff_->len,
							DMA_FROM_DEVICE);
					skb_add_rx_frag(skb, i++,
							buff_->rxdata.page,
							buff_->rxdata.pg_off,
							buff_->len,
							AQ_CFG_RX_FRAME_MAX);
					page_ref_inc(buff_->rxdata.page);
					buff_->is_cleaned = 1;

					buff->is_ip_cso &= buff_->is_ip_cso;
					buff->is_udp_cso &= buff_->is_udp_cso;
					buff->is_tcp_cso &= buff_->is_tcp_cso;
					buff->is_cso_err |= buff_->is_cso_err;
				} while (!buff_->is_eop);
			}
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

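/* Drain the PTP hardware-timestamp Rx ring: extract the timestamp from each
 * completed descriptor via the hardware ops and feed it to the PTP Tx
 * timestamping path.
 */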
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
}

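/* Refill the Rx ring with page-backed buffers. Nothing is done until the
 * number of free descriptors reaches the refill threshold; then every free
 * slot from sw_tail onwards gets a buffer of AQ_CFG_RX_FRAME_MAX bytes.
 */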
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

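/* Release every Rx page still owned by the ring between sw_head and sw_tail. */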
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}
}

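/* Free the software buffer ring and the coherent descriptor memory. */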
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}