/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/busy_poll.h>
#ifdef CONFIG_CHELSIO_T4_FCOE
#include <scsi/fc/fc_fcoe.h>
#endif /* CONFIG_CHELSIO_T4_FCOE */
#include "t4_values.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
#include "cxgb4_tc_mqprio.h"
/*
 * Rx buffer size.  We use largish buffers if possible but settle for single
 * pages under memory shortage.
 */
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif

/* RX_PULL_LEN should be <= RX_COPY_THRES */
#define RX_COPY_THRES    256
#define RX_PULL_LEN      128

/*
 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
 */
#define RX_PKT_SKB_LEN   512
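/*
 * Illustrative sketch (not part of the driver): a copy-break style receive
 * path would use the two thresholds above roughly as follows, where buf/len
 * describe a received Free List buffer:
 *
 *	if (len <= RX_COPY_THRES) {
 *		skb = dev_alloc_skb(len);		copy the whole packet
 *		if (skb)
 *			skb_put_data(skb, buf, len);
 *	} else {
 *		skb = dev_alloc_skb(RX_PKT_SKB_LEN);	small linear part only
 *		if (skb) {
 *			skb_put_data(skb, buf, RX_PULL_LEN);
 *			remaining len - RX_PULL_LEN bytes stay attached as
 *			page fragments (e.g. via skb_add_rx_frag())
 *		}
 *	}
 */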
/*
 * Max number of Tx descriptors we clean up at a time.  Should be modest as
 * freeing skbs isn't cheap and it happens while holding locks.  We just need
 * to free packets faster than they arrive, we eventually catch up and keep
 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.  It should
 * also match the CIDX Flush Threshold.
 */
#define MAX_TX_RECLAIM 32

/*
 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
 * allocating buffers isn't cheap either.
 */
#define MAX_RX_REFILL 16U

/*
 * Period of the Rx queue check timer.  This timer is infrequent as it has
 * something to do only when the system experiences severe memory shortage.
 */
#define RX_QCHECK_PERIOD (HZ / 2)

/*
 * Period of the Tx queue check timer.
 */
#define TX_QCHECK_PERIOD (HZ / 2)

/*
 * Max number of Tx descriptors to be reclaimed by the Tx timer.
 */
#define MAX_TIMER_TX_RECLAIM 100

/*
 * Timer index used when backing off due to memory shortage.
 */
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
 * for a full sized WR.
 */
#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
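/* Worked example: with the usual SGE_MAX_WR_LEN of 512 bytes and a 64-byte
 * struct tx_desc (8 flits), TXQ_STOP_THRES works out to 512 / 64 = 8
 * descriptors, i.e. we suspend the queue while one maximum-size WR's worth
 * of descriptors is still free.
 */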
/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Max size of a WR sent through a control Tx queue.
 */
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;
	dma_addr_t dma_addr;
};
/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000
static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
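/* Worked example, assuming a typical pktshift of 2 and fl_align of 64:
 * FL_MTU_SMALL_BUFSIZE = ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes and
 * FL_MTU_LARGE_BUFSIZE = ALIGN(2 + 14 + 4 + 9000, 64) = 9024 bytes.
 */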
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
#define MIN_NAPI_WORK  1

static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
}

static inline bool is_buf_mapped(const struct rx_sw_desc *d)
{
	return !(d->dma_addr & RX_UNMAPPED_BUF);
}
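/* Illustrative round trip for the flag encoding above, assuming a DMA
 * mapping that honors the required 32-byte alignment:
 *
 *	mapping = dma_map_page(dev, pg, 0, size, DMA_FROM_DEVICE);
 *	mapping |= RX_LARGE_PG_BUF;	stash the buffer size index
 *	sd->dma_addr = mapping;
 *	...
 *	get_buf_addr(sd)  == mapping & ~RX_BUF_FLAGS, the original address
 *	is_buf_mapped(sd) == true until RX_UNMAPPED_BUF is set
 */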
/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write new
 *	packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

/**
 *	fl_cap - return the capacity of a free-buffer list
 *	@fl: the FL
 *
 *	Returns the capacity of a free-buffer list.  The capacity is less than
 *	the size because one descriptor needs to be left unpopulated, otherwise
 *	HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
/**
 *	fl_starving - return whether a Free List is starving.
 *	@adapter: pointer to the adapter
 *	@fl: the Free List
 *
 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
 *	threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);

out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
		      const dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];
	for (fp = si->frags; fp < end; fp++)
		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif
static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
{
	const struct ulptx_sge_pair *p;
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;

	if (likely(skb_headlen(skb)))
		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
				 DMA_TO_DEVICE);
	else {
		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
			       DMA_TO_DEVICE);
		nfrags--;
	}

	/*
	 * the complexity below is because of the possibility of a wrap-around
	 * in the middle of an SGL
	 */
	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p++;
		} else if ((u8 *)p == (u8 *)q->stat) {
			p = (const struct ulptx_sge_pair *)q->desc;
			goto unmap;
		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[1]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[2];
		} else {
			const __be64 *addr = (const __be64 *)q->desc;

			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
				       ntohl(p->len[0]), DMA_TO_DEVICE);
			dma_unmap_page(dev, be64_to_cpu(addr[0]),
				       ntohl(p->len[1]), DMA_TO_DEVICE);
			p = (const struct ulptx_sge_pair *)&addr[1];
		}
	}
	if (nfrags) {
		__be64 addr;

		if ((u8 *)p == (u8 *)q->stat)
			p = (const struct ulptx_sge_pair *)q->desc;
		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
						       *(const __be64 *)q->desc;
		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
			       DMA_TO_DEVICE);
	}
}
/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
		  unsigned int n, bool unmap)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;
	struct device *dev = adap->pdev_dev;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {                       /* an SGL is present */
			if (unmap)
				unmap_sgl(dev, d->skb, d->sgl, q);
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
}
/**
 *	reclaim_completed_tx - reclaims completed TX Descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx Descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  If @maxreclaim == -1,
 *	then we'll use a default maximum.  Called with the TX Queue locked.
 */
static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				       int maxreclaim, bool unmap)
{
	int reclaim = reclaimable(q);

	if (reclaim) {
		/*
		 * Limit the amount of clean up work we do at a time to keep
		 * the Tx lock hold time O(1).
		 */
		if (maxreclaim < 0)
			maxreclaim = MAX_TX_RECLAIM;
		if (reclaim > maxreclaim)
			reclaim = maxreclaim;

		free_tx_desc(adap, q, reclaim, unmap);
		q->in_use -= reclaim;
	}

	return reclaim;
}
/**
 *	cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adap: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@unmap: whether the buffers should be unmapped for DMA
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue locked.
 */
void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
				bool unmap)
{
	(void)reclaim_completed_tx(adap, q, -1, unmap);
}
EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG();
	}

	return buf_size;
}
/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list to free buffers from
 *	@n: how many buffers to free
 *
 *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
 *	buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
{
	while (n--) {
		struct rx_sw_desc *d = &q->sdesc[q->cidx];

		if (is_buf_mapped(d))
			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
				       get_buf_size(adap, d),
				       DMA_FROM_DEVICE);
		put_page(d->page);
		d->page = NULL;
		if (++q->cidx == q->size)
			q->cidx = 0;
		q->avail--;
	}
}
/**
 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 *	@adap: the adapter
 *	@q: the SGE free list
 *
 *	Unmap the current buffer on an SGE free-buffer Rx queue.  The
 *	buffer must be made inaccessible to HW before calling this function.
 *
 *	This is similar to @free_rx_bufs above but does not free the buffer.
 *	Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
{
	struct rx_sw_desc *d = &q->sdesc[q->cidx];

	if (is_buf_mapped(d))
		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), DMA_FROM_DEVICE);
	d->page = NULL;
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
				  dma_addr_t mapping)
{
	sd->page = pg;
	sd->dma_addr = mapping;      /* includes size low bits */
}
/**
 *	refill_fl - refill an SGE Rx buffer ring
 *	@adap: the adapter
 *	@q: the ring to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for the allocations
 *
 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.  If afterwards the queue is
 *	found critically low, it is marked as starving in the bitmap of
 *	starving FLs.
 *
 *	Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
			      gfp_t gfp)
{
	struct sge *s = &adap->sge;
	struct page *pg;
	dma_addr_t mapping;
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	int node;

#ifdef CONFIG_DEBUG_FS
	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
		goto out;
#endif

	gfp |= __GFP_NOWARN;
	node = dev_to_node(adap->pdev_dev);

	if (s->fl_pg_order == 0)
		goto alloc_small_pages;

	/*
	 * Prefer large buffers
	 */
	while (n) {
		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
		if (unlikely(!pg)) {
			q->large_alloc_failed++;
			break;       /* fall back to single pages */
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0,
				       PAGE_SIZE << s->fl_pg_order,
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, s->fl_pg_order);
			q->mapping_err++;
			goto out;   /* do not try small pages for this error */
		}
		mapping |= RX_LARGE_PG_BUF;
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
		n--;
	}

alloc_small_pages:
	while (n--) {
		pg = alloc_pages_node(node, gfp, 0);
		if (unlikely(!pg)) {
			q->alloc_failed++;
			break;
		}

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			put_page(pg);
			q->mapping_err++;
			goto out;
		}
		*d++ = cpu_to_be64(mapping);

		set_rx_sw_desc(sd, pg, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		smp_wmb();
		q->low++;
		set_bit(q->cntxt_id - adap->sge.egr_start,
			adap->sge.starving_fl);
	}

	return cred;
}
static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
		  GFP_ATOMIC);
}
/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@dev: the PCI device's core device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *	@stat_size: extra space in HW ring for status information
 *	@node: preferred node for memory allocations
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the bus address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, int node)
{
	size_t len = nelem * elem_size + stat_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size) {
		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);

		if (!s) {
			dma_free_coherent(dev, len, p, *phys);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;
	return p;
}
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N); and finally the
	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd.
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
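/* Worked examples of the above: sgl_len(1) = 2 flits (the DSGL header and
 * Length0 share a flit, Address0 takes one), sgl_len(2) = 4 (Length1 pairs
 * with a zero Length2 in one flit, Address1 takes another) and sgl_len(3) =
 * 5 (Length1/Length2 share a flit, Address1 and Address2 take one each).
 */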
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns whether an Ethernet packet is small enough to fit as
 *	immediate data.  Return value corresponds to headroom required.
 */
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
	int hdrlen = 0;

	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
	    chip_ver > CHELSIO_T5) {
		hdrlen = sizeof(struct cpl_tx_tnl_lso);
		hdrlen += sizeof(struct cpl_tx_pkt_core);
	} else {
		hdrlen = skb_shinfo(skb)->gso_size ?
			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
		hdrlen += sizeof(struct cpl_tx_pkt);
	}
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
/**
 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of flits needed for a Tx WR for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, chip_ver);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size) {
		if (skb->encapsulation && chip_ver > CHELSIO_T5)
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_tnl_lso);
		else
			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
				 sizeof(struct cpl_tx_pkt_lso_core);

		hdrlen += sizeof(struct cpl_tx_pkt_core);
		flits += (hdrlen / sizeof(__be64));
	} else {
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	}
	return flits;
}
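/* Worked example, assuming the 16-byte fw_eth_tx_pkt_wr and cpl_tx_pkt_core
 * structures: a non-GSO skb with linear data plus two page fragments needs
 * sgl_len(3) = 5 flits of SGL and 4 flits of WR/CPL headers, 9 flits total,
 * which flits_to_desc() rounds up to two 8-flit Tx descriptors.
 */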
/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *	@chip_ver: chip version
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
					 unsigned int chip_ver)
{
	return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
/**
 *	cxgb4_write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@q: the Tx queue we are writing into
 *	@sgl: starting location for writing the SGL
 *	@end: points right after the end of the SGL
 *	@start: start offset into skb main-body data to include in the SGL
 *	@addr: the list of bus addresses for the SGL elements
 *
 *	Generates a gather list for the buffers that make up a packet.
 *	The caller must provide adequate space for the SGL that will be written.
 *	The SGL includes all of the packet's page fragments and the data in its
 *	main body except for the first @start bytes.  @sgl must be 16-byte
 *	aligned and within a Tx descriptor with available space.  @end points
 *	right after the end of the SGL but does not account for any potential
 *	wrap around, i.e., @end > @sgl.
 */
void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		     const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	const struct skb_shared_info *si = skb_shinfo(skb);
	unsigned int nfrags = si->nr_frags;
	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

	len = skb_headlen(skb) - start;
	if (likely(len)) {
		sgl->len0 = htonl(len);
		sgl->addr0 = cpu_to_be64(addr[0] + start);
		nfrags++;
	} else {
		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
		sgl->addr0 = cpu_to_be64(addr[1]);
	}

	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
			      ULPTX_NSGE_V(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
		to->addr[0] = cpu_to_be64(addr[i]);
		to->addr[1] = cpu_to_be64(addr[++i]);
	}
	if (nfrags) {
		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
		to->len[1] = cpu_to_be32(0);
		to->addr[0] = cpu_to_be64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = (u8 *)end - (u8 *)q->stat;
		memcpy(q->desc, (u8 *)buf + part0, part1);
		end = (void *)q->desc + part1;
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*end = 0;
}
EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies 64 byte coalesced work request to
 * memory mapped BAR2 space. For coalesced WR SGE fetches
 * data from the FIFO instead of from Host.
 */
static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}
/**
 *	cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@n: number of new descriptors to give to HW
 *
 *	Ring the doorbell for a Tx queue.
 */
inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
	/* Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	wmb();

	/* If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		u32 val = PIDX_V(n);
		unsigned long flags;

		/* For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		spin_lock_irqsave(&q->db_lock, flags);
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     QID_V(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
		spin_unlock_irqrestore(&q->db_lock, flags);
	} else {
		u32 val = PIDX_T5_V(n);

		/* T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & DBPRIO_F);

		/* If we're only writing a single TX Descriptor and we can use
		 * Inferred QID registers, we can use the Write Combining
		 * Gather Buffer; otherwise we use the simple doorbell.
		 */
		if (n == 1 && q->bar2_qid == 0) {
			int index = (q->pidx
				     ? (q->pidx - 1)
				     : (q->size - 1));
			u64 *wr = (u64 *)&q->desc[index];

			cxgb_pio_copy((u64 __iomem *)
				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
				      wr);
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);
		}

		/* This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		wmb();
	}
}
EXPORT_SYMBOL(cxgb4_ring_tx_db);
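/* The transmit paths below use the helper above in the following order
 * (sketch):
 *
 *	wr = (void *)&q->desc[q->pidx];
 *	... fill in the Work Request, CPL and SGL ...
 *	txq_advance(q, ndesc);			update SW producer state
 *	cxgb4_ring_tx_db(adap, q, ndesc);	wmb() and doorbell/WC write
 */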
/**
 *	cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
 *	@skb: the packet
 *	@q: the Tx queue where the packet will be inlined
 *	@pos: starting position in the Tx queue where to inline the packet
 *
 *	Inline a packet's contents directly into Tx descriptors, starting at
 *	the given position within the Tx DMA ring.
 *	Most of the complexity of this operation is dealing with wrap arounds
 *	in the middle of the packet we want to inline.
 */
void cxgb4_inline_tx_skb(const struct sk_buff *skb,
			 const struct sge_txq *q, void *pos)
{
	int left = (void *)q->stat - pos;
	u64 *p;

	if (likely(skb->len <= left)) {
		if (likely(!skb->data_len))
			skb_copy_from_linear_data(skb, pos, skb->len);
		else
			skb_copy_bits(skb, 0, pos, skb->len);
		pos += skb->len;
	} else {
		skb_copy_bits(skb, 0, pos, left);
		skb_copy_bits(skb, left, q->desc, skb->len - left);
		pos = (void *)q->desc + (skb->len - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8)
		*p = 0;
}
EXPORT_SYMBOL(cxgb4_inline_tx_skb);
static void *inline_tx_skb_header(const struct sk_buff *skb,
				  const struct sge_txq *q, void *pos,
				  int length)
{
	u64 *p;
	int left = (void *)q->stat - pos;

	if (likely(length <= left)) {
		memcpy(pos, skb->data, length);
		pos += length;
	} else {
		memcpy(pos, skb->data, left);
		memcpy(q->desc, skb->data + left, length - left);
		pos = (void *)q->desc + (length - left);
	}
	/* 0-pad to multiple of 16 */
	p = PTR_ALIGN(pos, 8);
	if ((uintptr_t)p & 8) {
		*p = 0;
		pos += sizeof(u64);
	}
	return pos;
}
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	bool inner_hdr_csum = false;
	u16 proto, ver;

	if (skb->encapsulation &&
	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
		inner_hdr_csum = true;

	if (inner_hdr_csum) {
		ver = inner_ip_hdr(skb)->version;
		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
			inner_ipv6_hdr(skb)->nexthdr;
	} else {
		ver = ip_hdr(skb)->version;
		proto = (ver == 4) ? ip_hdr(skb)->protocol :
			ipv6_hdr(skb)->nexthdr;
	}

	if (ver == 4) {
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		int eth_hdr_len, l4_len;
		u64 hdr_len;

		if (inner_hdr_csum) {
			/* This allows checksum offload for all encapsulated
			 * packets like GRE etc..
			 */
			l4_len = skb_inner_network_header_len(skb);
			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
		} else {
			l4_len = skb_network_header_len(skb);
			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
		}
		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}
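/* As an example, a plain TCP/IPv4 packet in an untagged Ethernet frame takes
 * the first branch above with csum_type = TX_CSUM_TCPIP, l4_len equal to the
 * IP header length (20 without options) and eth_hdr_len = 0, so the returned
 * control bits ask the hardware to insert both the IP header checksum and
 * the TCP checksum.
 */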
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
#ifdef CONFIG_CHELSIO_T4_FCOE
static inline int
cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
		  const struct port_info *pi, u64 *cntrl)
{
	const struct cxgb_fcoe *fcoe = &pi->fcoe;

	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
		return 0;

	if (skb->protocol != htons(ETH_P_FCOE))
		return 0;

	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);

	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));

	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
		return -ENOTSUPP;

	/* FC CRC offload */
	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
		     TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
		     TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
		     TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
		     TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
	return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
/* Returns tunnel type if hardware supports offloading of the same.
 * It is called only for T5 and onwards.
 */
enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
{
	u8 l4_hdr = 0;
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	struct port_info *pi = netdev_priv(skb->dev);
	struct adapter *adapter = pi->adapter;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return tnl_type;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return tnl_type;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		if (adapter->vxlan_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_VXLAN;
		else if (adapter->geneve_port == udp_hdr(skb)->dest)
			tnl_type = TX_TNL_TYPE_GENEVE;
		break;
	default:
		return tnl_type;
	}

	return tnl_type;
}
static inline void t6_fill_tnl_lso(struct sk_buff *skb,
				   struct cpl_tx_tnl_lso *tnl_lso,
				   enum cpl_tx_tnl_lso_type tnl_type)
{
	u32 val;
	int in_eth_xtra_len;
	int l3hdr_len = skb_network_header_len(skb);
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	bool v6 = (ip_hdr(skb)->version == 6);

	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
	      CPL_TX_TNL_LSO_FIRST_F |
	      CPL_TX_TNL_LSO_LAST_F |
	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
	tnl_lso->op_to_IpIdSplitOut = htonl(val);

	tnl_lso->IpIdOffsetOut = 0;

	/* Get the tunnel header length */
	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
	in_eth_xtra_len = skb_inner_network_header(skb) -
			  skb_inner_mac_header(skb) - ETH_HLEN;

	switch (tnl_type) {
	case TX_TNL_TYPE_VXLAN:
	case TX_TNL_TYPE_GENEVE:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
			      CPL_TX_TNL_LSO_UDPLENSETOUT_F);
		break;
	default:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
		break;
	}

	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
		 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
		       CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));

	tnl_lso->r1 = 0;

	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
	tnl_lso->Flow_to_TcpHdrLen = htonl(val);

	tnl_lso->IpIdOffset = htons(0);

	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
	tnl_lso->TCPSeqOffset = htonl(0);
	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
				 struct cpl_tx_pkt_lso_core *lso)
{
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	int l3hdr_len = skb_network_header_len(skb);
	const struct skb_shared_info *ssi;
	bool ipv6 = false;

	ssi = skb_shinfo(skb);
	if (ssi->gso_type & SKB_GSO_TCPV6)
		ipv6 = true;

	lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
			      LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
			      LSO_IPV6_V(ipv6) |
			      LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
			      LSO_IPHDR_LEN_V(l3hdr_len / 4) |
			      LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
	lso->ipid_ofst = htons(0);
	lso->mss = htons(ssi->gso_size);
	lso->seqno_offset = htonl(0);
	if (is_t4(adap->params.chip))
		lso->len = htonl(skb->len);
	else
		lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));

	return (void *)(lso + 1);
}
/**
 *	t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
 *	@adap: the adapter
 *	@eq: the Ethernet TX Queue
 *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
 *
 *	We're typically called here to update the state of an Ethernet TX
 *	Queue with respect to the hardware's progress in consuming the TX
 *	Work Requests that we've put on that Egress Queue.  This happens
 *	when we get Egress Queue Update messages and also prophylactically
 *	in regular timer-based Ethernet TX Queue maintenance.
 */
int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
				 int maxreclaim)
{
	struct sge_txq *q = &eq->q;
	unsigned int reclaimed;

	if (!q->in_use || !__netif_tx_trylock(eq->txq))
		return 0;

	/* Reclaim pending completed TX Descriptors. */
	reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);

	/* If the TX Queue is currently stopped and there's now more than half
	 * the queue available, restart it.  Otherwise bail out since the rest
	 * of what we want to do here is with the possibility of shipping any
	 * currently buffered Coalesced TX Work Request.
	 */
	if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
		netif_tx_wake_queue(eq->txq);
		eq->q.restarts++;
	}

	__netif_tx_unlock(eq->txq);
	return reclaimed;
}
static inline int cxgb4_validate_skb(struct sk_buff *skb,
				     struct net_device *dev,
				     u32 min_pkt_len)
{
	u32 max_pkt_len;

	/* The chip min packet length is 10 octets but some firmware
	 * commands have a minimum packet length requirement.  So, play
	 * safe and reject anything shorter than @min_pkt_len.
	 */
	if (unlikely(skb->len < min_pkt_len))
		return -EINVAL;

	/* Discard the packet if the length is greater than mtu */
	max_pkt_len = ETH_HLEN + dev->mtu;

	if (skb_vlan_tagged(skb))
		max_pkt_len += VLAN_HLEN;

	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
		return -EINVAL;

	return 0;
}
/**
 *	cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	bool ptp_enabled = is_ptp_enabled(skb, dev);
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	const struct skb_shared_info *ssi;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	int len, qidx, credits, ret;
	const struct port_info *pi;
	unsigned int flits, ndesc;
	bool immediate = false;
	u32 wr_mid, ctrl0, op;
	u64 cntrl, *end, *sgl;
	struct sge_eth_txq *q;
	unsigned int chip_ver;
	struct adapter *adap;

	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
	if (ret)
		goto out_free;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	ssi = skb_shinfo(skb);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (xfrm_offload(skb) && !ssi->gso_size)
		return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */

	qidx = skb_get_queue_mapping(skb);
	if (ptp_enabled) {
		spin_lock(&adap->ptp_lock);
		if (!(adap->ptp_tx_skb)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			adap->ptp_tx_skb = skb_get(skb);
		} else {
			spin_unlock(&adap->ptp_lock);
			goto out_free;
		}
		q = &adap->sge.ptptxq;
	} else {
		q = &adap->sge.ethtxq[qidx + pi->first_qset];
	}
	skb_tx_timestamp(skb);

	reclaim_completed_tx(adap, &q->q, -1, true);
	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

#ifdef CONFIG_CHELSIO_T4_FCOE
	ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
	if (unlikely(ret == -ENOTSUPP)) {
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		goto out_free;
	}
#endif /* CONFIG_CHELSIO_T4_FCOE */

	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	flits = calc_tx_flits(skb, chip_ver);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, chip_ver))
		immediate = true;

	if (skb->encapsulation && chip_ver > CHELSIO_T5)
		tnl_type = cxgb_encap_offload_supported(skb);

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		if (ptp_enabled)
			spin_unlock(&adap->ptp_lock);
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(q);

		/* If we're using the SGE Doorbell Queue Timer facility, we
		 * don't need to ask the Firmware to send us Egress Queue CIDX
		 * Updates: the Hardware will do this automatically.  And
		 * since we send the Ingress Queue CIDX Updates to the
		 * corresponding Ethernet Response Queue, we'll get them very
		 * quickly.
		 */
		if (!q->dbqt)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	wr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	end = (u64 *)wr + flits;

	len = immediate ? skb->len : 0;
	len += sizeof(*cpl);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);

		if (tnl_type)
			len += sizeof(*tnl_lso);
		else
			len += sizeof(*lso);

		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN_V(len));
		if (tnl_type) {
			struct iphdr *iph = ip_hdr(skb);

			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
			cpl = (void *)(tnl_lso + 1);
			/* Driver is expected to compute partial checksum that
			 * does not include the IP Total Length.
			 */
			if (iph->version == 4) {
				iph->check = 0;
				iph->tot_len = 0;
				iph->check = (u16)(~ip_fast_csum((u8 *)iph,
								 iph->ihl));
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				cntrl = hwcsum(adap->params.chip, skb);
		} else {
			cpl = write_tso_wr(adap, skb, lso);
			cntrl = hwcsum(adap->params.chip, skb);
		}
		sgl = (u64 *)(cpl + 1); /* sgl start here */
		if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
			/* If current position is already at the end of the
			 * txq, reset the current to point to start of the
			 * queue and update the end ptr as well.
			 */
			if (sgl == (u64 *)q->q.stat) {
				int left = (u8 *)end - (u8 *)q->q.stat;

				end = (void *)q->q.desc + left;
				sgl = (void *)q->q.desc;
			}
		}
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		if (ptp_enabled)
			op = FW_PTP_TX_PKT_WR;
		else
			op = FW_ETH_TX_PKT_WR;
		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
				       FW_WR_IMMDLEN_V(len));
		cpl = (void *)(wr + 1);
		sgl = (u64 *)(cpl + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adap->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			q->tx_cso++;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
		if (skb->protocol == htons(ETH_P_FCOE))
			cntrl |= TXPKT_VLAN_V(
				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (ptp_enabled)
		ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
	if (is_t4(adap->params.chip))
		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
	else
		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
#endif
	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		int last_desc;

		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
	}

	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	if (ptp_enabled)
		spin_unlock(&adap->ptp_lock);
	return NETDEV_TX_OK;

out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
enum {
	/* Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			       sizeof(struct cpl_tx_pkt_lso_core) +
			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
};
/**
 *	t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely as
 *	immediate data.
 */
static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
{
	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}
/**
 *	t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
 *	@skb: the packet
 *
 *	Returns the number of flits needed for a TX Work Request for the
 *	given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
{
	unsigned int flits;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (t4vf_is_eth_imm(skb))
		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
				    sizeof(__be64));

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
	if (skb_shinfo(skb)->gso_size)
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
/**
 *	cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	const struct skb_shared_info *ssi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	unsigned int flits, ndesc;
	struct sge_eth_txq *txq;
	struct adapter *adapter;
	int qidx, credits, ret;
	size_t fw_hdr_copy_len;
	u64 cntrl, *end;
	u32 wr_mid;

	/* The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the header so we reject anything
	 * smaller than that ...
	 */
	fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
			  sizeof(wr->ethtype) + sizeof(wr->vlantci);
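	/* Worked example: with the 6-byte MAC address fields and the 2-byte
	 * ethtype and vlantci fields of fw_eth_tx_pkt_vm_wr, fw_hdr_copy_len
	 * works out to 6 + 6 + 2 + 2 = 16 bytes.
	 */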
	ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
	if (ret)
		goto out_free;

	/* Figure out which TX Queue we're going to use. */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	WARN_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/* Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	reclaim_completed_tx(adapter, &txq->q, -1, true);

	/* Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = t4vf_calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* Not enough room for this packet's Work Request.  Stop the
		 * TX Queue and return a "busy" condition.  The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	if (!t4vf_is_eth_imm(skb) &&
	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
		/* We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed.  Record the error and drop the packet.
		 */
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(txq);

		/* If we're using the SGE Doorbell Queue Timer facility, we
		 * don't need to ask the Firmware to send us Egress Queue CIDX
		 * Updates: the Hardware will do this automatically.  And
		 * since we send the Ingress Queue CIDX Updates to the
		 * corresponding Ethernet Response Queue, we'll get them very
		 * quickly.
		 */
		if (!txq->dbqt)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/* Start filling in our Work Request.  Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring.  If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
	 */
	WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/* If this is a Large Send Offload packet we'll put in an LSO CPL
	 * message with an encapsulated TX Packet CPL message.  Otherwise we
	 * just use a TX Packet CPL message.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		/* Fill in the LSO CPL message. */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE_F |
				    LSO_LAST_SLICE_F |
				    LSO_IPV6_V(v6) |
				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = (t4vf_is_eth_imm(skb)
		       ? skb->len + sizeof(*cpl)
		       : sizeof(*cpl));
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adapter->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			txq->tx_cso++;
		} else {
			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
		}
	}

	/* If there's a VLAN tag present, add that to the list of things to
	 * do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	/* Fill in the TX Packet CPL message header. */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->port_id) |
				 TXPKT_PF_V(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	/* Fill in the body of the TX Packet CPL message with either in-lined
	 * data or a Scatter/Gather List.
	 */
	if (t4vf_is_eth_imm(skb)) {
		/* In-line the packet's data and free the skb since we don't
		 * need it any longer.
		 */
		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/* Write the skb's Scatter/Gather list into the TX Packet CPL
		 * message and retain a pointer to the skb so we can free it
		 * later when its DMA completes.  (We store the skb pointer
		 * in the Software Descriptor corresponding to the last TX
		 * Descriptor used by the Work Request.)
		 *
		 * The retained skb will be freed when the corresponding TX
		 * Descriptors are reclaimed after their DMAs complete.
		 * However, this could take quite a while since, in general,
		 * the hardware is set up to be lazy about sending DMA
		 * completion notifications to us and we mostly perform TX
		 * reclaims in the transmit routine.
		 *
		 * This is good for performance but means that we rely on new
		 * TX packets arriving to run the destructors of completed
		 * packets, which open up space in their sockets' send queues.
		 * Sometimes we do not get such new packets causing TX to
		 * stall.  A single UDP transmitter is a good example of this
		 * situation.  We have a clean up timer that periodically
		 * reclaims completed packets but it doesn't run often enough
		 * (nor do we want it to) to prevent lengthy stalls.  A
		 * solution to this problem is to run the destructor early,
		 * after the packet is queued but before it's DMAd.  A con is
		 * that we lie to socket memory accounting, but the amount of
		 * extra memory is reasonable (limited by the number of TX
		 * descriptors), the packets do actually get freed quickly by
		 * new packets almost always, and for protocols like TCP that
		 * wait for acks to really free up the data the extra memory
		 * is even less.  On the positive side we run the destructors
		 * on the sending CPU rather than on a potentially different
		 * completing CPU, usually a good thing.
		 *
		 * Run the destructor before telling the DMA engine about the
		 * packet to make sure it doesn't complete and get freed
		 * prematurely.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;
		int last_desc;

		/* If the Work Request header was an exact multiple of our TX
		 * Descriptor length, then it's possible that the starting SGL
		 * pointer lines up exactly with the end of our TX Descriptor
		 * ring.  If that's the case, wrap around to the beginning
		 * here.
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = (void *)((void *)tq->desc +
				       ((void *)end - (void *)tq->stat));
		}

		cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
		skb_orphan(skb);

		last_desc = tq->pidx + ndesc - 1;
		if (last_desc >= tq->size)
			last_desc -= tq->size;
		tq->sdesc[last_desc].skb = skb;
		tq->sdesc[last_desc].sgl = sgl;
	}

	/* Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);

	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/* An error of some sort happened.  Free the TX skb and tell the
	 * OS that we've "dealt" with the packet ...
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of cxgb4_reclaim_completed_tx() that is used
 *	for Tx queues that send only immediate data (presently just
 *	the control queues) and thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
	int reclaim = hw_cidx - q->cidx;

	if (reclaim < 0)
		reclaim += q->size;

	q->in_use -= reclaim;
	q->cidx = hw_cidx;
}
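/* Worked example of the wrap handling above: with a 64-entry queue,
 * hw_cidx = 3 and q->cidx = 60 give reclaim = -57, which the wrap
 * correction turns into the 7 descriptors the hardware actually consumed.
 */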
static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
{
	u32 val = *idx + n;

	if (val >= max)
		val -= max;

	*idx = val;
}
void cxgb4_eosw_txq_free_desc(struct adapter *adap,
			      struct sge_eosw_txq *eosw_txq, u32 ndesc)
{
	struct sge_eosw_desc *d;

	d = &eosw_txq->desc[eosw_txq->last_cidx];
	while (ndesc--) {
		if (d->skb) {
			if (d->addr[0]) {
				unmap_skb(adap->pdev_dev, d->skb, d->addr);
				memset(d->addr, 0, sizeof(d->addr));
			}
			dev_consume_skb_any(d->skb);
			d->skb = NULL;
		}
		eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
				       eosw_txq->ndesc);
		d = &eosw_txq->desc[eosw_txq->last_cidx];
	}
}
static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
{
	eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
	eosw_txq->inuse += n;
}
2062 static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
2063 struct sk_buff *skb)
2065 if (eosw_txq->inuse == eosw_txq->ndesc)
2068 eosw_txq->desc[eosw_txq->pidx].skb = skb;
2072 static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
2074 return eosw_txq->desc[eosw_txq->last_pidx].skb;
2077 static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
2078 struct sk_buff *skb, u32 hdr_len)
2083 wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
2084 if (skb_shinfo(skb)->gso_size)
2085 wrlen += sizeof(struct cpl_tx_pkt_lso_core);
2087 wrlen += roundup(hdr_len, 16);
2089 /* Packet headers + WR + CPLs */
2090 flits = DIV_ROUND_UP(wrlen, 8);
2092 if (skb_shinfo(skb)->nr_frags > 0)
2093 nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
2094 else if (skb->len - hdr_len)
2097 return flits + nsgl;
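/* Illustrative numbers only: if the WR, the CPL and the padded packet
 * headers add up to wrlen == 72 bytes, the fixed part costs
 * DIV_ROUND_UP(72, 8) == 9 flits; a single page fragment then adds
 * sgl_len(1) == 2 flits for the SGL, 11 flits in total.
 */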
2100 static inline void *write_eo_wr(struct adapter *adap,
2101 struct sge_eosw_txq *eosw_txq,
2102 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
2103 u32 hdr_len, u32 wrlen)
2105 const struct skb_shared_info *ssi = skb_shinfo(skb);
2106 struct cpl_tx_pkt_core *cpl;
2107 u32 immd_len, wrlen16;
2110 wrlen16 = DIV_ROUND_UP(wrlen, 16);
2111 immd_len = sizeof(struct cpl_tx_pkt_core);
2112 if (skb_shinfo(skb)->gso_size) {
2113 if (skb->encapsulation &&
2114 CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
2115 immd_len += sizeof(struct cpl_tx_tnl_lso);
2117 immd_len += sizeof(struct cpl_tx_pkt_lso_core);
2119 immd_len += hdr_len;
2121 if (!eosw_txq->ncompl ||
2122 eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
2125 eosw_txq->last_compl = 0;
2128 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
2129 FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
2130 FW_WR_COMPL_V(compl));
2131 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
2132 FW_WR_FLOWID_V(eosw_txq->hwtid));
2134 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
2135 wr->u.tcpseg.ethlen = skb_network_offset(skb);
2136 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
2137 wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
2138 wr->u.tcpseg.tsclk_tsoff = 0;
2139 wr->u.tcpseg.r4 = 0;
2140 wr->u.tcpseg.r5 = 0;
2141 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
2143 if (ssi->gso_size) {
2144 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
2146 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
2147 cpl = write_tso_wr(adap, skb, lso);
2149 wr->u.tcpseg.mss = cpu_to_be16(0xffff);
2150 cpl = (void *)(wr + 1);
2153 eosw_txq->cred -= wrlen16;
2154 eosw_txq->last_compl += wrlen16;
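/* Note on the completion heuristic above: a FW_WR_COMPL completion is
 * requested on the first WR of a flow and then again whenever roughly
 * half of the queue's WR credits (adap->params.ofldq_wr_cred / 2) have
 * been consumed since the last one, so credit feedback keeps flowing
 * without paying for a completion per packet.
 */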
2158 static void ethofld_hard_xmit(struct net_device *dev,
2159 struct sge_eosw_txq *eosw_txq)
2161 struct port_info *pi = netdev2pinfo(dev);
2162 struct adapter *adap = netdev2adap(dev);
2163 u32 wrlen, wrlen16, hdr_len, data_len;
2164 enum sge_eosw_state next_state;
2165 u64 cntrl, *start, *end, *sgl;
2166 struct sge_eohw_txq *eohw_txq;
2167 struct cpl_tx_pkt_core *cpl;
2168 struct fw_eth_tx_eo_wr *wr;
2169 bool skip_eotx_wr = false;
2170 struct sge_eosw_desc *d;
2171 struct sk_buff *skb;
2175 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
2176 spin_lock(&eohw_txq->lock);
2177 reclaim_completed_tx_imm(&eohw_txq->q);
2179 d = &eosw_txq->desc[eosw_txq->last_pidx];
2181 skb_tx_timestamp(skb);
2183 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
2184 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
2185 eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
2188 flits = DIV_ROUND_UP(hdr_len, 8);
2189 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
2190 next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
2192 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
2193 skip_eotx_wr = true;
2195 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
2196 data_len = skb->len - hdr_len;
2197 flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
2199 ndesc = flits_to_desc(flits);
2201 wrlen16 = DIV_ROUND_UP(wrlen, 16);
2203 /* If there are no CPL credits, then wait for credits
2204 * to come back and then retry
2206 if (unlikely(wrlen16 > eosw_txq->cred))
2209 if (unlikely(skip_eotx_wr)) {
2211 eosw_txq->state = next_state;
2212 goto write_wr_headers;
2215 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
2216 cntrl = hwcsum(adap->params.chip, skb);
2217 if (skb_vlan_tag_present(skb))
2218 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
2220 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
2221 TXPKT_INTF_V(pi->tx_chan) |
2222 TXPKT_PF_V(adap->pf));
2224 cpl->len = cpu_to_be16(skb->len);
2225 cpl->ctrl1 = cpu_to_be64(cntrl);
2227 start = (u64 *)(cpl + 1);
2230 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
2233 if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
2234 memset(d->addr, 0, sizeof(d->addr));
2235 eohw_txq->mapping_err++;
2239 end = (u64 *)wr + flits;
2240 if (unlikely(start > sgl)) {
2241 left = (u8 *)end - (u8 *)eohw_txq->q.stat;
2242 end = (void *)eohw_txq->q.desc + left;
2245 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
2246 /* If current position is already at the end of the
2247 * txq, reset the current position to the start of the queue
2248 * and update the end ptr as well.
2250 left = (u8 *)end - (u8 *)eohw_txq->q.stat;
2252 end = (void *)eohw_txq->q.desc + left;
2253 sgl = (void *)eohw_txq->q.desc;
2256 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
2260 txq_advance(&eohw_txq->q, ndesc);
2261 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
2262 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
2265 spin_unlock(&eohw_txq->lock);
2268 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
2270 struct sk_buff *skb;
2273 switch (eosw_txq->state) {
2274 case CXGB4_EO_STATE_ACTIVE:
2275 case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
2276 case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
2277 pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
2279 pktcount += eosw_txq->ndesc;
2281 case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
2282 case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
2283 case CXGB4_EO_STATE_CLOSED:
2288 while (pktcount--) {
2289 skb = eosw_txq_peek(eosw_txq);
2291 eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
2296 ethofld_hard_xmit(dev, eosw_txq);
2300 static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
2301 struct net_device *dev)
2303 struct cxgb4_tc_port_mqprio *tc_port_mqprio;
2304 struct port_info *pi = netdev2pinfo(dev);
2305 struct adapter *adap = netdev2adap(dev);
2306 struct sge_eosw_txq *eosw_txq;
2310 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
2314 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
2315 qid = skb_get_queue_mapping(skb) - pi->nqsets;
2316 eosw_txq = &tc_port_mqprio->eosw_txq[qid];
2317 spin_lock_bh(&eosw_txq->lock);
2318 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
2321 ret = eosw_txq_enqueue(eosw_txq, skb);
2325 /* SKB is queued for processing until credits are available.
2326 * So, call the destructor now and we'll free the skb later
2327 * after it has been successfully transmitted.
2331 eosw_txq_advance(eosw_txq, 1);
2332 ethofld_xmit(dev, eosw_txq);
2333 spin_unlock_bh(&eosw_txq->lock);
2334 return NETDEV_TX_OK;
2337 spin_unlock_bh(&eosw_txq->lock);
2339 dev_kfree_skb_any(skb);
2340 return NETDEV_TX_OK;
2343 netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
2345 struct port_info *pi = netdev_priv(dev);
2346 u16 qid = skb_get_queue_mapping(skb);
2348 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
2349 return cxgb4_vf_eth_xmit(skb, dev);
2351 if (unlikely(qid >= pi->nqsets))
2352 return cxgb4_ethofld_xmit(skb, dev);
2354 return cxgb4_eth_xmit(skb, dev);
2358 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2360 * @eotid: ETHOFLD tid to bind/unbind
2361 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
2363 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
2364 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
2367 int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
2369 struct port_info *pi = netdev2pinfo(dev);
2370 struct adapter *adap = netdev2adap(dev);
2371 enum sge_eosw_state next_state;
2372 struct sge_eosw_txq *eosw_txq;
2373 u32 len, len16, nparams = 6;
2374 struct fw_flowc_wr *flowc;
2375 struct eotid_entry *entry;
2376 struct sge_ofld_rxq *rxq;
2377 struct sk_buff *skb;
2380 len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams;
2381 len16 = DIV_ROUND_UP(len, 16);
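/* Sizing sketch, assuming the 8-byte fw_flowc_wr header and 8-byte
 * fw_flowc_mnemval entries: with nparams == 6, len = 8 + 6 * 8 = 56
 * bytes, so len16 = DIV_ROUND_UP(56, 16) = 4 16-byte units.
 */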
2383 entry = cxgb4_lookup_eotid(&adap->tids, eotid);
2387 eosw_txq = (struct sge_eosw_txq *)entry->data;
2391 skb = alloc_skb(len, GFP_KERNEL);
2395 spin_lock_bh(&eosw_txq->lock);
2396 if (tc != FW_SCHED_CLS_NONE) {
2397 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
2400 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
2402 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
2405 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
2408 flowc = __skb_put(skb, len);
2409 memset(flowc, 0, len);
2411 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
2412 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
2413 FW_WR_FLOWID_V(eosw_txq->hwtid));
2414 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
2415 FW_FLOWC_WR_NPARAMS_V(nparams) |
2417 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
2418 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
2419 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
2420 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
2421 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
2422 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
2423 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
2424 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
2425 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
2426 flowc->mnemval[4].val = cpu_to_be32(tc);
2427 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
2428 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
2429 FW_FLOWC_MNEM_EOSTATE_CLOSING :
2430 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
2432 eosw_txq->cred -= len16;
2434 eosw_txq->last_compl = 0;
2436 ret = eosw_txq_enqueue(eosw_txq, skb);
2438 dev_consume_skb_any(skb);
2442 eosw_txq->state = next_state;
2443 eosw_txq->flowc_idx = eosw_txq->pidx;
2444 eosw_txq_advance(eosw_txq, 1);
2445 ethofld_xmit(dev, eosw_txq);
2448 spin_unlock_bh(&eosw_txq->lock);
2453 * is_imm - check whether a packet can be sent as immediate data
2456 * Returns true if a packet can be sent as a WR with immediate data.
2458 static inline int is_imm(const struct sk_buff *skb)
2460 return skb->len <= MAX_CTRL_WR_LEN;
2464 * ctrlq_check_stop - check if a control queue is full and should stop
2466 * @wr: most recent WR written to the queue
2468 * Check if a control queue has become full and should be stopped.
2469 * We clean up control queue descriptors very lazily, only when we run out of room.
2470 * If the queue is still full after reclaiming any completed descriptors
2471 * we suspend it and have the last WR wake it up.
2473 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
2475 reclaim_completed_tx_imm(&q->q);
2476 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2477 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2484 * ctrl_xmit - send a packet through an SGE control Tx queue
2485 * @q: the control queue
2488 * Send a packet through an SGE control Tx queue. Packets sent through
2489 * a control queue must fit entirely as immediate data.
2491 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
2494 struct fw_wr_hdr *wr;
2496 if (unlikely(!is_imm(skb))) {
2499 return NET_XMIT_DROP;
2502 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
2503 spin_lock(&q->sendq.lock);
2505 if (unlikely(q->full)) {
2506 skb->priority = ndesc; /* save for restart */
2507 __skb_queue_tail(&q->sendq, skb);
2508 spin_unlock(&q->sendq.lock);
2512 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2513 cxgb4_inline_tx_skb(skb, &q->q, wr);
2515 txq_advance(&q->q, ndesc);
2516 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
2517 ctrlq_check_stop(q, wr);
2519 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2520 spin_unlock(&q->sendq.lock);
2523 return NET_XMIT_SUCCESS;
2527 * restart_ctrlq - restart a suspended control queue
2528 * @data: the control queue to restart
2530 * Resumes transmission on a suspended Tx control queue.
2532 static void restart_ctrlq(unsigned long data)
2534 struct sk_buff *skb;
2535 unsigned int written = 0;
2536 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
2538 spin_lock(&q->sendq.lock);
2539 reclaim_completed_tx_imm(&q->q);
2540 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
2542 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
2543 struct fw_wr_hdr *wr;
2544 unsigned int ndesc = skb->priority; /* previously saved */
2547 /* Write descriptors and free skbs outside the lock to limit
2548 * wait times. q->full is still set so new skbs will be queued.
2550 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2551 txq_advance(&q->q, ndesc);
2552 spin_unlock(&q->sendq.lock);
2554 cxgb4_inline_tx_skb(skb, &q->q, wr);
2557 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2558 unsigned long old = q->q.stops;
2560 ctrlq_check_stop(q, wr);
2561 if (q->q.stops != old) { /* suspended anew */
2562 spin_lock(&q->sendq.lock);
2567 cxgb4_ring_tx_db(q->adap, &q->q, written);
2570 spin_lock(&q->sendq.lock);
2575 cxgb4_ring_tx_db(q->adap, &q->q, written);
2576 spin_unlock(&q->sendq.lock);
2580 * t4_mgmt_tx - send a management message
2581 * @adap: the adapter
2582 * @skb: the packet containing the management message
2584 * Send a management message through control queue 0.
2586 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
2591 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2597 * is_ofld_imm - check whether a packet can be sent as immediate data
2600 * Returns true if a packet can be sent as an offload WR with immediate
2601 * data. We currently use the same limit as for Ethernet packets.
2603 static inline int is_ofld_imm(const struct sk_buff *skb)
2605 struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
2606 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
2608 if (opcode == FW_CRYPTO_LOOKASIDE_WR)
2609 return skb->len <= SGE_MAX_WR_LEN;
2611 return skb->len <= MAX_IMM_TX_PKT_LEN;
2615 * calc_tx_flits_ofld - calculate # of flits for an offload packet
2618 * Returns the number of flits needed for the given offload packet.
2619 * These packets are already fully constructed and no additional headers
2622 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
2624 unsigned int flits, cnt;
2626 if (is_ofld_imm(skb))
2627 return DIV_ROUND_UP(skb->len, 8);
2629 flits = skb_transport_offset(skb) / 8U; /* headers */
2630 cnt = skb_shinfo(skb)->nr_frags;
2631 if (skb_tail_pointer(skb) != skb_transport_header(skb))
2633 return flits + sgl_len(cnt);
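/* Illustration: an offload WR with skb_transport_offset(skb) == 40 and
 * two page fragments (tail pointer at the transport header) needs
 * 40 / 8 == 5 flits for the headers plus sgl_len(2) == 4 flits for the
 * SGL, nine flits in all.
 */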
2637 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2638 * @adap: the adapter
2639 * @q: the queue to stop
2641 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
2642 * inability to map packets. A periodic timer attempts to restart
2645 static void txq_stop_maperr(struct sge_uld_txq *q)
2649 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
2650 q->adap->sge.txq_maperr);
2654 * ofldtxq_stop - stop an offload Tx queue that has become full
2655 * @q: the queue to stop
2656 * @wr: the Work Request causing the queue to become full
2658 * Stops an offload Tx queue that has become full and modifies the packet
2659 * being written to request a wakeup.
2661 static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
2663 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2669 * service_ofldq - service/restart a suspended offload queue
2670 * @q: the offload queue
2672 * Services an offload Tx queue by moving packets from its Pending Send
2673 * Queue to the Hardware TX ring. The function starts and ends with the
2674 * Send Queue locked, but drops the lock while putting the skb at the
2675 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
2676 * allows more skbs to be added to the Send Queue by other threads.
2677 * The packet being processed at the head of the Pending Send Queue is
2678 * left on the queue in case we experience DMA Mapping errors, etc.
2679 * and need to give up and restart later.
2681 * service_ofldq() can be thought of as a task which opportunistically
2682 * uses other threads execution contexts. We use the Offload Queue
2683 * boolean "service_ofldq_running" to make sure that only one instance
2684 * is ever running at a time ...
2686 static void service_ofldq(struct sge_uld_txq *q)
2688 u64 *pos, *before, *end;
2690 struct sk_buff *skb;
2691 struct sge_txq *txq;
2693 unsigned int written = 0;
2694 unsigned int flits, ndesc;
2696 /* If another thread is currently in service_ofldq() processing the
2697 * Pending Send Queue then there's nothing to do. Otherwise, flag
2698 * that we're doing the work and continue. Examining/modifying
2699 * the Offload Queue boolean "service_ofldq_running" must be done
2700 * while holding the Pending Send Queue Lock.
2702 if (q->service_ofldq_running)
2704 q->service_ofldq_running = true;
2706 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
2707 /* We drop the lock while we're working with the skb at the
2708 * head of the Pending Send Queue. This allows more skbs to
2709 * be added to the Pending Send Queue while we're working on
2710 * this one. We don't need to lock to guard the TX Ring
2711 * updates because only one thread of execution is ever
2712 * allowed into service_ofldq() at a time.
2714 spin_unlock(&q->sendq.lock);
2716 cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
2718 flits = skb->priority; /* previously saved */
2719 ndesc = flits_to_desc(flits);
2720 credits = txq_avail(&q->q) - ndesc;
2721 BUG_ON(credits < 0);
2722 if (unlikely(credits < TXQ_STOP_THRES))
2723 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
2725 pos = (u64 *)&q->q.desc[q->q.pidx];
2726 if (is_ofld_imm(skb))
2727 cxgb4_inline_tx_skb(skb, &q->q, pos);
2728 else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
2729 (dma_addr_t *)skb->head)) {
2731 spin_lock(&q->sendq.lock);
2734 int last_desc, hdr_len = skb_transport_offset(skb);
2736 /* The WR headers may not fit within one descriptor.
2737 * So we need to deal with wrap-around here.
2739 before = (u64 *)pos;
2740 end = (u64 *)pos + flits;
2742 pos = (void *)inline_tx_skb_header(skb, &q->q,
2745 if (before > (u64 *)pos) {
2746 left = (u8 *)end - (u8 *)txq->stat;
2747 end = (void *)txq->desc + left;
2750 /* If current position is already at the end of the
2751 * ofld queue, reset the current position to the
2752 * start of the queue and update the end ptr as well.
2754 if (pos == (u64 *)txq->stat) {
2755 left = (u8 *)end - (u8 *)txq->stat;
2756 end = (void *)txq->desc + left;
2757 pos = (void *)txq->desc;
2760 cxgb4_write_sgl(skb, &q->q, (void *)pos,
2762 (dma_addr_t *)skb->head);
2763 #ifdef CONFIG_NEED_DMA_MAP_STATE
2764 skb->dev = q->adap->port[0];
2765 skb->destructor = deferred_unmap_destructor;
2767 last_desc = q->q.pidx + ndesc - 1;
2768 if (last_desc >= q->q.size)
2769 last_desc -= q->q.size;
2770 q->q.sdesc[last_desc].skb = skb;
2773 txq_advance(&q->q, ndesc);
2775 if (unlikely(written > 32)) {
2776 cxgb4_ring_tx_db(q->adap, &q->q, written);
2780 /* Reacquire the Pending Send Queue Lock so we can unlink the
2781 * skb we've just successfully transferred to the TX Ring and
2782 * loop for the next skb which may be at the head of the
2783 * Pending Send Queue.
2785 spin_lock(&q->sendq.lock);
2786 __skb_unlink(skb, &q->sendq);
2787 if (is_ofld_imm(skb))
2790 if (likely(written))
2791 cxgb4_ring_tx_db(q->adap, &q->q, written);
2793 /* Indicate that no thread is processing the Pending Send Queue
2796 q->service_ofldq_running = false;
2800 * ofld_xmit - send a packet through an offload queue
2801 * @q: the Tx offload queue
2804 * Send an offload packet through an SGE offload queue.
2806 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
2808 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
2809 spin_lock(&q->sendq.lock);
2811 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If
2812 * that results in this new skb being the only one on the queue, start
2813 * servicing it. If there are other skbs already on the list, then
2814 * either the queue is currently being processed or it's been stopped
2815 * for some reason and it'll be restarted at a later time. Restart
2816 * paths are triggered by events like experiencing a DMA Mapping Error
2817 * or filling the Hardware TX Ring.
2819 __skb_queue_tail(&q->sendq, skb);
2820 if (q->sendq.qlen == 1)
2823 spin_unlock(&q->sendq.lock);
2824 return NET_XMIT_SUCCESS;
2828 * restart_ofldq - restart a suspended offload queue
2829 * @data: the offload queue to restart
2831 * Resumes transmission on a suspended Tx offload queue.
2833 static void restart_ofldq(unsigned long data)
2835 struct sge_uld_txq *q = (struct sge_uld_txq *)data;
2837 spin_lock(&q->sendq.lock);
2838 q->full = 0; /* the queue actually is completely empty now */
2840 spin_unlock(&q->sendq.lock);
2844 * skb_txq - return the Tx queue an offload packet should use
2847 * Returns the Tx queue an offload packet should use as indicated by bits
2848 * 1-15 in the packet's queue_mapping.
2850 static inline unsigned int skb_txq(const struct sk_buff *skb)
2852 return skb->queue_mapping >> 1;
2856 * is_ctrl_pkt - return whether an offload packet is a control packet
2859 * Returns whether an offload packet should use an OFLD or a CTRL
2860 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
2862 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
2864 return skb->queue_mapping & 1;
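/* A hedged illustration of the encoding the two helpers above decode:
 * senders build queue_mapping as (queue << 1) | is_ctrl (see the
 * set_wr_txq() helper in cxgb4.h), so offload queue 3 is encoded as
 * (3 << 1) | 0 == 6 and control queue 0 as (0 << 1) | 1 == 1.
 */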
2867 static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
2868 unsigned int tx_uld_type)
2870 struct sge_uld_txq_info *txq_info;
2871 struct sge_uld_txq *txq;
2872 unsigned int idx = skb_txq(skb);
2874 if (unlikely(is_ctrl_pkt(skb))) {
2875 /* Single ctrl queue is a requirement for LE workaround path */
2876 if (adap->tids.nsftids)
2878 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
2881 txq_info = adap->sge.uld_txq_info[tx_uld_type];
2882 if (unlikely(!txq_info)) {
2884 return NET_XMIT_DROP;
2887 txq = &txq_info->uldtxq[idx];
2888 return ofld_xmit(txq, skb);
2892 * t4_ofld_send - send an offload packet
2893 * @adap: the adapter
2896 * Sends an offload packet. We use the packet queue_mapping to select the
2897 * appropriate Tx queue as follows: bit 0 indicates whether the packet
2898 * should be sent as regular or control, bits 1-15 select the queue.
2900 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
2905 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
2911 * cxgb4_ofld_send - send an offload packet
2912 * @dev: the net device
2915 * Sends an offload packet. This is an exported version of t4_ofld_send(),
2916 * intended for ULDs.
2918 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
2920 return t4_ofld_send(netdev2adap(dev), skb);
2922 EXPORT_SYMBOL(cxgb4_ofld_send);
2924 static void *inline_tx_header(const void *src,
2925 const struct sge_txq *q,
2926 void *pos, int length)
2928 int left = (void *)q->stat - pos;
2931 if (likely(length <= left)) {
2932 memcpy(pos, src, length);
2935 memcpy(pos, src, left);
2936 memcpy(q->desc, src + left, length - left);
2937 pos = (void *)q->desc + (length - left);
2939 /* 0-pad to multiple of 16 */
2940 p = PTR_ALIGN(pos, 8);
2941 if ((uintptr_t)p & 8) {
2949 * ofld_xmit_direct - copy a WR into offload queue
2950 * @q: the Tx offload queue
2951 * @src: location of WR
2954 * Copy an immediate WR into an uncontended SGE offload queue.
2956 static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
2963 /* Use the lower limit as the cut-off */
2964 if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
2966 return NET_XMIT_DROP;
2969 /* Don't return NET_XMIT_CN here as the current
2970 * implementation doesn't queue the request
2971 * using an skb when the following conditions are not met
2973 if (!spin_trylock(&q->sendq.lock))
2974 return NET_XMIT_DROP;
2976 if (q->full || !skb_queue_empty(&q->sendq) ||
2977 q->service_ofldq_running) {
2978 spin_unlock(&q->sendq.lock);
2979 return NET_XMIT_DROP;
2981 ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
2982 credits = txq_avail(&q->q) - ndesc;
2983 pos = (u64 *)&q->q.desc[q->q.pidx];
2985 /* ofldtxq_stop modifies WR header in-situ */
2986 inline_tx_header(src, &q->q, pos, len);
2987 if (unlikely(credits < TXQ_STOP_THRES))
2988 ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
2989 txq_advance(&q->q, ndesc);
2990 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2992 spin_unlock(&q->sendq.lock);
2993 return NET_XMIT_SUCCESS;
2996 int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
2997 const void *src, unsigned int len)
2999 struct sge_uld_txq_info *txq_info;
3000 struct sge_uld_txq *txq;
3001 struct adapter *adap;
3004 adap = netdev2adap(dev);
3007 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3008 if (unlikely(!txq_info)) {
3011 return NET_XMIT_DROP;
3013 txq = &txq_info->uldtxq[idx];
3015 ret = ofld_xmit_direct(txq, src, len);
3017 return net_xmit_eval(ret);
3019 EXPORT_SYMBOL(cxgb4_immdata_send);
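/* A hedged usage sketch for ULD callers, where wr_buf/wr_len are
 * illustrative names for caller-owned storage holding a fully-built
 * immediate WR of at most MAX_IMM_OFLD_TX_DATA_WR_LEN bytes:
 *
 *	ret = cxgb4_immdata_send(netdev, txq_idx, wr_buf, wr_len);
 *
 * A non-zero return means the uncontended fast path bailed out and the
 * caller should fall back to skb-based cxgb4_ofld_send() or retry.
 */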
3022 * t4_crypto_send - send crypto packet
3023 * @adap: the adapter
3026 * Sends a crypto packet. We use the packet queue_mapping to select the
3027 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3028 * should be sent as regular or control, bits 1-15 select the queue.
3030 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
3035 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
3041 * cxgb4_crypto_send - send crypto packet
3042 * @dev: the net device
3045 * Sends a crypto packet. This is an exported version of t4_crypto_send(),
3046 * intended for ULDs.
3048 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
3050 return t4_crypto_send(netdev2adap(dev), skb);
3052 EXPORT_SYMBOL(cxgb4_crypto_send);
3054 static inline void copy_frags(struct sk_buff *skb,
3055 const struct pkt_gl *gl, unsigned int offset)
3059 /* usually there's just one frag */
3060 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
3061 gl->frags[0].offset + offset,
3062 gl->frags[0].size - offset);
3063 skb_shinfo(skb)->nr_frags = gl->nfrags;
3064 for (i = 1; i < gl->nfrags; i++)
3065 __skb_fill_page_desc(skb, i, gl->frags[i].page,
3066 gl->frags[i].offset,
3069 /* get a reference to the last page, as we don't own it */
3070 get_page(gl->frags[gl->nfrags - 1].page);
3074 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3075 * @gl: the gather list
3076 * @skb_len: size of sk_buff main body if it carries fragments
3077 * @pull_len: amount of data to move to the sk_buff's main body
3079 * Builds an sk_buff from the given packet gather list. Returns the
3080 * sk_buff or %NULL if sk_buff allocation failed.
3082 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
3083 unsigned int skb_len, unsigned int pull_len)
3085 struct sk_buff *skb;
3088 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
3089 * size, which is expected since buffers are at least PAGE_SIZEd.
3090 * In this case packets up to RX_COPY_THRES have only one fragment.
3092 if (gl->tot_len <= RX_COPY_THRES) {
3093 skb = dev_alloc_skb(gl->tot_len);
3096 __skb_put(skb, gl->tot_len);
3097 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
3099 skb = dev_alloc_skb(skb_len);
3102 __skb_put(skb, pull_len);
3103 skb_copy_to_linear_data(skb, gl->va, pull_len);
3105 copy_frags(skb, gl, pull_len);
3106 skb->len = gl->tot_len;
3107 skb->data_len = skb->len - pull_len;
3108 skb->truesize += skb->data_len;
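/* With the driver's defaults (RX_COPY_THRES == 256, RX_PULL_LEN == 128),
 * a 200-byte packet is copied whole into a small linear skb by the
 * branch above, while a 1500-byte packet gets pull_len bytes of linear
 * data and the remainder left in page fragments.
 */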
3112 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
3115 * t4_pktgl_free - free a packet gather list
3116 * @gl: the gather list
3118 * Releases the pages of a packet gather list. We do not own the last
3119 * page on the list and do not free it.
3121 static void t4_pktgl_free(const struct pkt_gl *gl)
3124 const struct page_frag *p;
3126 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
3131 * Process an MPS trace packet. Give it an unused protocol number so it won't
3132 * be delivered to anyone and send it to the stack for capture.
3134 static noinline int handle_trace_pkt(struct adapter *adap,
3135 const struct pkt_gl *gl)
3137 struct sk_buff *skb;
3139 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
3140 if (unlikely(!skb)) {
3145 if (is_t4(adap->params.chip))
3146 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
3148 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
3150 skb_reset_mac_header(skb);
3151 skb->protocol = htons(0xffff);
3152 skb->dev = adap->port[0];
3153 netif_receive_skb(skb);
3158 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3159 * @adap: the adapter
3160 * @hwtstamps: time stamp structure to update
3161 * @sgetstamp: 60-bit IQE timestamp
3163 * Every ingress queue entry carries a 60-bit timestamp; convert it from
3164 * Core Clock ticks into ktime_t and assign it
3166 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
3167 struct skb_shared_hwtstamps *hwtstamps,
3171 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
3173 ns = div_u64(tmp, adap->params.vpd.cclk);
3175 memset(hwtstamps, 0, sizeof(*hwtstamps));
3176 hwtstamps->hwtstamp = ns_to_ktime(ns);
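/* Worked example of the conversion above: the arithmetic implies cclk
 * is in kHz, so one tick is 10^6 / cclk ns. With a 500 MHz core clock
 * (cclk == 500000), sgetstamp == 1000 ticks gives
 * ns = (1000 * 10^6 + 250000) / 500000 = 2000 ns; the cclk / 2 term
 * provides round-to-nearest.
 */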
3179 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
3180 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
3182 struct adapter *adapter = rxq->rspq.adap;
3183 struct sge *s = &adapter->sge;
3184 struct port_info *pi;
3186 struct sk_buff *skb;
3188 skb = napi_get_frags(&rxq->rspq.napi);
3189 if (unlikely(!skb)) {
3191 rxq->stats.rx_drops++;
3195 copy_frags(skb, gl, s->pktshift);
3197 skb->csum_level = 1;
3198 skb->len = gl->tot_len - s->pktshift;
3199 skb->data_len = skb->len;
3200 skb->truesize += skb->data_len;
3201 skb->ip_summed = CHECKSUM_UNNECESSARY;
3202 skb_record_rx_queue(skb, rxq->rspq.idx);
3203 pi = netdev_priv(skb->dev);
3205 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
3207 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
3208 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3211 if (unlikely(pkt->vlan_ex)) {
3212 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3213 rxq->stats.vlan_ex++;
3215 ret = napi_gro_frags(&rxq->rspq.napi);
3216 if (ret == GRO_HELD)
3217 rxq->stats.lro_pkts++;
3218 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
3219 rxq->stats.lro_merged++;
3221 rxq->stats.rx_cso++;
3231 * t4_systim_to_hwstamp - read hardware time stamp
3232 * @adap: the adapter
3235 * Read the Time Stamp from an MPS packet and insert it into the skb,
3236 * which is then forwarded to the PTP application
3238 static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
3239 struct sk_buff *skb)
3241 struct skb_shared_hwtstamps *hwtstamps;
3242 struct cpl_rx_mps_pkt *cpl = NULL;
3243 unsigned char *data;
3246 cpl = (struct cpl_rx_mps_pkt *)skb->data;
3247 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
3248 X_CPL_RX_MPS_PKT_TYPE_PTP))
3249 return RX_PTP_PKT_ERR;
3251 data = skb->data + sizeof(*cpl);
3252 skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
3253 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
3254 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
3255 return RX_PTP_PKT_ERR;
3257 hwtstamps = skb_hwtstamps(skb);
3258 memset(hwtstamps, 0, sizeof(*hwtstamps));
3259 hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
3261 return RX_PTP_PKT_SUC;
3265 * t4_rx_hststamp - Recv PTP Event Message
3266 * @adap: the adapter
3267 * @rsp: the response queue descriptor holding the RX_PKT message
3270 * If PTP is enabled and this is an MPS packet, read the HW timestamp
3272 static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
3273 struct sge_eth_rxq *rxq, struct sk_buff *skb)
3277 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
3278 !is_t4(adapter->params.chip))) {
3279 ret = t4_systim_to_hwstamp(adapter, skb);
3280 if (ret == RX_PTP_PKT_ERR) {
3282 rxq->stats.rx_drops++;
3286 return RX_NON_PTP_PKT;
3290 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3291 * @adap: the adapter
3293 * @dev: the ingress net device
3295 * Read hardware timestamp for the loopback PTP Tx event message
3297 static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
3298 struct net_device *dev)
3300 struct port_info *pi = netdev_priv(dev);
3302 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
3303 cxgb4_ptp_read_hwstamp(adapter, pi);
3311 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3312 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3313 * @rsp: Response Entry pointer into Response Queue
3314 * @gl: Gather List pointer
3316 * For adapters which support the SGE Doorbell Queue Timer facility,
3317 * we configure the Ethernet TX Queues to send CIDX Updates to the
3318 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
3319 * messages. This adds a small load to PCIe Link RX bandwidth and,
3320 * potentially, a higher CPU Interrupt load, but allows us to respond
3321 * much more quickly to the CIDX Updates. This is important for
3322 * Upper Layer Software which isn't willing to have a large amount
3323 * of TX Data outstanding before receiving DMA Completions.
3325 static void t4_tx_completion_handler(struct sge_rspq *rspq,
3327 const struct pkt_gl *gl)
3329 u8 opcode = ((const struct rss_header *)rsp)->opcode;
3330 struct port_info *pi = netdev_priv(rspq->netdev);
3331 struct adapter *adapter = rspq->adap;
3332 struct sge *s = &adapter->sge;
3333 struct sge_eth_txq *txq;
3335 /* skip RSS header */
3338 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
3340 if (unlikely(opcode == CPL_FW4_MSG &&
3341 ((const struct cpl_fw4_msg *)rsp)->type ==
3344 opcode = ((const struct rss_header *)rsp)->opcode;
3348 if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
3349 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
3354 txq = &s->ethtxq[pi->first_qset + rspq->idx];
3356 /* We've got the Hardware Consumer Index Update in the Egress Update
3357 * message. If we're using the SGE Doorbell Queue Timer mechanism,
3358 * these Egress Update messages will be our sole CIDX Updates we get
3359 * since we don't want to chew up PCIe bandwidth for both Ingress
3360 * Messages and Status Page writes. However, the code which manages
3361 * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
3362 * stored in the Status Page at the end of the TX Queue. It's easiest
3363 * to simply copy the CIDX Update value from the Egress Update message
3364 * to the Status Page. Also note that no Endian issues need to be
3365 * considered here since both are Big Endian and we're just copying
3366 * bytes consistently ...
3369 struct cpl_sge_egr_update *egr;
3371 egr = (struct cpl_sge_egr_update *)rsp;
3372 WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
3375 t4_sge_eth_txq_egress_update(adapter, txq, -1);
3379 * t4_ethrx_handler - process an ingress ethernet packet
3380 * @q: the response queue that received the packet
3381 * @rsp: the response queue descriptor holding the RX_PKT message
3382 * @si: the gather list of packet fragments
3384 * Process an ingress ethernet packet and deliver it to the stack.
3386 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
3387 const struct pkt_gl *si)
3390 struct sk_buff *skb;
3391 const struct cpl_rx_pkt *pkt;
3392 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3393 struct adapter *adapter = q->adap;
3394 struct sge *s = &q->adap->sge;
3395 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
3396 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
3397 u16 err_vec, tnl_hdr_len = 0;
3398 struct port_info *pi;
3401 /* If we're looking at a TX Queue CIDX Update, handle that separately
3404 if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
3405 (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
3406 t4_tx_completion_handler(q, rsp, si);
3410 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
3411 return handle_trace_pkt(q->adap, si);
3413 pkt = (const struct cpl_rx_pkt *)rsp;
3414 /* Compressed error vector is enabled for T6 only */
3415 if (q->adap->params.tp.rx_pkt_encap) {
3416 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
3417 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(be16_to_cpu(pkt->err_vec));
3419 err_vec = be16_to_cpu(pkt->err_vec);
3422 csum_ok = pkt->csum_calc && !err_vec &&
3423 (q->netdev->features & NETIF_F_RXCSUM);
3426 rxq->stats.bad_rx_pkts++;
3428 if (((pkt->l2info & htonl(RXF_TCP_F)) ||
3430 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
3431 do_gro(rxq, si, pkt, tnl_hdr_len);
3435 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
3436 if (unlikely(!skb)) {
3438 rxq->stats.rx_drops++;
3441 pi = netdev_priv(q->netdev);
3443 /* Handle PTP Event Rx packet */
3444 if (unlikely(pi->ptp_enable)) {
3445 ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
3446 if (ret == RX_PTP_PKT_ERR)
3450 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
3452 /* Handle the PTP Event Tx Loopback packet */
3453 if (unlikely(pi->ptp_enable && !ret &&
3454 (pkt->l2info & htonl(RXF_UDP_F)) &&
3455 cxgb4_ptp_is_ptp_rx(skb))) {
3456 if (!t4_tx_hststamp(adapter, skb, q->netdev))
3460 skb->protocol = eth_type_trans(skb, q->netdev);
3461 skb_record_rx_queue(skb, q->idx);
3462 if (skb->dev->features & NETIF_F_RXHASH)
3463 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3469 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
3471 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
3472 if (!pkt->ip_frag) {
3473 skb->ip_summed = CHECKSUM_UNNECESSARY;
3474 rxq->stats.rx_cso++;
3475 } else if (pkt->l2info & htonl(RXF_IP_F)) {
3476 __sum16 c = (__force __sum16)pkt->csum;
3477 skb->csum = csum_unfold(c);
3480 skb->ip_summed = CHECKSUM_UNNECESSARY;
3481 skb->csum_level = 1;
3483 skb->ip_summed = CHECKSUM_COMPLETE;
3485 rxq->stats.rx_cso++;
3488 skb_checksum_none_assert(skb);
3489 #ifdef CONFIG_CHELSIO_T4_FCOE
3490 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
3491 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
3493 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
3494 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
3495 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
3496 if (q->adap->params.tp.rx_pkt_encap)
3498 T6_COMPR_RXERR_SUM_F;
3500 csum_ok = err_vec & RXERR_CSUM_F;
3502 skb->ip_summed = CHECKSUM_UNNECESSARY;
3506 #undef CPL_RX_PKT_FLAGS
3507 #endif /* CONFIG_CHELSIO_T4_FCOE */
3510 if (unlikely(pkt->vlan_ex)) {
3511 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3512 rxq->stats.vlan_ex++;
3514 skb_mark_napi_id(skb, &q->napi);
3515 netif_receive_skb(skb);
3520 * restore_rx_bufs - put back a packet's Rx buffers
3521 * @si: the packet gather list
3522 * @q: the SGE free list
3523 * @frags: number of FL buffers to restore
3525 * Puts back on an FL the Rx buffers associated with @si. The buffers
3526 * have already been unmapped and are left unmapped; we mark them so to
3527 * prevent further unmapping attempts.
3529 * This function undoes a series of @unmap_rx_buf calls when we find out
3530 * that the current packet can't be processed right away after all and we
3531 * need to come back to it later. This is a very rare event and there's
3532 * no effort to make this particularly efficient.
3534 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
3537 struct rx_sw_desc *d;
3541 q->cidx = q->size - 1;
3544 d = &q->sdesc[q->cidx];
3545 d->page = si->frags[frags].page;
3546 d->dma_addr |= RX_UNMAPPED_BUF;
3552 * is_new_response - check if a response is newly written
3553 * @r: the response descriptor
3554 * @q: the response queue
3556 * Returns true if a response descriptor contains a yet unprocessed
3559 static inline bool is_new_response(const struct rsp_ctrl *r,
3560 const struct sge_rspq *q)
3562 return (r->type_gen >> RSPD_GEN_S) == q->gen;
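/* A note on the protocol: the hardware stamps every entry it writes
 * with the queue's current generation value, and the driver toggles
 * its own q->gen each time it wraps past the end of the ring, so
 * entries left over from the previous pass compare stale here.
 */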
3566 * rspq_next - advance to the next entry in a response queue
3569 * Updates the state of a response queue to advance it to the next entry.
3571 static inline void rspq_next(struct sge_rspq *q)
3573 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
3574 if (unlikely(++q->cidx == q->size)) {
3577 q->cur_desc = q->desc;
3582 * process_responses - process responses from an SGE response queue
3583 * @q: the ingress queue to process
3584 * @budget: how many responses can be processed in this round
3586 * Process responses from an SGE response queue up to the supplied budget.
3587 * Responses include received packets as well as control messages from FW
3590 * Additionally choose the interrupt holdoff time for the next interrupt
3591 * on this queue. If the system is under memory shortage, use a fairly
3592 * long delay to help recovery.
3594 static int process_responses(struct sge_rspq *q, int budget)
3597 int budget_left = budget;
3598 const struct rsp_ctrl *rc;
3599 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3600 struct adapter *adapter = q->adap;
3601 struct sge *s = &adapter->sge;
3603 while (likely(budget_left)) {
3604 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3605 if (!is_new_response(rc, q)) {
3606 if (q->flush_handler)
3607 q->flush_handler(q);
3612 rsp_type = RSPD_TYPE_G(rc->type_gen);
3613 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
3614 struct page_frag *fp;
3616 const struct rx_sw_desc *rsd;
3617 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
3619 if (len & RSPD_NEWBUF_F) {
3620 if (likely(q->offset > 0)) {
3621 free_rx_bufs(q->adap, &rxq->fl, 1);
3624 len = RSPD_LEN_G(len);
3628 /* gather packet fragments */
3629 for (frags = 0, fp = si.frags; ; frags++, fp++) {
3630 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
3631 bufsz = get_buf_size(adapter, rsd);
3632 fp->page = rsd->page;
3633 fp->offset = q->offset;
3634 fp->size = min(bufsz, len);
3638 unmap_rx_buf(q->adap, &rxq->fl);
3641 si.sgetstamp = SGE_TIMESTAMP_G(
3642 be64_to_cpu(rc->last_flit));
3644 * Last buffer remains mapped so explicitly make it
3645 * coherent for CPU access.
3647 dma_sync_single_for_cpu(q->adap->pdev_dev,
3649 fp->size, DMA_FROM_DEVICE);
3651 si.va = page_address(si.frags[0].page) +
3655 si.nfrags = frags + 1;
3656 ret = q->handler(q, q->cur_desc, &si);
3657 if (likely(ret == 0))
3658 q->offset += ALIGN(fp->size, s->fl_align);
3660 restore_rx_bufs(&si, &rxq->fl, frags);
3661 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
3662 ret = q->handler(q, q->cur_desc, NULL);
3664 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
3667 if (unlikely(ret)) {
3668 /* couldn't process descriptor, back off for recovery */
3669 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
3677 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
3678 __refill_fl(q->adap, &rxq->fl);
3679 return budget - budget_left;
3683 * napi_rx_handler - the NAPI handler for Rx processing
3684 * @napi: the napi instance
3685 * @budget: how many packets we can process in this round
3687 * Handler for new data events when using NAPI. This does not need any
3688 * locking or protection from interrupts as data interrupts are off at
3689 * this point and other adapter interrupts do not interfere (the latter
3690 * is not a concern at all with MSI-X, as non-data interrupts then have
3691 * a separate handler).
3693 static int napi_rx_handler(struct napi_struct *napi, int budget)
3695 unsigned int params;
3696 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
3700 work_done = process_responses(q, budget);
3701 if (likely(work_done < budget)) {
3704 napi_complete_done(napi, work_done);
3705 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
3707 if (q->adaptive_rx) {
3708 if (work_done > max(timer_pkt_quota[timer_index],
3710 timer_index = (timer_index + 1);
3712 timer_index = timer_index - 1;
3714 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
3715 q->next_intr_params =
3716 QINTR_TIMER_IDX_V(timer_index) |
3718 params = q->next_intr_params;
3720 params = q->next_intr_params;
3721 q->next_intr_params = q->intr_params;
3724 params = QINTR_TIMER_IDX_V(7);
3726 val = CIDXINC_V(work_done) | SEINTARM_V(params);
3728 /* If we don't have access to the new User GTS (T5+), use the old
3729 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3731 if (unlikely(q->bar2_addr == NULL)) {
3732 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
3733 val | INGRESSQID_V((u32)q->cntxt_id));
3735 writel(val | INGRESSQID_V(q->bar2_qid),
3736 q->bar2_addr + SGE_UDB_GTS);
3742 void cxgb4_ethofld_restart(unsigned long data)
3744 struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
3747 spin_lock(&eosw_txq->lock);
3748 pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
3750 pktcount += eosw_txq->ndesc;
3753 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
3754 eosw_txq, pktcount);
3755 eosw_txq->inuse -= pktcount;
3758 /* There may be some packets waiting for completions. So,
3759 * attempt to send these packets now.
3761 ethofld_xmit(eosw_txq->netdev, eosw_txq);
3762 spin_unlock(&eosw_txq->lock);
3765 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
3766 * @q: the response queue that received the packet
3767 * @rsp: the response queue descriptor holding the CPL message
3768 * @si: the gather list of packet fragments
3770 * Process an ETHOFLD Tx completion. Increment the cidx here, but
3771 * free up the descriptors in a tasklet later.
3773 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
3774 const struct pkt_gl *si)
3776 u8 opcode = ((const struct rss_header *)rsp)->opcode;
3778 /* skip RSS header */
3781 if (opcode == CPL_FW4_ACK) {
3782 const struct cpl_fw4_ack *cpl;
3783 struct sge_eosw_txq *eosw_txq;
3784 struct eotid_entry *entry;
3785 struct sk_buff *skb;
3790 cpl = (const struct cpl_fw4_ack *)rsp;
3791 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
3792 q->adap->tids.eotid_base;
3793 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
3797 eosw_txq = (struct sge_eosw_txq *)entry->data;
3801 spin_lock(&eosw_txq->lock);
3802 credits = cpl->credits;
3803 while (credits > 0) {
3804 skb = eosw_txq->desc[eosw_txq->cidx].skb;
3808 if (unlikely((eosw_txq->state ==
3809 CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
3811 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
3812 eosw_txq->cidx == eosw_txq->flowc_idx)) {
3813 flits = DIV_ROUND_UP(skb->len, 8);
3814 if (eosw_txq->state ==
3815 CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
3816 eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
3818 eosw_txq->state = CXGB4_EO_STATE_CLOSED;
3819 complete(&eosw_txq->completion);
3821 hdr_len = eth_get_headlen(eosw_txq->netdev,
3824 flits = ethofld_calc_tx_flits(q->adap, skb,
3827 eosw_txq_advance_index(&eosw_txq->cidx, 1,
3829 wrlen16 = DIV_ROUND_UP(flits * 8, 16);
3833 eosw_txq->cred += cpl->credits;
3836 spin_unlock(&eosw_txq->lock);
3838 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
3839 * if there were packets waiting for completion.
3841 tasklet_schedule(&eosw_txq->qresume_tsk);
3849 * The MSI-X interrupt handler for an SGE response queue.
3851 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
3853 struct sge_rspq *q = cookie;
3855 napi_schedule(&q->napi);
3860 * Process the indirect interrupt entries in the interrupt queue and kick off
3861 * NAPI for each queue that has generated an entry.
3863 static unsigned int process_intrq(struct adapter *adap)
3865 unsigned int credits;
3866 const struct rsp_ctrl *rc;
3867 struct sge_rspq *q = &adap->sge.intrq;
3870 spin_lock(&adap->sge.intrq_lock);
3871 for (credits = 0; ; credits++) {
3872 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3873 if (!is_new_response(rc, q))
3877 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
3878 unsigned int qid = ntohl(rc->pldbuflen_qid);
3880 qid -= adap->sge.ingr_start;
3881 napi_schedule(&adap->sge.ingr_map[qid]->napi);
3887 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
3889 /* If we don't have access to the new User GTS (T5+), use the old
3890 * doorbell mechanism; otherwise use the new BAR2 mechanism.
3892 if (unlikely(q->bar2_addr == NULL)) {
3893 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
3894 val | INGRESSQID_V(q->cntxt_id));
3896 writel(val | INGRESSQID_V(q->bar2_qid),
3897 q->bar2_addr + SGE_UDB_GTS);
3900 spin_unlock(&adap->sge.intrq_lock);
3905 * The MSI interrupt handler handles data events from SGE response queues
3906 * as well as error and other async events, as they all use the same MSI vector.
3908 static irqreturn_t t4_intr_msi(int irq, void *cookie)
3910 struct adapter *adap = cookie;
3912 if (adap->flags & CXGB4_MASTER_PF)
3913 t4_slow_intr_handler(adap);
3914 process_intrq(adap);
3919 * Interrupt handler for legacy INTx interrupts.
3920 * Handles data events from SGE response queues as well as error and other
3921 * async events as they all use the same interrupt line.
3923 static irqreturn_t t4_intr_intx(int irq, void *cookie)
3925 struct adapter *adap = cookie;
3927 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
3928 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
3929 process_intrq(adap))
3931 return IRQ_NONE; /* probably shared interrupt */
3935 * t4_intr_handler - select the top-level interrupt handler
3936 * @adap: the adapter
3938 * Selects the top-level interrupt handler based on the type of interrupts
3939 * (MSI-X, MSI, or INTx).
3941 irq_handler_t t4_intr_handler(struct adapter *adap)
3943 if (adap->flags & CXGB4_USING_MSIX)
3944 return t4_sge_intr_msix;
3945 if (adap->flags & CXGB4_USING_MSI)
3947 return t4_intr_intx;
3950 static void sge_rx_timer_cb(struct timer_list *t)
3954 struct adapter *adap = from_timer(adap, t, sge.rx_timer);
3955 struct sge *s = &adap->sge;
3957 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
3958 for (m = s->starving_fl[i]; m; m &= m - 1) {
3959 struct sge_eth_rxq *rxq;
3960 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
3961 struct sge_fl *fl = s->egr_map[id];
3963 clear_bit(id, s->starving_fl);
3964 smp_mb__after_atomic();
3966 if (fl_starving(adap, fl)) {
3967 rxq = container_of(fl, struct sge_eth_rxq, fl);
3968 if (napi_reschedule(&rxq->rspq.napi))
3971 set_bit(id, s->starving_fl);
3974 /* The remainder of the SGE RX Timer Callback routine is dedicated to
3975 * global Master PF activities like checking for chip ingress stalls,
3978 if (!(adap->flags & CXGB4_MASTER_PF))
3981 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
3984 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
3987 static void sge_tx_timer_cb(struct timer_list *t)
3989 struct adapter *adap = from_timer(adap, t, sge.tx_timer);
3990 struct sge *s = &adap->sge;
3991 unsigned long m, period;
3992 unsigned int i, budget;
3994 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
3995 for (m = s->txq_maperr[i]; m; m &= m - 1) {
3996 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
3997 struct sge_uld_txq *txq = s->egr_map[id];
3999 clear_bit(id, s->txq_maperr);
4000 tasklet_schedule(&txq->qresume_tsk);
4003 if (!is_t4(adap->params.chip)) {
4004 struct sge_eth_txq *q = &s->ptptxq;
4007 spin_lock(&adap->ptp_lock);
4008 avail = reclaimable(&q->q);
4011 free_tx_desc(adap, &q->q, avail, false);
4012 q->q.in_use -= avail;
4014 spin_unlock(&adap->ptp_lock);
4017 budget = MAX_TIMER_TX_RECLAIM;
4018 i = s->ethtxq_rover;
4020 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
4025 if (++i >= s->ethqsets)
4027 } while (i != s->ethtxq_rover);
4028 s->ethtxq_rover = i;
4031 /* If we found too many reclaimable packets, schedule a timer
4032 * in the near future to continue where we left off.
4036 /* We reclaimed all reclaimable TX Descriptors, so reschedule
4037 * at the normal period.
4039 period = TX_QCHECK_PERIOD;
4042 mod_timer(&s->tx_timer, jiffies + period);
4046 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4047 * @adapter: the adapter
4048 * @qid: the SGE Queue ID
4049 * @qtype: the SGE Queue Type (Egress or Ingress)
4050 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4052 * Returns the BAR2 address for the SGE Queue Registers associated with
4053 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
4054 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
4055 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
4056 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
4058 static void __iomem *bar2_address(struct adapter *adapter,
4060 enum t4_bar2_qtype qtype,
4061 unsigned int *pbar2_qid)
4066 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
4067 &bar2_qoffset, pbar2_qid);
4071 return adapter->bar2 + bar2_qoffset;
4074 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4075 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4077 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
4078 struct net_device *dev, int intr_idx,
4079 struct sge_fl *fl, rspq_handler_t hnd,
4080 rspq_flush_handler_t flush_hnd, int cong)
4084 struct sge *s = &adap->sge;
4085 struct port_info *pi = netdev_priv(dev);
4086 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
4088 /* Size needs to be a multiple of 16, including the status entry. */
4089 iq->size = roundup(iq->size, 16);
4091 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
4092 &iq->phys_addr, NULL, 0,
4093 dev_to_node(adap->pdev_dev));
4097 memset(&c, 0, sizeof(c));
4098 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
4099 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4100 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
4101 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
4103 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
4104 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
4105 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
4106 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
4107 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
4109 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
4110 FW_IQ_CMD_IQGTSMODE_F |
4111 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
4112 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
4113 c.iqsize = htons(iq->size);
4114 c.iqaddr = cpu_to_be64(iq->phys_addr);
4116 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
4117 FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
4118 : FW_IQ_IQTYPE_OFLD));
4121 unsigned int chip_ver =
4122 CHELSIO_CHIP_VERSION(adap->params.chip);
4124 /* Allocate the ring for the hardware free list (with space
4125 * for its status page) along with the associated software
4126 * descriptor ring. The free list size needs to be a multiple
4127 * of the Egress Queue Unit and at least 2 Egress Units larger
4128 * than the SGE's Egress Congestion Threshold
4129 * (fl_starve_thres - 1).
4131 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
4132 fl->size = s->fl_starve_thres - 1 + 2 * 8;
4133 fl->size = roundup(fl->size, 8);
4134 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
4135 sizeof(struct rx_sw_desc), &fl->addr,
4136 &fl->sdesc, s->stat_len,
4137 dev_to_node(adap->pdev_dev));
4141 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
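/* e.g. with a 1024-pointer Free List and a 64-byte status page
 * (s->stat_len == 64), flsz = 1024 / 8 + 64 / 64 = 129 Egress Queue
 * Units of sizeof(struct tx_desc) == 64 bytes each.
 */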
4142 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
4143 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
4144 FW_IQ_CMD_FL0DATARO_V(relaxed) |
4145 FW_IQ_CMD_FL0PADEN_F);
4147 c.iqns_to_fl0congen |=
4148 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
4149 FW_IQ_CMD_FL0CONGCIF_F |
4150 FW_IQ_CMD_FL0CONGEN_F);
4151 /* In T6, for egress queue type FL there is internal overhead
4152 * of 16B for header going into FLM module. Hence the maximum
4153 * allowed burst size is 448 bytes. For T4/T5, the hardware
4154 * doesn't coalesce fetch requests if more than 64 bytes of
4155 * Free List pointers are provided, so we use a 128-byte Fetch
4156 * Burst Minimum there (T6 implements coalescing so we can use
4157 * the smaller 64-byte value there).
4159 c.fl0dcaen_to_fl0cidxfthresh =
4160 htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
4161 FETCHBURSTMIN_128B_X :
4162 FETCHBURSTMIN_64B_T6_X) |
4163 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
4164 FETCHBURSTMAX_512B_X :
4165 FETCHBURSTMAX_256B_X));
4166 c.fl0size = htons(flsz);
4167 c.fl0addr = cpu_to_be64(fl->addr);
4170 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4174 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
4175 iq->cur_desc = iq->desc;
4178 iq->next_intr_params = iq->intr_params;
4179 iq->cntxt_id = ntohs(c.iqid);
4180 iq->abs_id = ntohs(c.physiqid);
4181 iq->bar2_addr = bar2_address(adap,
4183 T4_BAR2_QTYPE_INGRESS,
4185 iq->size--; /* subtract status entry */
4188 iq->flush_handler = flush_hnd;
4190 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
4191 skb_queue_head_init(&iq->lro_mgr.lroq);
4193 /* set offset to -1 to distinguish ingress queues without FL */
4194 iq->offset = fl ? 0 : -1;
4196 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = fl->pend_cred = 0;
		fl->pidx = fl->cidx = 0;
		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;

		/* Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
		fl->bar2_addr = bar2_address(adap,
					     fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
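		/* (refill_fl() posts the initial buffers and rings the Free
		 * List doorbell through fl->bar2_addr, which is why the BAR2
		 * doorbell information had to be set up first.)
		 */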
	}

	/* For T5 and later we attempt to set up the Congestion Manager values
	 * of the new RX Ethernet Queue.  This should really be handled by
	 * firmware because it's more complex than any host driver wants to
	 * get involved with and it's different per chip and this is almost
	 * certainly wrong.  Firmware would be wrong as well, but it would be
	 * a lot easier to fix in one place ...  For now we do something very
	 * simple (and hopefully less wrong).
	 */
	if (!is_t4(adap->params.chip) && cong >= 0) {
		u32 param, val, ch_map = 0;
		int i;
		u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
		if (cong == 0) {
			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
		} else {
			val =
			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					ch_map |= 1 << (i << cng_ch_bits_log);
			}
			val |= CONMCTXT_CNGCHMAP_V(ch_map);
		}
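		/* For instance, assuming cng_ch_bits_log = 2, a congestion
		 * channel map of 0x5 (channels 0 and 2) yields
		 * ch_map = (1 << 0) | (1 << 8) = 0x101.
		 */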
		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
		if (ret)
			dev_warn(adap->pdev_dev, "Failed to set Congestion"
				 " Manager Context for Ingress Queue %d: %d\n",
				 iq->cntxt_id, -ret);
	}

	return 0;

fl_nomem:
	ret = -ENOMEM;
err:
	if (iq->desc) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		iq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
	q->cntxt_id = id;
	q->bar2_addr = bar2_address(adap,
				    q->cntxt_id,
				    T4_BAR2_QTYPE_EGRESS,
				    &q->bar2_qid);
	q->in_use = 0;
	q->cidx = q->pidx = 0;
	q->stops = q->restarts = 0;
	q->stat = (void *)&q->desc[q->size];
	spin_lock_init(&q->db_lock);
	adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
/**
 *	t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
 *	@adap: the adapter
 *	@txq: the SGE Ethernet TX Queue to initialize
 *	@dev: the Linux Network Device
 *	@netdevq: the corresponding Linux TX Queue
 *	@iqid: the Ingress Queue to which to deliver CIDX Update messages
 *	@dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
 */
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct net_device *dev, struct netdev_queue *netdevq,
			 unsigned int iqid, u8 dbqt)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	struct port_info *pi = netdev_priv(dev);
	struct sge *s = &adap->sge;
	struct fw_eq_eth_cmd c;
	int ret, nentries;

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
				 sizeof(struct tx_desc),
				 sizeof(struct tx_sw_desc),
				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
				 netdev_queue_numa_node_read(netdevq));
	if (!txq->q.desc)
		return -ENOMEM;
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
			    FW_EQ_ETH_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));

	/* For TX Ethernet Queues using the SGE Doorbell Queue Timer
	 * mechanism, we use Ingress Queue messages for Hardware Consumer
	 * Index Updates on the TX Queue.  Otherwise we have the Hardware
	 * write the CIDX Updates into the Status Page at the end of the
	 * TX Queue.
	 */
	c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
				     FW_EQ_ETH_CMD_VIID_V(pi->viid));

	c.fetchszm_to_iqid =
		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
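	/* With Status Page delivery, the hardware DMA-writes each CIDX
	 * Update into the Status Page at the end of the ring (the q->stat
	 * entry set up by init_txq()).
	 */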
	/* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
	c.dcaen_to_eqsize =
		htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
					    ? FETCHBURSTMIN_64B_X
					    : FETCHBURSTMIN_64B_T6_X) |
		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);

	/* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
	 * currently configured Timer Index.  This can be changed later via an
	 * ethtool -C tx-usecs {Timer Val} command.  Note that the SGE
	 * Doorbell Queue mode is currently automatically enabled in the
	 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
	 */
	if (dbqt)
		c.timeren_timerix =
			cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
				    FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		kfree(txq->q.sdesc);
		txq->q.sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	txq->q.q_type = CXGB4_TXQ_ETH;
	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
	txq->txq = netdevq;
	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
	txq->mapping_err = 0;
	txq->dbqt = dbqt;

	return 0;
}
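
/* Example (hypothetical caller): an Ethernet TX Queue is typically paired
 * with the firmware event queue for CIDX Update delivery, e.g.
 *
 *	err = t4_sge_alloc_eth_txq(adap, &adap->sge.ethtxq[i], dev,
 *				   netdev_get_tx_queue(dev, i),
 *				   adap->sge.fw_evtq.cntxt_id, dbqt);
 */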
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
			  struct net_device *dev, unsigned int iqid,
			  unsigned int cmplqid)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	struct port_info *pi = netdev_priv(dev);
	struct sge *s = &adap->sge;
	struct fw_eq_ctrl_cmd c;
	int ret, nentries;

	/* Add status entries */
	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
				 NULL, 0, dev_to_node(adap->pdev_dev));
	if (!txq->q.desc)
		return -ENOMEM;
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
			    FW_EQ_CTRL_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
	c.physeqid_pkd = htonl(0);
	c.fetchszm_to_iqid =
		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
	c.dcaen_to_eqsize =
		htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
					     ? FETCHBURSTMIN_64B_X
					     : FETCHBURSTMIN_64B_T6_X) |
		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  txq->q.desc, txq->q.phys_addr);
		txq->q.desc = NULL;
		return ret;
	}

	txq->q.q_type = CXGB4_TXQ_CTRL;
	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
	txq->full = 0;
	return 0;
}
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
			unsigned int cmplqid)
{
	u32 param, val;

	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
		 FW_PARAMS_PARAM_YZ_V(eqid));
	val = cmplqid;
	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
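
/* Hypothetical usage sketch: re-point an existing control queue at a new
 * completion Ingress Queue after the response queues have been reshuffled,
 * e.g.
 *
 *	ret = t4_sge_mod_ctrl_txq(adap, cq->q.cntxt_id, rspq->cntxt_id);
 */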
static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
				 struct net_device *dev, u32 cmd, u32 iqid)
{
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	struct port_info *pi = netdev_priv(dev);
	struct sge *s = &adap->sge;
	struct fw_eq_ofld_cmd c;
	u32 fb_min, nentries;
	int ret;

	/* Add status entries */
	nentries = q->size + s->stat_len / sizeof(struct tx_desc);
	q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
			     sizeof(struct tx_sw_desc), &q->phys_addr,
			     &q->sdesc, s->stat_len, NUMA_NO_NODE);
	if (!q->desc)
		return -ENOMEM;

	if (chip_ver <= CHELSIO_T5)
		fb_min = FETCHBURSTMIN_64B_X;
	else
		fb_min = FETCHBURSTMIN_64B_T6_X;
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
			    FW_EQ_OFLD_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
	c.fetchszm_to_iqid =
		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
	c.dcaen_to_eqsize =
		htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
	c.eqaddr = cpu_to_be64(q->phys_addr);

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		kfree(q->sdesc);
		q->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev,
				  nentries * sizeof(struct tx_desc),
				  q->desc, q->phys_addr);
		q->desc = NULL;
		return ret;
	}

	init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
	return 0;
}
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
			 struct net_device *dev, unsigned int iqid,
			 unsigned int uld_type)
{
	u32 cmd = FW_EQ_OFLD_CMD;
	int ret;

	if (unlikely(uld_type == CXGB4_TX_CRYPTO))
		cmd = FW_EQ_CTRL_CMD;

	ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
	if (ret)
		return ret;

	txq->q.q_type = CXGB4_TXQ_ULD;
	txq->adap = adap;
	skb_queue_head_init(&txq->sendq);
	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
	txq->full = 0;
	txq->mapping_err = 0;
	return 0;
}
int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
			     struct net_device *dev, u32 iqid)
{
	int ret;

	ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
	if (ret)
		return ret;

	txq->q.q_type = CXGB4_TXQ_ULD;
	spin_lock_init(&txq->lock);
	txq->adap = adap;
	txq->tso = 0;
	txq->tx_cso = 0;
	txq->vlan_ins = 0;
	txq->mapping_err = 0;
	return 0;
}
void free_txq(struct adapter *adap, struct sge_txq *q)
{
	struct sge *s = &adap->sge;

	dma_free_coherent(adap->pdev_dev,
			  q->size * sizeof(struct tx_desc) + s->stat_len,
			  q->desc, q->phys_addr);
	q->cntxt_id = 0;
	q->sdesc = NULL;
	q->desc = NULL;
}
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
		  struct sge_fl *fl)
{
	struct sge *s = &adap->sge;
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
			  rq->desc, rq->phys_addr);
	netif_napi_del(&rq->napi);
	rq->netdev = NULL;
	rq->cntxt_id = rq->abs_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(adap, fl, fl->avail);
		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
				  fl->desc, fl->addr);
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}
/**
 *	t4_free_ofld_rxqs - free a block of consecutive Rx queues
 *	@adap: the adapter
 *	@n: number of queues
 *	@q: pointer to first queue
 *
 *	Release the resources of a consecutive block of offload Rx queues.
 */
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++)
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
}
void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
{
	if (txq->q.desc) {
		t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
				txq->q.cntxt_id);
		free_tx_desc(adap, &txq->q, txq->q.in_use, false);
		kfree(txq->q.sdesc);
		free_txq(adap, &txq->q);
	}
}
/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq;
	struct sge_eth_txq *etq;

	/* stop all Rx queues in order to start them draining */
	for (i = 0; i < adap->sge.ethqsets; i++) {
		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc)
			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
				   FW_IQ_TYPE_FL_INT_CAP,
				   eq->rspq.cntxt_id,
				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
				   0xffff);
	}
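
	/* (Stopping every Ingress Queue up front gives all of the queues a
	 * chance to drain before any of them is freed below.)
	 */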
	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++) {
		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq,
				     eq->fl.size ? &eq->fl : NULL);
		if (eq->msix) {
			cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
			eq->msix = NULL;
		}

		etq = &adap->sge.ethtxq[i];
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
				       etq->q.cntxt_id);
			__netif_tx_lock_bh(etq->txq);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			__netif_tx_unlock_bh(etq->txq);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}
	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	if (adap->sge.fw_evtq.desc) {
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
		if (adap->sge.fwevtq_msix_idx >= 0)
			cxgb4_free_msix_idx_in_bmap(adap,
						    adap->sge.fwevtq_msix_idx);
	}

	if (adap->sge.nd_msix_idx >= 0)
		cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	if (!is_t4(adap->params.chip)) {
		etq = &adap->sge.ptptxq;
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
				       etq->q.cntxt_id);
			spin_lock_bh(&adap->ptp_lock);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			spin_unlock_bh(&adap->ptp_lock);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0,
	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
}
void t4_sge_start(struct adapter *adap)
{
	adap->sge.ethtxq_rover = 0;
	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
}
/**
 *	t4_sge_stop - disable SGE operation
 *	@adap: the adapter
 *
 *	Stop tasklets and timers associated with the DMA engine.  Note that
 *	this is effective only if measures have been taken to disable any HW
 *	events that may restart them.
 */
void t4_sge_stop(struct adapter *adap)
{
	int i;
	struct sge *s = &adap->sge;

	if (in_interrupt())  /* actions below require waiting */
		return;

	if (s->rx_timer.function)
		del_timer_sync(&s->rx_timer);
	if (s->tx_timer.function)
		del_timer_sync(&s->tx_timer);

	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info;

		txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
		if (txq_info) {
			struct sge_uld_txq *txq = txq_info->uldtxq;

			for_each_ofldtxq(&adap->sge, i) {
				if (txq->q.desc)
					tasklet_kill(&txq->qresume_tsk);
			}
		}
	}

	if (is_pci_uld(adap)) {
		struct sge_uld_txq_info *txq_info;

		txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
		if (txq_info) {
			struct sge_uld_txq *txq = txq_info->uldtxq;

			for_each_ofldtxq(&adap->sge, i) {
				if (txq->q.desc)
					tasklet_kill(&txq->qresume_tsk);
			}
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
		struct sge_ctrl_txq *cq = &s->ctrlq[i];

		if (cq->q.desc)
			tasklet_kill(&cq->qresume_tsk);
	}
}
/**
 *	t4_sge_init_soft - grab core SGE values needed by SGE code
 *	@adap: the adapter
 *
 *	We need to grab the SGE operating parameters that we need to have
 *	in order to do our job and make sure we can live with them.
 */
static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
	#define READ_FL_BUF(x) \
		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

	/* We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

	#undef READ_FL_BUF

	/* The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg-1)) != 0) {
		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}
	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
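	/* E.g. with 4KB pages and a (hypothetical) 64KB Large Page Buffer,
	 * fl_pg_order = ilog2(65536) - 12 = 4.
	 */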

	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
			fl_small_mtu, fl_large_mtu);
		return -EINVAL;
	}
	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
	s->timer_val[0] = core_ticks_to_us(adap,
					   TIMERVALUE0_G(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
					   TIMERVALUE1_G(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
					   TIMERVALUE2_G(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
					   TIMERVALUE3_G(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
					   TIMERVALUE4_G(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
					   TIMERVALUE5_G(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
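
	/* (The four counter values are the packet-count thresholds that a
	 * queue selects via its pktcnt_idx; the six timer values are holdoff
	 * times in microseconds once converted from core clock ticks.)
	 */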
	return 0;
}

/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Perform low-level SGE code initialization needed every time after a
 *	chip reset.
 */
int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control, sge_conm_ctrl;
	int ret, egress_threshold;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
	s->pktshift = PKTSHIFT_G(sge_control);
	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;

	s->fl_align = t4_fl_pkt_align(adap);
	ret = t4_sge_init_soft(adap);
	if (ret < 0)
		return ret;
	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
	 * there was only a single field to control this.  For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */
	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T4:
		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
		break;
	case CHELSIO_T5:
		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	case CHELSIO_T6:
		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	default:
		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
			CHELSIO_CHIP_VERSION(adap->params.chip));
		return -EINVAL;
	}
	s->fl_starve_thres = 2 * egress_threshold + 1;
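	/* E.g. a (hypothetical) egress_threshold of 32 -- i.e. 64 Free List
	 * pointers, since the threshold is in units of 2 pointers -- yields
	 * fl_starve_thres = 65.
	 */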

	t4_idma_monitor_init(adap, &s->idma_monitor);

	/* Set up timers used for recurring callbacks to process RX and TX
	 * administrative tasks.
	 */
	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);

	spin_lock_init(&s->intrq_lock);

	return 0;
}