/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/clock.h"

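/* Unwind the DMA mappings that were pushed onto the SQ's DMA fifo for a
 * WQE whose construction failed part-way through (e.g. a mapping error).
 */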
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

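/* Pick the real TX queue for an skb: derive the user priority from DSCP
 * (when the port trust state is DSCP) or from the VLAN PCP, then map the
 * stack's queue index and priority to the channel's per-TC real txq.
 */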
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	int txq_ix = netdev_pick_tx(dev, skb, NULL);
	struct mlx5e_priv *priv = netdev_priv(dev);
	u16 num_channels;
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* txq_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	num_channels = priv->channels.params.num_channels;
	if (txq_ix >= num_channels)
		txq_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[txq_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}

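/* Number of header bytes that must be copied ("inlined") into the WQE
 * itself, as dictated by the SQ's minimum inline mode.  The result is
 * clamped to the skb's linear part.
 */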
static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}

	return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

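/* Set the checksum-offload flags in the Ethernet segment according to what
 * the stack requested (CHECKSUM_PARTIAL) and whether the packet carries an
 * encapsulated (tunnelled) inner header.
 */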
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
	} else
		sq->stats->csum_none++;
}

static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		else
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs;
	}

	return ihs;
}

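/* Map the remaining linear part of the skb and every page fragment for DMA,
 * emitting one data segment per mapping.  Returns the number of segments
 * built, or a negative error after unwinding any mappings already pushed.
 */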
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

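/* Finalize the WQE: record bookkeeping in the wqe_info, fill the control
 * segment, advance the producer counter, stop the queue if the next WQE may
 * not fit, and ring the doorbell unless more packets are pending (xmit_more).
 */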
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = num_wqebbs;
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
					       xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

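/* Build and post a single send WQE for an skb on an Ethernet SQ.
 *
 * The WQE is sized in 16-byte data segments (MLX5_SEND_WQE_DS): the fixed
 * ctrl/eth segments, optional inlined headers, one segment for the linear
 * part and one per page fragment.  A basic block (WQEBB) is 64 bytes, i.e.
 * four data segments, so num_wqebbs = DIV_ROUND_UP(ds_cnt, 4).
 */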
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	u16 headlen, ihs, contig_wqebbs_room;
	u16 ds_cnt, ds_cnt_inl = 0;
	u8 num_wqebbs, opcode;
	u32 num_bytes;
	int num_dma;
	__be16 mss;

	/* Calc ihs and ds cnt, no writes to wqe yet */
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (skb_is_gso(skb)) {
		opcode    = MLX5_OPCODE_LSO;
		mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
		ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);

		opcode    = MLX5_OPCODE_SEND;
		mss       = 0;
		ihs       = mlx5e_calc_min_inline(mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		stats->packets++;
	}

	stats->bytes     += num_bytes;
	stats->xmit_more += xmit_more;

	headlen = skb->len - ihs - skb->data_len;
	ds_cnt += !!headlen;
	ds_cnt += skb_shinfo(skb)->nr_frags;

	if (ihs) {
		ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
#ifdef CONFIG_MLX5_EN_IPSEC
		struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
		struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
#endif
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
		wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
#ifdef CONFIG_MLX5_EN_IPSEC
		wqe->eth = cur_eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
		wqe->ctrl = cur_ctrl;
#endif
	}

	/* fill wqe */
	wi   = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg =  wqe->data;

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation)
		mlx5e_tx_tunnel_accel(skb, eseg);
#endif
	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	eseg->mss = mss;

	if (ihs) {
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		if (skb_vlan_tag_present(skb)) {
			ihs -= VLAN_HLEN;
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
			stats->added_vlan_packets++;
		} else {
			memcpy(eseg->inline_hdr.start, skb->data, ihs);
		}
		dseg += ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg, xmit_more);

	return NETDEV_TX_OK;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

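/* ndo_start_xmit entry point: resolve the SQ from the skb's queue mapping,
 * let the acceleration layer preprocess the skb (which may consume it and
 * update the WQE/pi), then build and post the send WQE.
 */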
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

	/* might send skbs and update wqe and pi */
	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
	if (unlikely(!skb))
		return NETDEV_TX_OK;

	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
}

static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
				 struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &sq->cq.wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(sq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   sq->cq.mcq.cqn, ci, sq->sqn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}

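/* Process TX completions from the CQ within the NAPI budget: for each
 * completed WQE, deliver a HW timestamp if requested, unmap its DMA, free
 * the skb, then update BQL and wake the queue if it was stopped.
 * Returns true if the polling budget was exhausted (more work may remain).
 */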
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct sk_buff *skb;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) {
				mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
				sqcc += wi->num_wqebbs;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				hwts.hwtstamp =
					mlx5_timecounter_cyc2time(sq->clock,
								  get_cqe_ts(cqe));
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(sq,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->channel->priv->wq,
					   &sq->recover_work);
			}
			stats->cqe_err++;
		}
	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

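/* Drain all outstanding WQEs of an SQ without waiting for completions:
 * unmap their DMA and free the skbs so the queue can be torn down cleanly.
 */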
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u32 dma_fifo_cc;
	u16 sqcc;
	u16 ci;
	int i;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) {
			mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
			sqcc += wi->num_wqebbs;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sqcc += wi->num_wqebbs;
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

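/* IPoIB variant of the xmit path: same WQE construction as the Ethernet SQ,
 * plus a datagram segment carrying the destination address vector (AV),
 * QP number and Q_Key.
 */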
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey,
			  bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg  *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	u16 headlen, ihs, pi, contig_wqebbs_room;
	u16 ds_cnt, ds_cnt_inl = 0;
	u8 num_wqebbs, opcode;
	u32 num_bytes;
	int num_dma;
	__be16 mss;

	/* Calc ihs and ds cnt, no writes to wqe yet */
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (skb_is_gso(skb)) {
		opcode    = MLX5_OPCODE_LSO;
		mss       = cpu_to_be16(skb_shinfo(skb)->gso_size);
		ihs       = mlx5e_tx_get_gso_ihs(sq, skb);
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);

		opcode    = MLX5_OPCODE_SEND;
		mss       = 0;
		ihs       = mlx5e_calc_min_inline(mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		stats->packets++;
	}

	stats->bytes     += num_bytes;
	stats->xmit_more += xmit_more;

	headlen = skb->len - ihs - skb->data_len;
	ds_cnt += !!headlen;
	ds_cnt += skb_shinfo(skb)->nr_frags;

	if (ihs) {
		ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	mlx5i_sq_fetch_wqe(sq, &wqe, pi);

	/* fill wqe */
	wi       = &sq->db.wqe_info[pi];
	cseg     = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg     = &wqe->eth;
	dseg     =  wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	eseg->mss = mss;

	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		dseg += ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg, xmit_more);

	return NETDEV_TX_OK;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);