/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *     - Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer.
 *
 *     - Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"

static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter);
static int ena_restore_device(struct ena_adapter *adapter);

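/* ena_tx_timeout - netdev watchdog callback
 * @dev: net device whose Tx queue appears hung
 *
 * Only marks the adapter for reset; the test-and-set on the trigger flag
 * also keeps a new reset request from being queued while one is pending.
 */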
static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Check that we are not already in the middle of a triggered reset.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
		rxr->empty_rx_queue = 0;
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_tx_ids = vzalloc_node(size, node);
	if (!tx_ring->free_tx_ids) {
		tx_ring->free_tx_ids = vzalloc(size);
		if (!tx_ring->free_tx_ids) {
			vfree(tx_ring->tx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;
}

/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}

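/* validate_rx_req_id - sanity-check a req_id the device handed back
 *
 * An out-of-range Rx req_id means the device and driver views of the ring
 * no longer agree, so the bad_req_id statistic is bumped and a device
 * reset is scheduled via ENA_FLAG_TRIGGER_RESET.
 */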
static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_rx_ids = vzalloc_node(size, node);
	if (!rx_ring->free_rx_ids) {
		rx_ring->free_rx_ids = vzalloc(size);
		if (!rx_ring->free_rx_ids) {
			vfree(rx_ring->rx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_rx_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_rx_ids);
	rx_ring->free_rx_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}

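/* ena_alloc_rx_page - allocate and DMA-map one page for an Rx buffer
 * @rx_ring: ring the buffer belongs to
 * @rx_info: buffer slot to fill
 * @gfp: allocation flags (GFP_ATOMIC on the refill path)
 *
 * A page left over from a previous allocation is reused as-is.
 */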
static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = PAGE_SIZE;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}

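/* ena_refill_rx_bufs - post up to @num fresh Rx buffers to the device
 *
 * Returns the number of buffers actually posted; a partial refill is
 * accounted in the refil_partial statistic and logged.
 */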
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_rx_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* Add memory barrier to make sure the descriptors were written
	 * before issuing a doorbell
	 */
	wmb();

	ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
	mmiowb();

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which the buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
		struct ena_com_buf *ena_buf;
		int nr_frags;
		int j;

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev,
				 ena_buf->paddr,
				 ena_buf->len,
				 DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		nr_frags = tx_info->num_of_bufs - 1;
		for (j = 0; j < nr_frags; j++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev,
				       ena_buf->paddr,
				       ena_buf->len,
				       DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

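/* validate_tx_req_id - sanity-check a req_id from a Tx completion
 *
 * A req_id that is out of range, or that points at a slot without an skb,
 * indicates a device/driver inconsistency; the bad_req_id statistic is
 * bumped and a device reset is scheduled.
 */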
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}

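/* ena_clean_tx_irq - reclaim completed Tx descriptors for one ring
 * @tx_ring: ring to clean
 * @budget: maximum number of completed packets to process per call
 *
 * Unmaps and frees the skb behind every completion the device reports,
 * returns the req_ids to the free list, acks the completions and wakes
 * the Tx queue once enough space is available again. Returns the number
 * of packets processed.
 */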
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;
		struct ena_com_buf *ena_buf;
		int i, nr_frags;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		if (likely(tx_info->num_of_bufs != 0)) {
			ena_buf = tx_info->bufs;

			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(ena_buf, paddr),
					 dma_unmap_len(ena_buf, len),
					 DMA_TO_DEVICE);

			/* unmap remaining mapped pages */
			nr_frags = tx_info->num_of_bufs - 1;
			for (i = 0; i < nr_frags; i++) {
				ena_buf++;
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(ena_buf, paddr),
					       dma_unmap_len(ena_buf, len),
					       DMA_TO_DEVICE);
			}
		}

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
		ENA_TX_WAKEUP_THRESH;
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
			ENA_TX_WAKEUP_THRESH;
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
	}

	return skb;
}

static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;
	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_rx_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_rx_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: the ring from which the packet was received
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
				   struct ena_com_rx_ctx *ena_rx_ctx,
				   struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_rx_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

static inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
					      struct ena_ring *tx_ring)
{
	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
					  rx_ring->per_napi_packets,
					  rx_ring->per_napi_bytes,
					  &rx_ring->smoothed_interval,
					  &rx_ring->moder_tbl_idx);

	/* Reset per napi packets/bytes */
	tx_ring->per_napi_packets = 0;
	tx_ring->per_napi_bytes = 0;
	rx_ring->per_napi_packets = 0;
	rx_ring->per_napi_bytes = 0;
}

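/* ena_unmask_interrupt - re-enable the IRQ of a Tx/Rx ring pair
 *
 * Writes the per-ring moderation intervals together with the unmask bit.
 * Both rings share one MSI-X vector, so either CQ can be used to reach
 * the interrupt register.
 */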
static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
					struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_ring->smoothed_interval,
				tx_ring->smoothed_interval,
				true);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 */
	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}

static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
					     struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

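/* ena_io_poll - NAPI poll handler servicing one Tx/Rx ring pair
 *
 * Tx is cleaned with a budget derived from the ring size, Rx with the
 * NAPI budget. The interrupt is unmasked only when both budgets were left
 * unexhausted and napi_complete_done() accepted the completion.
 */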
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;

	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when scheduled
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* Tx and Rx share the same interrupt vector */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_intr_moderation(rx_ring, tx_ring);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}

static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->tx_ring->first_interrupt = true;
	ena_napi->rx_ring->first_interrupt = true;

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}

/* Reserve a single MSI-X vector for management (admin + aenq),
 * plus one vector for each potential IO queue.
 * The number of potential IO queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserve the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);

	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}

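/* ena_setup_mgmnt_intr - fill the irq_tbl entry for the management
 * (admin + AENQ) vector and hint its affinity to the first online CPU.
 */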
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}

static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}

static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}

static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}

static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}

static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}

static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
		napi->qid = i;
	}
}

static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}

static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;
	adapter->tx_frames = 1;
	adapter->rx_frames = 1;
}

/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}

static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_init_napi(adapter);

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_restore_ethtool_params(adapter);

	ena_napi_enable_all(adapter);

	return 0;
}

static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}

static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}

static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}

static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));

	return rc;
}

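/* ena_up - bring the data path up
 *
 * The order matters: I/O IRQs are requested first, Tx and Rx software
 * resources are allocated next, then the device I/O queues are created,
 * and finally ena_up_complete() refills Rx, starts the Tx queues and
 * enables NAPI. Each error label unwinds exactly the steps that already
 * succeeded.
 */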
static int ena_up(struct ena_adapter *adapter)
{
	int rc, i;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	/* allocate transmit descriptors */
	rc = ena_setup_all_tx_resources(adapter);
	if (rc)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = ena_setup_all_rx_resources(adapter);
	if (rc)
		goto err_setup_rx;

	/* Create TX queues */
	rc = ena_create_all_io_tx_queues(adapter);
	if (rc)
		goto err_create_tx_queues;

	/* Create RX queues */
	rc = ena_create_all_io_rx_queues(adapter);
	if (rc)
		goto err_create_rx_queues;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disabled napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return rc;

err_up:
	ena_destroy_all_rx_queues(adapter);
err_create_rx_queues:
	ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
	ena_free_all_io_rx_resources(adapter);
err_setup_rx:
	ena_free_all_io_tx_resources(adapter);
err_setup_tx:
	ena_free_io_irq(adapter);
err_req_irq:

	return rc;
}

static void ena_down(struct ena_adapter *adapter)
{
	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_all(adapter);

	/* After destroying the queues there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			dev_err(&adapter->pdev->dev, "Device reset failed\n");
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}

/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);

	return rc;
}

/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Check for device status and issue reset if needed */
	check_for_admin_com_state(adapter);
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, ifdown, adapter->netdev,
			  "Destroy failure, restarting device\n");
		ena_dump_stats_to_dmesg(adapter);
		/* rtnl lock already obtained in dev_ioctl() layer */
		ena_destroy_device(adapter);
		ena_restore_device(adapter);
	}

	return 0;
}

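/* ena_tx_csum - fill the Tx context with checksum/TSO offload metadata
 *
 * For CHECKSUM_PARTIAL or GSO skbs this resolves the L3/L4 protocols and
 * header lengths the device needs; for anything else meta_valid is
 * cleared and no offload metadata is sent.
 */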
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6: /* IPv6 */
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;

	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}

static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.linearize++;
	u64_stats_update_end(&tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.linearize_failed++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return rc;
}

/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_ring *tx_ring;
	struct netdev_queue *txq;
	struct ena_com_buf *ena_buf;
	void *push_hdr;
	u32 len, last_frag;
	u16 next_to_use;
	u16 req_id;
	u16 push_len;
	u16 header_len;
	dma_addr_t dma;
	int qid, rc, nb_hw_desc;
	int i = -1;

	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
	/* Determine which tx ring we will be placed on */
	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];
	txq = netdev_get_tx_queue(dev, qid);

	rc = ena_check_and_linearize_skb(tx_ring, skb);
	if (unlikely(rc))
		goto error_drop_packet;

	skb_tx_timestamp(skb);
	len = skb_headlen(skb);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_tx_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
	ena_buf = tx_info->bufs;
	tx_info->skb = skb;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* prepare the push buffer */
		push_len = min_t(u32, len, tx_ring->tx_max_header_size);
		header_len = push_len;
		push_hdr = skb->data;
	} else {
		push_len = 0;
		header_len = min_t(u32, len, tx_ring->tx_max_header_size);
		push_hdr = NULL;
	}

	netif_dbg(adapter, tx_queued, dev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  push_hdr, push_len);

	if (len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     len - push_len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = len - push_len;

		ena_buf++;
		tx_info->num_of_bufs++;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = len;
		ena_buf++;
	}

	tx_info->num_of_bufs += last_frag;

	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, skb);

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
				&nb_hw_desc);
	if (rc) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		tx_ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&tx_ring->syncp);
		netif_tx_stop_queue(txq);
		goto error_unmap_dma;
	}

	netdev_tx_sent_queue(txq, skb->len);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.cnt++;
	tx_ring->tx_stats.bytes += skb->len;
	u64_stats_update_end(&tx_ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						    tx_ring->ring_size);

	/* This WMB is aimed to:
	 * 1 - perform an smp barrier before reading next_to_completion
	 * 2 - make sure the descriptors were written before triggering
	 *     the doorbell
	 */
	wmb();

	/* stop the queue when no more space is available; the packet can
	 * need up to sgl_size + 2 descriptors: one for the meta descriptor
	 * and one for the header (if the header is larger than
	 * tx_max_header_size).
	 */
	if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
		     (tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		u64_stats_update_end(&tx_ring->syncp);

		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue would then remain stopped forever.
		 * To solve this issue perform a memory barrier, then check
		 * the wakeup condition and wake up the queue if needed.
		 */
		smp_mb();

		if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
		    > ENA_TX_WAKEUP_THRESH) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
	}

	if (netif_xmit_stopped(txq) || !skb->xmit_more) {
		/* trigger the dma engine */
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.doorbells++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return NETDEV_TX_OK;

error_report_dma_error:
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&tx_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map skb\n");

	tx_info->skb = NULL;

error_unmap_dma:
	if (i >= 0) {
		/* save value of frag that failed */
		last_frag = i;

		/* start back at beginning and unmap skb */
		tx_info->skb = NULL;
		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		for (i = 0; i < last_frag; i++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
				       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		}
	}

error_drop_packet:

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ena_netpoll(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int i;

	/* Don't schedule NAPI if the driver is in the middle of reset
	 * or netdev is down.
	 */
	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	u16 qid;
	/* we suspect that this is good for in-kernel network services that
	 * want to loop incoming skb rx to tx in normal user generated traffic;
	 * most probably we will not get to this
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = fallback(dev, skb);

	return qid;
}

static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		pr_err("Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strncpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			pr_warn("Cannot set host attributes\n");
		else
			pr_err("Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}

static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		pr_err("Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");
		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}

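/* ena_get_stats64 - aggregate the per-ring counters into rtnl stats
 *
 * Counters are read under u64_stats retry loops so the 64-bit values stay
 * consistent on 32-bit machines; error counters the device does not track
 * are reported as zero.
 */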
static void ena_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_ring *rx_ring, *tx_ring;
	unsigned int start;
	u64 rx_drops;
	int i;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++) {
		u64 bytes, packets;

		tx_ring = &adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->tx_stats.cnt;
			bytes = tx_ring->tx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		rx_ring = &adapter->rx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	do {
		start = u64_stats_fetch_begin_irq(&adapter->syncp);
		rx_drops = adapter->dev_stats.rx_drops;
	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));

	stats->rx_dropped = rx_drops;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;
}

2369 static const struct net_device_ops ena_netdev_ops = {
2370 .ndo_open = ena_open,
2371 .ndo_stop = ena_close,
2372 .ndo_start_xmit = ena_start_xmit,
2373 .ndo_select_queue = ena_select_queue,
2374 .ndo_get_stats64 = ena_get_stats64,
2375 .ndo_tx_timeout = ena_tx_timeout,
2376 .ndo_change_mtu = ena_change_mtu,
2377 .ndo_set_mac_address = NULL,
2378 .ndo_validate_addr = eth_validate_addr,
2379 #ifdef CONFIG_NET_POLL_CONTROLLER
2380 .ndo_poll_controller = ena_netpoll,
2381 #endif /* CONFIG_NET_POLL_CONTROLLER */
2384 static int ena_device_validate_params(struct ena_adapter *adapter,
2385 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2387 struct net_device *netdev = adapter->netdev;
2390 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2393 netif_err(adapter, drv, netdev,
2394 "Error, mac address are different\n");
2398 if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
2399 (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
2400 netif_err(adapter, drv, netdev,
2401 "Error, device doesn't support enough queues\n");
2405 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2406 netif_err(adapter, drv, netdev,
2407 "Error, device max mtu is smaller than netdev MTU\n");
2414 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2415 struct ena_com_dev_get_features_ctx *get_feat_ctx,
2418 struct device *dev = &pdev->dev;
2419 bool readless_supported;
2424 rc = ena_com_mmio_reg_read_request_init(ena_dev);
2426 dev_err(dev, "failed to init mmio read-less\n");
2430 /* The PCIe configuration space revision id indicates if mmio reg
2431 * read is disabled
2432 */
2433 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
2434 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2436 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2438 dev_err(dev, "Can not reset device\n");
2439 goto err_mmio_read_less;
2442 rc = ena_com_validate_version(ena_dev);
2444 dev_err(dev, "device version is too low\n");
2445 goto err_mmio_read_less;
2448 dma_width = ena_com_get_dma_width(ena_dev);
2449 if (dma_width < 0) {
2450 dev_err(dev, "Invalid dma width value %d\n", dma_width);
2452 goto err_mmio_read_less;
2455 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2457 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
2458 goto err_mmio_read_less;
2461 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2463 dev_err(dev, "pci_set_consistent_dma_mask failed 0x%x\n",
2465 goto err_mmio_read_less;
2468 /* ENA admin level init */
2469 rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
2472 "Can not initialize ena admin queue with device\n");
2473 goto err_mmio_read_less;
2476 /* To enable the msix interrupts the driver needs to know the number
2477 * of queues. So the driver uses polling mode to retrieve this
2480 ena_com_set_admin_polling_mode(ena_dev, true);
2482 ena_config_host_info(ena_dev);
2484 /* Get Device Attributes */
2485 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2487 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2488 goto err_admin_init;
2491 /* Try to turn on all the available aenq groups */
2492 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2493 BIT(ENA_ADMIN_FATAL_ERROR) |
2494 BIT(ENA_ADMIN_WARNING) |
2495 BIT(ENA_ADMIN_NOTIFICATION) |
2496 BIT(ENA_ADMIN_KEEP_ALIVE);
2498 aenq_groups &= get_feat_ctx->aenq.supported_groups;
2500 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2502 dev_err(dev, "Cannot configure aenq groups rc=%d\n", rc);
2503 goto err_admin_init;
2506 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2511 ena_com_delete_host_info(ena_dev);
2512 ena_com_admin_destroy(ena_dev);
2514 ena_com_mmio_reg_read_request_destroy(ena_dev);
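/* Reserve MSI-X vectors and move the admin queue to interrupt mode:
 * enable MSI-X, set up and request the management interrupt, then turn
 * off admin polling and enable AENQ notifications.
 */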
2519 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
2522 struct ena_com_dev *ena_dev = adapter->ena_dev;
2523 struct device *dev = &adapter->pdev->dev;
2526 rc = ena_enable_msix(adapter, io_vectors);
2528 dev_err(dev, "Can not reserve msix vectors\n");
2532 ena_setup_mgmnt_intr(adapter);
2534 rc = ena_request_mgmnt_irq(adapter);
2536 dev_err(dev, "Can not setup management interrupts\n");
2537 goto err_disable_msix;
2540 ena_com_set_admin_polling_mode(ena_dev, false);
2542 ena_com_admin_aenq_enable(ena_dev);
2547 ena_disable_msix(adapter);
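/* ena_destroy_device - graceful teardown used by reset and suspend.
 * Records whether the interface was up (so ena_restore_device() can
 * bring it back), resets the device before freeing resources, and
 * destroys the admin queue and the MMIO read request machinery.
 */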
2552 static void ena_destroy_device(struct ena_adapter *adapter)
2554 struct net_device *netdev = adapter->netdev;
2555 struct ena_com_dev *ena_dev = adapter->ena_dev;
2558 netif_carrier_off(netdev);
2560 del_timer_sync(&adapter->timer_service);
2562 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2563 adapter->dev_up_before_reset = dev_up;
2565 ena_com_set_admin_running_state(ena_dev, false);
2567 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2570 /* Before releasing the ENA resources, a device reset is required
2571 * (to prevent the device from accessing them).
2572 * In case the reset flag is set and the device is up, ena_down()
2573 * already performs the reset, so it can be skipped.
2575 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2576 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2578 ena_free_mgmnt_irq(adapter);
2580 ena_disable_msix(adapter);
2582 ena_com_abort_admin_commands(ena_dev);
2584 ena_com_wait_for_abort_completion(ena_dev);
2586 ena_com_admin_destroy(ena_dev);
2588 ena_com_mmio_reg_read_request_destroy(ena_dev);
2590 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2592 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
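/* ena_restore_device - counterpart of ena_destroy_device().
 * Re-initializes the device, re-validates its parameters, re-enables
 * MSI-X and the admin interrupts, and brings the interface back up if
 * it was up before the reset.
 */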
2595 static int ena_restore_device(struct ena_adapter *adapter)
2597 struct ena_com_dev_get_features_ctx get_feat_ctx;
2598 struct ena_com_dev *ena_dev = adapter->ena_dev;
2599 struct pci_dev *pdev = adapter->pdev;
2603 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2604 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
2606 dev_err(&pdev->dev, "Can not initialize device\n");
2609 adapter->wd_state = wd_state;
2611 rc = ena_device_validate_params(adapter, &get_feat_ctx);
2613 dev_err(&pdev->dev, "Validation of device parameters failed\n");
2614 goto err_device_destroy;
2617 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2618 /* Make sure we don't have a race with AENQ Links state handler */
2619 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2620 netif_carrier_on(adapter->netdev);
2622 rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2623 adapter->num_queues);
2625 dev_err(&pdev->dev, "Enable MSI-X failed\n");
2626 goto err_device_destroy;
2628 /* If the interface was up before the reset bring it up */
2629 if (adapter->dev_up_before_reset) {
2630 rc = ena_up(adapter);
2632 dev_err(&pdev->dev, "Failed to create I/O queues\n");
2633 goto err_disable_msix;
2637 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2638 dev_info(&pdev->dev, "Device reset completed successfully\n");
2642 ena_free_mgmnt_irq(adapter);
2643 ena_disable_msix(adapter);
2645 ena_com_admin_destroy(ena_dev);
2647 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2648 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2650 "Reset attempt failed. Can not reset the device\n");
2655 static void ena_fw_reset_device(struct work_struct *work)
2657 struct ena_adapter *adapter =
2658 container_of(work, struct ena_adapter, reset_task);
2659 struct pci_dev *pdev = adapter->pdev;
2661 if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2663 "device reset schedule while reset bit is off\n");
2667 ena_destroy_device(adapter);
2668 ena_restore_device(adapter);
2672 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2673 struct ena_ring *rx_ring)
2675 if (likely(rx_ring->first_interrupt))
2678 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2681 rx_ring->no_interrupt_event_cnt++;
2683 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2684 netif_err(adapter, rx_err, adapter->netdev,
2685 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2687 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2688 smp_mb__before_atomic();
2689 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
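/* Scan a Tx ring for completions that never arrived. A packet whose
 * first interrupt is still missing after twice the completion timeout
 * triggers a MISS_INTERRUPT reset; packets older than the timeout are
 * counted, and exceeding missing_tx_completion_threshold triggers a
 * MISS_TX_CMPL reset.
 */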
2696 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2697 struct ena_ring *tx_ring)
2699 struct ena_tx_buffer *tx_buf;
2700 unsigned long last_jiffies;
2704 for (i = 0; i < tx_ring->ring_size; i++) {
2705 tx_buf = &tx_ring->tx_buffer_info[i];
2706 last_jiffies = tx_buf->last_jiffies;
2708 if (last_jiffies == 0)
2709 /* no pending Tx at this location */
2712 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
2713 2 * adapter->missing_tx_completion_to))) {
2714 /* If after the grace period the interrupt is still not
2715 * received, we schedule a reset
2717 netif_err(adapter, tx_err, adapter->netdev,
2718 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
2720 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2721 smp_mb__before_atomic();
2722 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2726 if (unlikely(time_is_before_jiffies(last_jiffies +
2727 adapter->missing_tx_completion_to))) {
2728 if (!tx_buf->print_once)
2729 netif_notice(adapter, tx_err, adapter->netdev,
2730 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2733 tx_buf->print_once = 1;
2738 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
2739 netif_err(adapter, tx_err, adapter->netdev,
2740 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2742 adapter->missing_tx_completion_threshold);
2743 adapter->reset_reason =
2744 ENA_REGS_RESET_MISS_TX_CMPL;
2745 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2749 u64_stats_update_begin(&tx_ring->syncp);
2750 tx_ring->tx_stats.missed_tx = missed_tx;
2751 u64_stats_update_end(&tx_ring->syncp);
2756 static void check_for_missing_completions(struct ena_adapter *adapter)
2758 struct ena_ring *tx_ring;
2759 struct ena_ring *rx_ring;
2762 /* Make sure another process isn't bringing the device down or resetting it */
2765 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2768 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2771 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
2774 budget = ENA_MONITORED_TX_QUEUES;
2776 for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2777 tx_ring = &adapter->tx_ring[i];
2778 rx_ring = &adapter->rx_ring[i];
2780 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
2784 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
2793 adapter->last_monitored_tx_qid = i % adapter->num_queues;
2796 /* trigger napi schedule after 2 consecutive detections */
2797 #define EMPTY_RX_REFILL 2
2798 /* For the rare case where the device runs out of Rx descriptors and the
2799 * napi handler failed to refill new Rx descriptors (due to a lack of memory
2801 * This case will lead to a deadlock:
2802 * The device won't send interrupts since all the new Rx packets will be dropped
2803 * The napi handler won't allocate new Rx descriptors so the device won't
2804 * be able to send new packets.
2806 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2807 * It is recommended to have at least 512MB, with a minimum of 128MB for
2808 * constrained environments.
2810 * When such a situation is detected - Reschedule napi
2812 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2814 struct ena_ring *rx_ring;
2815 int i, refill_required;
2817 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2820 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2823 for (i = 0; i < adapter->num_queues; i++) {
2824 rx_ring = &adapter->rx_ring[i];
2827 ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
2828 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
2829 rx_ring->empty_rx_queue++;
2831 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
2832 u64_stats_update_begin(&rx_ring->syncp);
2833 rx_ring->rx_stats.empty_rx_ring++;
2834 u64_stats_update_end(&rx_ring->syncp);
2836 netif_err(adapter, drv, adapter->netdev,
2837 "trigger refill for ring %d\n", i);
2839 napi_schedule(rx_ring->napi);
2840 rx_ring->empty_rx_queue = 0;
2843 rx_ring->empty_rx_queue = 0;
2848 /* Check for keep alive expiration */
2849 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2851 unsigned long keep_alive_expired;
2853 if (!adapter->wd_state)
2856 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2859 keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
2860 adapter->keep_alive_timeout);
2861 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
2862 netif_err(adapter, drv, adapter->netdev,
2863 "Keep alive watchdog timeout.\n");
2864 u64_stats_update_begin(&adapter->syncp);
2865 adapter->dev_stats.wd_expired++;
2866 u64_stats_update_end(&adapter->syncp);
2867 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
2868 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
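/* If the admin queue stopped running (e.g. after a fatal error), count
 * the pause and schedule an ADMIN_TO reset.
 */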
2872 static void check_for_admin_com_state(struct ena_adapter *adapter)
2874 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
2875 netif_err(adapter, drv, adapter->netdev,
2876 "ENA admin queue is not in running state!\n");
2877 u64_stats_update_begin(&adapter->syncp);
2878 adapter->dev_stats.admin_q_pause++;
2879 u64_stats_update_end(&adapter->syncp);
2880 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
2881 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2885 static void ena_update_hints(struct ena_adapter *adapter,
2886 struct ena_admin_ena_hw_hints *hints)
2888 struct net_device *netdev = adapter->netdev;
2890 if (hints->admin_completion_tx_timeout)
2891 adapter->ena_dev->admin_queue.completion_timeout =
2892 hints->admin_completion_tx_timeout * 1000;
2894 if (hints->mmio_read_timeout)
2895 /* convert to usec */
2896 adapter->ena_dev->mmio_read.reg_read_to =
2897 hints->mmio_read_timeout * 1000;
2899 if (hints->missed_tx_completion_count_threshold_to_reset)
2900 adapter->missing_tx_completion_threshold =
2901 hints->missed_tx_completion_count_threshold_to_reset;
2903 if (hints->missing_tx_completion_timeout) {
2904 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2905 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
2906 else
2907 adapter->missing_tx_completion_to =
2908 msecs_to_jiffies(hints->missing_tx_completion_timeout);
2911 if (hints->netdev_wd_timeout)
2912 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
2914 if (hints->driver_watchdog_timeout) {
2915 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2916 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2917 else
2918 adapter->keep_alive_timeout =
2919 msecs_to_jiffies(hints->driver_watchdog_timeout);
2923 static void ena_update_host_info(struct ena_admin_host_info *host_info,
2924 struct net_device *netdev)
2926 host_info->supported_network_features[0] =
2927 netdev->features & GENMASK_ULL(31, 0);
2928 host_info->supported_network_features[1] =
2929 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
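/* ena_timer_service - periodic watchdog, re-armed every second.
 * Runs the keep-alive, admin-queue, missing-completion and empty-Rx
 * checks, refreshes the host info and debug area, and queues the reset
 * task when the trigger-reset flag is set.
 */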
2932 static void ena_timer_service(struct timer_list *t)
2934 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
2935 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
2936 struct ena_admin_host_info *host_info =
2937 adapter->ena_dev->host_attr.host_info;
2939 check_for_missing_keep_alive(adapter);
2941 check_for_admin_com_state(adapter);
2943 check_for_missing_completions(adapter);
2945 check_for_empty_rx_ring(adapter);
2948 ena_dump_stats_to_buf(adapter, debug_area);
2951 ena_update_host_info(host_info, adapter->netdev);
2953 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2954 netif_err(adapter, drv, adapter->netdev,
2955 "Trigger reset is on\n");
2956 ena_dump_stats_to_dmesg(adapter);
2957 queue_work(ena_wq, &adapter->reset_task);
2961 /* Reset the timer */
2962 mod_timer(&adapter->timer_service, jiffies + HZ);
2965 static int ena_calc_io_queue_num(struct pci_dev *pdev,
2966 struct ena_com_dev *ena_dev,
2967 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2969 int io_sq_num, io_queue_num;
2971 /* In case of LLQ use the llq number in the get feature cmd */
2972 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2973 io_sq_num = get_feat_ctx->max_queues.max_llq_num;
2975 if (io_sq_num == 0) {
2977 "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");
2979 ena_dev->tx_mem_queue_type =
2980 ENA_ADMIN_PLACEMENT_POLICY_HOST;
2981 io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2984 io_sq_num = get_feat_ctx->max_queues.max_sq_num;
2987 io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
2988 io_queue_num = min_t(int, io_queue_num, io_sq_num);
2989 io_queue_num = min_t(int, io_queue_num,
2990 get_feat_ctx->max_queues.max_cq_num);
2991 /* 1 IRQ for mgmnt and 1 IRQ for each IO queue */
2992 io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
2993 if (unlikely(!io_queue_num)) {
2994 dev_err(&pdev->dev, "The device doesn't have io queues\n");
2998 return io_queue_num;
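/* LLQ (push mode) is only usable when the device exposes the LLQ
 * memory BAR and reports a nonzero max_llq_num; otherwise fall back to
 * host memory placement.
 */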
3001 static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3002 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3006 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3008 /* Enable push mode if device supports LLQ */
3009 if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
3010 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
3011 else
3012 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3015 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3016 struct net_device *netdev)
3018 netdev_features_t dev_features = 0;
3020 /* Set offload features */
3021 if (feat->offload.tx &
3022 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3023 dev_features |= NETIF_F_IP_CSUM;
3025 if (feat->offload.tx &
3026 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3027 dev_features |= NETIF_F_IPV6_CSUM;
3029 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3030 dev_features |= NETIF_F_TSO;
3032 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3033 dev_features |= NETIF_F_TSO6;
3035 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3036 dev_features |= NETIF_F_TSO_ECN;
3038 if (feat->offload.rx_supported &
3039 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3040 dev_features |= NETIF_F_RXCSUM;
3042 if (feat->offload.rx_supported &
3043 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3044 dev_features |= NETIF_F_RXCSUM;
3052 netdev->hw_features |= netdev->features;
3053 netdev->vlan_features |= netdev->features;
3056 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3057 struct ena_com_dev_get_features_ctx *feat)
3059 struct net_device *netdev = adapter->netdev;
3061 /* Copy mac address */
3062 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3063 eth_hw_addr_random(netdev);
3064 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3065 } else {
3066 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3067 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3070 /* Set offload features */
3071 ena_set_dev_offloads(feat, netdev);
3073 adapter->max_mtu = feat->dev_attr.max_mtu;
3074 netdev->max_mtu = adapter->max_mtu;
3075 netdev->min_mtu = ENA_MIN_MTU;
3078 static int ena_rss_init_default(struct ena_adapter *adapter)
3080 struct ena_com_dev *ena_dev = adapter->ena_dev;
3081 struct device *dev = &adapter->pdev->dev;
3085 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3087 dev_err(dev, "Cannot init indirect table\n");
3091 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3092 val = ethtool_rxfh_indir_default(i, adapter->num_queues);
3093 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3094 ENA_IO_RXQ_IDX(val));
3095 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3096 dev_err(dev, "Cannot fill indirect table\n");
3097 goto err_fill_indir;
3101 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3102 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3103 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3104 dev_err(dev, "Cannot fill hash function\n");
3105 goto err_fill_indir;
3108 rc = ena_com_set_default_hash_ctrl(ena_dev);
3109 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3110 dev_err(dev, "Cannot fill hash control\n");
3111 goto err_fill_indir;
3117 ena_com_rss_destroy(ena_dev);
3123 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3127 if (ena_dev->mem_bar)
3128 devm_iounmap(&pdev->dev, ena_dev->mem_bar);
3130 if (ena_dev->reg_bar)
3131 devm_iounmap(&pdev->dev, ena_dev->reg_bar);
3133 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3134 pci_release_selected_regions(pdev, release_bars);
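/* The ring size is the default size clamped to the device's CQ and SQ
 * depth limits (and the LLQ depth in push mode), rounded down to a
 * power of two.
 */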
3137 static int ena_calc_queue_size(struct pci_dev *pdev,
3138 struct ena_com_dev *ena_dev,
3139 u16 *max_tx_sgl_size,
3140 u16 *max_rx_sgl_size,
3141 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3143 u32 queue_size = ENA_DEFAULT_RING_SIZE;
3145 queue_size = min_t(u32, queue_size,
3146 get_feat_ctx->max_queues.max_cq_depth);
3147 queue_size = min_t(u32, queue_size,
3148 get_feat_ctx->max_queues.max_sq_depth);
3150 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3151 queue_size = min_t(u32, queue_size,
3152 get_feat_ctx->max_queues.max_llq_depth);
3154 queue_size = rounddown_pow_of_two(queue_size);
3156 if (unlikely(!queue_size)) {
3157 dev_err(&pdev->dev, "Invalid queue size\n");
3161 *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3162 get_feat_ctx->max_queues.max_packet_tx_descs);
3163 *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3164 get_feat_ctx->max_queues.max_packet_rx_descs);
3169 /* ena_probe - Device Initialization Routine
3170 * @pdev: PCI device information struct
3171 * @ent: entry in ena_pci_tbl
3173 * Returns 0 on success, negative on failure
3175 * ena_probe initializes an adapter identified by a pci_dev structure.
3176 * The OS initialization, configuring of the adapter private structure,
3177 * and a hardware reset occur.
3179 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3181 struct ena_com_dev_get_features_ctx get_feat_ctx;
3182 static int version_printed;
3183 struct net_device *netdev;
3184 struct ena_adapter *adapter;
3185 struct ena_com_dev *ena_dev = NULL;
3186 static int adapters_found;
3187 int io_queue_num, bars, rc;
3189 u16 tx_sgl_size = 0;
3190 u16 rx_sgl_size = 0;
3193 dev_dbg(&pdev->dev, "%s\n", __func__);
3195 if (version_printed++ == 0)
3196 dev_info(&pdev->dev, "%s", version);
3198 rc = pci_enable_device_mem(pdev);
3200 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
3204 pci_set_master(pdev);
3206 ena_dev = vzalloc(sizeof(*ena_dev));
3209 goto err_disable_device;
3212 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3213 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
3215 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
3217 goto err_free_ena_dev;
3220 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
3221 pci_resource_start(pdev, ENA_REG_BAR),
3222 pci_resource_len(pdev, ENA_REG_BAR));
3223 if (!ena_dev->reg_bar) {
3224 dev_err(&pdev->dev, "failed to remap regs bar\n");
3226 goto err_free_region;
3229 ena_dev->dmadev = &pdev->dev;
3231 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
3233 dev_err(&pdev->dev, "ena device init failed\n");
3236 goto err_free_region;
3239 ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
3241 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3242 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3243 pci_resource_start(pdev, ENA_MEM_BAR),
3244 pci_resource_len(pdev, ENA_MEM_BAR));
3245 if (!ena_dev->mem_bar) {
3247 goto err_device_destroy;
3251 /* Initial Tx interrupt delay; assumes 1 usec granularity.
3252 * Updated during device initialization with the real granularity.
3254 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
3255 io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
3256 queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
3257 &rx_sgl_size, &get_feat_ctx);
3258 if ((queue_size <= 0) || (io_queue_num <= 0)) {
3260 goto err_device_destroy;
3263 dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
3264 io_queue_num, queue_size);
3266 /* dev zeroed in init_etherdev */
3267 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
3269 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
3271 goto err_device_destroy;
3274 SET_NETDEV_DEV(netdev, &pdev->dev);
3276 adapter = netdev_priv(netdev);
3277 pci_set_drvdata(pdev, adapter);
3279 adapter->ena_dev = ena_dev;
3280 adapter->netdev = netdev;
3281 adapter->pdev = pdev;
3283 ena_set_conf_feat_params(adapter, &get_feat_ctx);
3285 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3286 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3288 adapter->tx_ring_size = queue_size;
3289 adapter->rx_ring_size = queue_size;
3291 adapter->max_tx_sgl_size = tx_sgl_size;
3292 adapter->max_rx_sgl_size = rx_sgl_size;
3294 adapter->num_queues = io_queue_num;
3295 adapter->last_monitored_tx_qid = 0;
3297 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
3298 adapter->wd_state = wd_state;
3300 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
3302 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
3305 "Failed to query interrupt moderation feature\n");
3306 goto err_netdev_destroy;
3308 ena_init_io_rings(adapter);
3310 netdev->netdev_ops = &ena_netdev_ops;
3311 netdev->watchdog_timeo = TX_TIMEOUT;
3312 ena_set_ethtool_ops(netdev);
3314 netdev->priv_flags |= IFF_UNICAST_FLT;
3316 u64_stats_init(&adapter->syncp);
3318 rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3321 "Failed to enable and set the admin interrupts\n");
3322 goto err_worker_destroy;
3324 rc = ena_rss_init_default(adapter);
3325 if (rc && (rc != -EOPNOTSUPP)) {
3326 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
3330 ena_config_debug_area(adapter);
3332 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
3334 netif_carrier_off(netdev);
3336 rc = register_netdev(netdev);
3338 dev_err(&pdev->dev, "Cannot register net device\n");
3342 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
3344 adapter->last_keep_alive_jiffies = jiffies;
3345 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
3346 adapter->missing_tx_completion_to = TX_TIMEOUT;
3347 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
3349 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
3351 timer_setup(&adapter->timer_service, ena_timer_service, 0);
3352 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3354 dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
3355 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
3356 netdev->dev_addr, io_queue_num);
3358 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3365 ena_com_delete_debug_area(ena_dev);
3366 ena_com_rss_destroy(ena_dev);
3368 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3369 ena_free_mgmnt_irq(adapter);
3370 ena_disable_msix(adapter);
3372 ena_com_destroy_interrupt_moderation(ena_dev);
3373 del_timer(&adapter->timer_service);
3375 free_netdev(netdev);
3377 ena_com_delete_host_info(ena_dev);
3378 ena_com_admin_destroy(ena_dev);
3380 ena_release_bars(ena_dev, pdev);
3384 pci_disable_device(pdev);
3388 /*****************************************************************************/
3389 static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
3394 rc = pci_enable_sriov(dev, numvfs);
3397 "pci_enable_sriov failed to enable: %d vfs with the error: %d\n",
3406 pci_disable_sriov(dev);
3413 /*****************************************************************************/
3414 /*****************************************************************************/
3416 /* ena_remove - Device Removal Routine
3417 * @pdev: PCI device information struct
3419 * ena_remove is called by the PCI subsystem to alert the driver
3420 * that it should release a PCI device.
3422 static void ena_remove(struct pci_dev *pdev)
3424 struct ena_adapter *adapter = pci_get_drvdata(pdev);
3425 struct ena_com_dev *ena_dev;
3426 struct net_device *netdev;
3428 ena_dev = adapter->ena_dev;
3429 netdev = adapter->netdev;
3431 #ifdef CONFIG_RFS_ACCEL
3432 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
3433 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
3434 netdev->rx_cpu_rmap = NULL;
3436 #endif /* CONFIG_RFS_ACCEL */
3438 unregister_netdev(netdev);
3439 del_timer_sync(&adapter->timer_service);
3441 cancel_work_sync(&adapter->reset_task);
3443 /* Reset the device only if the device is running. */
3444 if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3445 ena_com_dev_reset(ena_dev, adapter->reset_reason);
3447 ena_free_mgmnt_irq(adapter);
3449 ena_disable_msix(adapter);
3451 free_netdev(netdev);
3453 ena_com_mmio_reg_read_request_destroy(ena_dev);
3455 ena_com_abort_admin_commands(ena_dev);
3457 ena_com_wait_for_abort_completion(ena_dev);
3459 ena_com_admin_destroy(ena_dev);
3461 ena_com_rss_destroy(ena_dev);
3463 ena_com_delete_debug_area(ena_dev);
3465 ena_com_delete_host_info(ena_dev);
3467 ena_release_bars(ena_dev, pdev);
3469 pci_disable_device(pdev);
3471 ena_com_destroy_interrupt_moderation(ena_dev);
3477 /* ena_suspend - PM suspend callback
3478 * @pdev: PCI device information struct
3479 * @state: power state
3481 static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
3483 struct ena_adapter *adapter = pci_get_drvdata(pdev);
3485 u64_stats_update_begin(&adapter->syncp);
3486 adapter->dev_stats.suspend++;
3487 u64_stats_update_end(&adapter->syncp);
3490 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3492 "ignoring device reset request as the device is being suspended\n");
3493 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3495 ena_destroy_device(adapter);
3500 /* ena_resume - PM resume callback
3501 * @pdev: PCI device information struct
3504 static int ena_resume(struct pci_dev *pdev)
3506 struct ena_adapter *adapter = pci_get_drvdata(pdev);
3509 u64_stats_update_begin(&adapter->syncp);
3510 adapter->dev_stats.resume++;
3511 u64_stats_update_end(&adapter->syncp);
3514 rc = ena_restore_device(adapter);
3520 static struct pci_driver ena_pci_driver = {
3521 .name = DRV_MODULE_NAME,
3522 .id_table = ena_pci_tbl,
3524 .remove = ena_remove,
3526 .suspend = ena_suspend,
3527 .resume = ena_resume,
3529 .sriov_configure = ena_sriov_configure,
3532 static int __init ena_init(void)
3534 pr_info("%s", version);
3536 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
3538 pr_err("Failed to create workqueue\n");
3542 return pci_register_driver(&ena_pci_driver);
3545 static void __exit ena_cleanup(void)
3547 pci_unregister_driver(&ena_pci_driver);
3550 destroy_workqueue(ena_wq);
3555 /******************************************************************************
3556 ******************************** AENQ Handlers *******************************
3557 *****************************************************************************/
3558 /* ena_update_on_link_change:
3559 * Notify the network interface about the change in link status
3561 static void ena_update_on_link_change(void *adapter_data,
3562 struct ena_admin_aenq_entry *aenq_e)
3564 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3565 struct ena_admin_aenq_link_change_desc *aenq_desc =
3566 (struct ena_admin_aenq_link_change_desc *)aenq_e;
3567 int status = aenq_desc->flags &
3568 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3570 if (status) {
3571 netdev_dbg(adapter->netdev, "%s\n", __func__);
3572 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3573 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
3574 netif_carrier_on(adapter->netdev);
3575 } else {
3576 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3577 netif_carrier_off(adapter->netdev);
3581 static void ena_keep_alive_wd(void *adapter_data,
3582 struct ena_admin_aenq_entry *aenq_e)
3584 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3585 struct ena_admin_aenq_keep_alive_desc *desc;
3588 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3589 adapter->last_keep_alive_jiffies = jiffies;
3591 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
3593 u64_stats_update_begin(&adapter->syncp);
3594 adapter->dev_stats.rx_drops = rx_drops;
3595 u64_stats_update_end(&adapter->syncp);
3598 static void ena_notification(void *adapter_data,
3599 struct ena_admin_aenq_entry *aenq_e)
3601 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3602 struct ena_admin_ena_hw_hints *hints;
3604 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3605 "Invalid group(%x) expected %x\n",
3606 aenq_e->aenq_common_desc.group,
3607 ENA_ADMIN_NOTIFICATION);
3609 switch (aenq_e->aenq_common_desc.syndrom) {
3610 case ENA_ADMIN_UPDATE_HINTS:
3611 hints = (struct ena_admin_ena_hw_hints *)
3612 (&aenq_e->inline_data_w4);
3613 ena_update_hints(adapter, hints);
3614 break;
3615 default:
3616 netif_err(adapter, drv, adapter->netdev,
3617 "Invalid aenq notification link state %d\n",
3618 aenq_e->aenq_common_desc.syndrom);
3622 /* This handler will be called for unknown event groups or unimplemented handlers */
3623 static void unimplemented_aenq_handler(void *data,
3624 struct ena_admin_aenq_entry *aenq_e)
3626 struct ena_adapter *adapter = (struct ena_adapter *)data;
3628 netif_err(adapter, drv, adapter->netdev,
3629 "Unknown event was received or event with unimplemented handler\n");
3632 static struct ena_aenq_handlers aenq_handlers = {
3634 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3635 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3636 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3638 .unimplemented_handler = unimplemented_aenq_handler
3641 module_init(ena_init);
3642 module_exit(ena_cleanup);