/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
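
/* Illustration (a sketch, assuming the usual netif_msg_init(debug,
 * DEFAULT_MSG_ENABLE) pattern is applied at probe time): the default
 * debug=-1 keeps DEFAULT_MSG_ENABLE above, while e.g. "modprobe ena debug=16"
 * sets the low 16 bits and enables every NETIF_MSG_* category.
 */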
static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger a reset.
	 * Check that we are not already in the middle of a reset trigger.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);
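	/* Note on the begin/end bracketing above, used for every counter in
	 * this driver: the syncp is a u64_stats_sync, i.e. a seqcount on
	 * 32-bit kernels and a no-op on 64-bit ones, so stats readers
	 * (e.g. the ndo_get_stats64 path) can retry until they observe a
	 * consistent 64-bit snapshot.
	 */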
	netif_err(adapter, tx_err, dev, "Transmit timed out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->requested_rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
		rxr->empty_rx_queue = 0;
		adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}
/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index, freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}
static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* Allocate an extra element so that in the rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index, freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}
static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if the previously allocated page is not used yet, keep it */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = ENA_PAGE_SIZE;

	return 0;
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];
		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}
static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}
static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
			     struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i = 0;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring whose buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_skb(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have a valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_skb(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the ring's circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}
static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
	}

	return skb;
}
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;
	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;
		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: ring on which the packet was received
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if the packet is IPv4 and a header checksum error was reported */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);
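
	/* Worked example (the two constants come from ena_netdev.h; the
	 * values below, divider 8 and packet cap 256, are assumptions used
	 * only for illustration): with ring_size 1024 the threshold is
	 * min(1024 / 8, 256) = 128, so buffers are replenished in batches
	 * of at least 128 rather than one doorbell per received packet.
	 */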
	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}
static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}
static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_ring->smoothed_interval,
				tx_ring->smoothed_interval,
				true);

	/* The Tx and Rx CQs share the same MSI-X vector, and both hold a
	 * pointer to its interrupt register, so either one can be used to
	 * unmask it.
	 */
	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since both rings run on the same CPU */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

out:
	put_cpu();
}
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;

	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or go down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa node and unmask the interrupt only when NAPI
		 * was scheduled from interrupt context (as opposed to busy
		 * polling via sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}
/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->tx_ring->first_interrupt = true;
	ena_napi->rx_ring->first_interrupt = true;

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}
/* Reserve a single MSI-X vector for management (admin + aenq),
 * plus one vector for each potential io queue.
 * The number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
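/* Example layout (a sketch, assuming ENA_ADMIN_MSIX_VEC is 1, as implied by
 * the num_queues adjustment in ena_enable_msix() below): with 8 io queues,
 * ENA_MAX_MSIX_VEC(8) reserves 9 vectors -- vector 0 (ENA_MGMNT_IRQ_IDX) for
 * admin completions + AENQ, and vectors 1..8 (ENA_IO_IRQ_IDX(i)) for the
 * combined Tx/Rx queue pairs.
 */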
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserve the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}
static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}
static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}
static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}
static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}
static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}
static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}
static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
		napi->qid = i;
	}
}
static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;
	adapter->tx_frames = 1;
	adapter->rx_frames = 1;
}
/* Configure the Rx forwarding (RSS) */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_restore_ethtool_params(adapter);

	ena_napi_enable_all(adapter);

	return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx;
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = tx_ring->ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}
static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx;
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = rx_ring->ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
		INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
	}

	return 0;

create_err:
	while (i--) {
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
	}

	return rc;
}
static void set_io_rings_size(struct ena_adapter *adapter,
			      int new_tx_size, int new_rx_size)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->tx_ring[i].ring_size = new_tx_size;
		adapter->rx_ring[i].ring_size = new_rx_size;
	}
}
/* This function allows queue allocation to backoff when the system is
 * low on memory. If there is not enough memory to allocate io queues
 * the driver will try to allocate smaller queues.
 *
 * The backoff algorithm is as follows:
 *  1. Try to allocate TX and RX; if successful, return success.
 *  2. Divide by 2 the size of the larger of the RX and TX queues (or both
 *     if their sizes are the same).
 *  3. If TX or RX would drop below the minimum ring size, return failure.
 *  4. Go back to 1.
 */
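/* Worked example of the backoff above: with requested sizes TX=4096 and
 * RX=1024, a first -ENOMEM halves only TX (the larger ring) to 2048, a
 * second halves it to 1024; from then on both rings are halved together
 * (512/512, 256/256, ...) until allocation succeeds or one ring would
 * drop below ENA_MIN_RING_SIZE, at which point the failure is final.
 */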
static int create_queues_with_size_backoff(struct ena_adapter *adapter)
{
	int rc, cur_rx_ring_size, cur_tx_ring_size;
	int new_rx_ring_size, new_tx_ring_size;

	/* current queue sizes might be set to smaller than the requested
	 * ones due to past queue allocation failures.
	 */
	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
			  adapter->requested_rx_ring_size);

	while (1) {
		rc = ena_setup_all_tx_resources(adapter);
		if (rc)
			goto err_setup_tx;

		rc = ena_create_all_io_tx_queues(adapter);
		if (rc)
			goto err_create_tx_queues;

		rc = ena_setup_all_rx_resources(adapter);
		if (rc)
			goto err_setup_rx;

		rc = ena_create_all_io_rx_queues(adapter);
		if (rc)
			goto err_create_rx_queues;

		return 0;

err_create_rx_queues:
		ena_free_all_io_rx_resources(adapter);
err_setup_rx:
		ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
		ena_free_all_io_tx_resources(adapter);
err_setup_tx:
		if (rc != -ENOMEM) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with error code %d\n",
				  rc);
			return rc;
		}

		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
		cur_rx_ring_size = adapter->rx_ring[0].ring_size;

		netif_err(adapter, ifup, adapter->netdev,
			  "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
			  cur_tx_ring_size, cur_rx_ring_size);

		new_tx_ring_size = cur_tx_ring_size;
		new_rx_ring_size = cur_rx_ring_size;

		/* Decrease the size of the larger queue, or
		 * decrease both if they are the same size.
		 */
		if (cur_rx_ring_size <= cur_tx_ring_size)
			new_tx_ring_size = cur_tx_ring_size / 2;
		if (cur_rx_ring_size >= cur_tx_ring_size)
			new_rx_ring_size = cur_rx_ring_size / 2;

		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
				  ENA_MIN_RING_SIZE);
			return rc;
		}

		netif_err(adapter, ifup, adapter->netdev,
			  "Retrying queue creation with sizes TX=%d, RX=%d\n",
			  new_tx_ring_size,
			  new_rx_ring_size);

		set_io_rings_size(adapter, new_tx_ring_size,
				  new_rx_ring_size);
	}
}
static int ena_up(struct ena_adapter *adapter)
{
	int rc, i;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	/* napi poll functions should be initialized before running
	 * request_irq(), to handle a rare condition where there is a pending
	 * interrupt, causing the ISR to fire immediately while the poll
	 * function wasn't set yet, causing a null dereference
	 */
	ena_init_napi(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	rc = create_queues_with_size_backoff(adapter);
	if (rc)
		goto err_create_queues_with_backoff;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disabled napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return rc;

err_up:
	ena_destroy_all_tx_queues(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_destroy_all_rx_queues(adapter);
	ena_free_all_io_rx_resources(adapter);
err_create_queues_with_backoff:
	ena_free_io_irq(adapter);
err_req_irq:
	ena_del_napi(adapter);

	return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_all(adapter);

	/* After the queues are destroyed there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
		if (rc)
			dev_err(&adapter->pdev->dev, "Device reset failed\n");
		/* stop submitting admin commands on a device that was reset */
		ena_com_set_admin_running_state(adapter->ena_dev, false);
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}
/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}
/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		return 0;

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	/* Check for device status and issue reset if needed */
	check_for_admin_com_state(adapter);
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, ifdown, adapter->netdev,
			  "Destroy failure, restarting device\n");
		ena_dump_stats_to_dmesg(adapter);
		/* rtnl lock already obtained in dev_ioctl() layer */
		ena_destroy_device(adapter, false);
		ena_restore_device(adapter);
	}

	return 0;
}
int ena_update_queue_sizes(struct ena_adapter *adapter,
			   u32 new_tx_size,
			   u32 new_rx_size)
{
	bool dev_up;

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_close(adapter->netdev);
	adapter->requested_tx_ring_size = new_tx_size;
	adapter->requested_rx_ring_size = new_rx_size;
	ena_init_io_rings(adapter);
	return dev_up ? ena_up(adapter) : 0;
}
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;
	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}
static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.linearize++;
	u64_stats_update_end(&tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.linearize_failed++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return rc;
}
static int ena_tx_map_skb(struct ena_ring *tx_ring,
			  struct ena_tx_buffer *tx_info,
			  struct sk_buff *skb,
			  void **push_hdr,
			  u16 *header_len)
{
	struct ena_adapter *adapter = tx_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma;
	u32 skb_head_len, frag_len, last_frag;
	u16 push_len = 0;
	u16 delta = 0;
	int i = 0;

	skb_head_len = skb_headlen(skb);
	tx_info->skb = skb;
	ena_buf = tx_info->bufs;

	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* When the device is in LLQ mode, the driver copies
		 * the header into the device memory space.
		 * The ena_com layer assumes the header is in a linear
		 * memory space.
		 * This assumption might be wrong since part of the header
		 * can be in the fragmented buffers.
		 * Use skb_header_pointer to make sure the header is in a
		 * linear memory space.
		 */
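		/* skb_header_pointer() returns skb->data directly when the
		 * first push_len bytes are already linear; otherwise it
		 * copies them into push_buf_intermediate_buf and returns
		 * that buffer. That is why llq_buffer_copy below is only
		 * incremented when the returned pointer differs from
		 * skb->data.
		 */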
		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
		*push_hdr = skb_header_pointer(skb, 0, push_len,
					       tx_ring->push_buf_intermediate_buf);
		*header_len = push_len;
		if (unlikely(skb->data != *push_hdr)) {
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.llq_buffer_copy++;
			u64_stats_update_end(&tx_ring->syncp);

			delta = push_len - skb_head_len;
		}
	} else {
		*push_hdr = NULL;
		*header_len = min_t(u32, skb_head_len,
				    tx_ring->tx_max_header_size);
	}

	netif_dbg(adapter, tx_queued, adapter->netdev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  *push_hdr, push_len);

	if (skb_head_len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     skb_head_len - push_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = skb_head_len - push_len;

		ena_buf++;
		tx_info->num_of_bufs++;
		tx_info->map_linear_data = 1;
	} else {
		tx_info->map_linear_data = 0;
	}
	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag_len = skb_frag_size(frag);

		if (unlikely(delta >= frag_len)) {
			delta -= frag_len;
			continue;
		}

		dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
				       frag_len - delta, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = frag_len - delta;
		ena_buf++;
		tx_info->num_of_bufs++;
		delta = 0;
	}

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&tx_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map skb\n");

	tx_info->skb = NULL;

	tx_info->num_of_bufs += i;
	ena_unmap_tx_skb(tx_ring, tx_info);

	return -EINVAL;
}
2252 /* Called with netif_tx_lock. */
2253 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2255 struct ena_adapter *adapter = netdev_priv(dev);
2256 struct ena_tx_buffer *tx_info;
2257 struct ena_com_tx_ctx ena_tx_ctx;
2258 struct ena_ring *tx_ring;
2259 struct netdev_queue *txq;
2261 u16 next_to_use, req_id, header_len;
2262 int qid, rc, nb_hw_desc;
2264 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2265 /* Determine which tx ring we will be placed on */
2266 qid = skb_get_queue_mapping(skb);
2267 tx_ring = &adapter->tx_ring[qid];
2268 txq = netdev_get_tx_queue(dev, qid);
2270 rc = ena_check_and_linearize_skb(tx_ring, skb);
2272 goto error_drop_packet;
2274 skb_tx_timestamp(skb);
2276 next_to_use = tx_ring->next_to_use;
2277 req_id = tx_ring->free_ids[next_to_use];
2278 tx_info = &tx_ring->tx_buffer_info[req_id];
2279 tx_info->num_of_bufs = 0;
2281 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2283 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2285 goto error_drop_packet;
2287 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2288 ena_tx_ctx.ena_bufs = tx_info->bufs;
2289 ena_tx_ctx.push_header = push_hdr;
2290 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2291 ena_tx_ctx.req_id = req_id;
2292 ena_tx_ctx.header_len = header_len;
2294 /* set flags and meta data */
2295 ena_tx_csum(&ena_tx_ctx, skb);
2297 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
2298 netif_dbg(adapter, tx_queued, dev,
2299 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
2301 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2304 /* prepare the packet's descriptors to dma engine */
2305 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2308 /* ena_com_prepare_tx() can't fail due to overflow of tx queue,
2309 * since the number of free descriptors in the queue is checked
2310 * after sending the previous packet. In case there isn't enough
2311 * space in the queue for the next packet, the queue is stopped
2312 * until there is again enough available space in the queue.
2313 * All other failure reasons of ena_com_prepare_tx() are fatal
2314 * and therefore require a device reset.
2315 */
2316 if (unlikely(rc)) {
2317 netif_err(adapter, tx_queued, dev,
2318 "failed to prepare tx bufs\n");
2319 u64_stats_update_begin(&tx_ring->syncp);
2320 tx_ring->tx_stats.prepare_ctx_err++;
2321 u64_stats_update_end(&tx_ring->syncp);
2322 adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
2323 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2324 goto error_unmap_dma;
2327 netdev_tx_sent_queue(txq, skb->len);
2329 u64_stats_update_begin(&tx_ring->syncp);
2330 tx_ring->tx_stats.cnt++;
2331 tx_ring->tx_stats.bytes += skb->len;
2332 u64_stats_update_end(&tx_ring->syncp);
2334 tx_info->tx_descs = nb_hw_desc;
2335 tx_info->last_jiffies = jiffies;
2336 tx_info->print_once = 0;
2338 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2339 tx_ring->ring_size);
2341 /* stop the queue when no more space is available; the packet can have up
2342 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header
2343 * (if the header is larger than tx_max_header_size).
2345 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2346 tx_ring->sgl_size + 2))) {
2347 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", __func__, qid);
2350 netif_tx_stop_queue(txq);
2351 u64_stats_update_begin(&tx_ring->syncp);
2352 tx_ring->tx_stats.queue_stop++;
2353 u64_stats_update_end(&tx_ring->syncp);
2355 /* There is a rare condition where this function decides to
2356 * stop the queue but meanwhile clean_tx_irq updates
2357 * next_to_completion and terminates.
2358 * The queue will remain stopped forever.
2359 * To solve this issue, add a mb() to make sure that the
2360 * netif_tx_stop_queue() write is visible before checking if
2361 * there is additional space in the queue. */
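/* The barrier the comment above calls for: smp_mb() orders the
 * netif_tx_stop_queue() store before the free-space re-check below.
 */
smp_mb();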
2365 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2366 ENA_TX_WAKEUP_THRESH)) {
2367 netif_tx_wake_queue(txq);
2368 u64_stats_update_begin(&tx_ring->syncp);
2369 tx_ring->tx_stats.queue_wakeup++;
2370 u64_stats_update_end(&tx_ring->syncp);
2374 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
2375 /* trigger the dma engine. ena_com_write_sq_doorbell() has a mb */
2378 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2379 u64_stats_update_begin(&tx_ring->syncp);
2380 tx_ring->tx_stats.doorbells++;
2381 u64_stats_update_end(&tx_ring->syncp);
2384 return NETDEV_TX_OK;
2386 error_unmap_dma:
2387 ena_unmap_tx_skb(tx_ring, tx_info);
2388 tx_info->skb = NULL;
2390 error_drop_packet:
2391 dev_kfree_skb(skb);
2392 return NETDEV_TX_OK;
2395 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2396 struct net_device *sb_dev)
2399 /* we suspect that this is good for in-kernel network services that
2400 * want to loop incoming skb rx to tx in normal user generated traffic,
2401 * most probably we will not get to this
2403 if (skb_rx_queue_recorded(skb))
2404 qid = skb_get_rx_queue(skb);
2405 else
2406 qid = netdev_pick_tx(dev, skb, NULL);
2411 static void ena_config_host_info(struct ena_com_dev *ena_dev,
2412 struct pci_dev *pdev)
2414 struct ena_admin_host_info *host_info;
2417 /* Allocate only the host info */
2418 rc = ena_com_allocate_host_info(ena_dev);
2420 pr_err("Cannot allocate host info\n");
2424 host_info = ena_dev->host_attr.host_info;
2426 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
2427 host_info->os_type = ENA_ADMIN_OS_LINUX;
2428 host_info->kernel_ver = LINUX_VERSION_CODE;
2429 strlcpy(host_info->kernel_ver_str, utsname()->version,
2430 sizeof(host_info->kernel_ver_str) - 1);
2431 host_info->os_dist = 0;
2432 strncpy(host_info->os_dist_str, utsname()->release,
2433 sizeof(host_info->os_dist_str) - 1);
2434 host_info->driver_version =
2435 (DRV_MODULE_VER_MAJOR) |
2436 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2437 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
2438 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
2439 host_info->num_cpus = num_online_cpus();
2441 rc = ena_com_set_host_attributes(ena_dev);
2442 if (rc) {
2443 if (rc == -EOPNOTSUPP)
2444 pr_warn("Cannot set host attributes\n");
2445 else
2446 pr_err("Cannot set host attributes\n");
2454 ena_com_delete_host_info(ena_dev);
2457 static void ena_config_debug_area(struct ena_adapter *adapter)
2459 u32 debug_area_size;
2462 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2463 if (ss_count <= 0) {
2464 netif_err(adapter, drv, adapter->netdev,
2465 "SS count is negative\n");
2469 /* allocate 32 bytes for each string and 64 bits for each value */
2470 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
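/* Worked example: with ss_count == 10 this is 10 * 32 + 10 * 8 = 400 bytes */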
2472 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2474 pr_err("Cannot allocate debug area\n");
2478 rc = ena_com_set_host_attributes(adapter->ena_dev);
2479 if (rc) {
2480 if (rc == -EOPNOTSUPP)
2481 netif_warn(adapter, drv, adapter->netdev,
2482 "Cannot set host attributes\n");
2483 else
2484 netif_err(adapter, drv, adapter->netdev,
2485 "Cannot set host attributes\n");
2491 ena_com_delete_debug_area(adapter->ena_dev);
2494 static void ena_get_stats64(struct net_device *netdev,
2495 struct rtnl_link_stats64 *stats)
2497 struct ena_adapter *adapter = netdev_priv(netdev);
2498 struct ena_ring *rx_ring, *tx_ring;
2503 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2506 for (i = 0; i < adapter->num_queues; i++) {
2509 tx_ring = &adapter->tx_ring[i];
2512 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
2513 packets = tx_ring->tx_stats.cnt;
2514 bytes = tx_ring->tx_stats.bytes;
2515 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
2517 stats->tx_packets += packets;
2518 stats->tx_bytes += bytes;
2520 rx_ring = &adapter->rx_ring[i];
2523 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
2524 packets = rx_ring->rx_stats.cnt;
2525 bytes = rx_ring->rx_stats.bytes;
2526 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
2528 stats->rx_packets += packets;
2529 stats->rx_bytes += bytes;
2533 start = u64_stats_fetch_begin_irq(&adapter->syncp);
2534 rx_drops = adapter->dev_stats.rx_drops;
2535 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
2537 stats->rx_dropped = rx_drops;
2539 stats->multicast = 0;
2540 stats->collisions = 0;
2542 stats->rx_length_errors = 0;
2543 stats->rx_crc_errors = 0;
2544 stats->rx_frame_errors = 0;
2545 stats->rx_fifo_errors = 0;
2546 stats->rx_missed_errors = 0;
2547 stats->tx_window_errors = 0;
2549 stats->rx_errors = 0;
2550 stats->tx_errors = 0;
2553 static const struct net_device_ops ena_netdev_ops = {
2554 .ndo_open = ena_open,
2555 .ndo_stop = ena_close,
2556 .ndo_start_xmit = ena_start_xmit,
2557 .ndo_select_queue = ena_select_queue,
2558 .ndo_get_stats64 = ena_get_stats64,
2559 .ndo_tx_timeout = ena_tx_timeout,
2560 .ndo_change_mtu = ena_change_mtu,
2561 .ndo_set_mac_address = NULL,
2562 .ndo_validate_addr = eth_validate_addr,
2565 static int ena_device_validate_params(struct ena_adapter *adapter,
2566 struct ena_com_dev_get_features_ctx *get_feat_ctx)
2568 struct net_device *netdev = adapter->netdev;
2571 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr);
2573 if (!rc) {
2574 netif_err(adapter, drv, netdev,
2575 "Error, MAC addresses are different\n");
2579 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2580 netif_err(adapter, drv, netdev,
2581 "Error, device max mtu is smaller than netdev MTU\n");
2588 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2589 struct ena_com_dev_get_features_ctx *get_feat_ctx,
2592 struct device *dev = &pdev->dev;
2593 bool readless_supported;
2598 rc = ena_com_mmio_reg_read_request_init(ena_dev);
2600 dev_err(dev, "failed to init mmio read less\n");
2604 /* The PCIe configuration space revision ID indicates whether mmio reg read is supported */
2607 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
2608 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2610 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2612 dev_err(dev, "Can not reset device\n");
2613 goto err_mmio_read_less;
2616 rc = ena_com_validate_version(ena_dev);
2618 dev_err(dev, "device version is too low\n");
2619 goto err_mmio_read_less;
2622 dma_width = ena_com_get_dma_width(ena_dev);
2623 if (dma_width < 0) {
2624 dev_err(dev, "Invalid dma width value %d", dma_width);
2626 goto err_mmio_read_less;
2629 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2631 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
2632 goto err_mmio_read_less;
2635 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2637 dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
2639 goto err_mmio_read_less;
2642 /* ENA admin level init */
2643 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2646 "Can not initialize ena admin queue with device\n");
2647 goto err_mmio_read_less;
2650 /* To enable the msix interrupts the driver needs to know the number
2651 * of queues. So the driver uses polling mode to retrieve this information. */
2654 ena_com_set_admin_polling_mode(ena_dev, true);
2656 ena_config_host_info(ena_dev, pdev);
2658 /* Get Device Attributes */
2659 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2661 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2662 goto err_admin_init;
2665 /* Try to turn on all the available aenq groups */
2666 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2667 BIT(ENA_ADMIN_FATAL_ERROR) |
2668 BIT(ENA_ADMIN_WARNING) |
2669 BIT(ENA_ADMIN_NOTIFICATION) |
2670 BIT(ENA_ADMIN_KEEP_ALIVE);
2672 aenq_groups &= get_feat_ctx->aenq.supported_groups;
2674 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2676 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
2677 goto err_admin_init;
2680 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2685 ena_com_delete_host_info(ena_dev);
2686 ena_com_admin_destroy(ena_dev);
2688 ena_com_mmio_reg_read_request_destroy(ena_dev);
2693 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
2696 struct ena_com_dev *ena_dev = adapter->ena_dev;
2697 struct device *dev = &adapter->pdev->dev;
2700 rc = ena_enable_msix(adapter, io_vectors);
2702 dev_err(dev, "Can not reserve msix vectors\n");
2706 ena_setup_mgmnt_intr(adapter);
2708 rc = ena_request_mgmnt_irq(adapter);
2710 dev_err(dev, "Can not setup management interrupts\n");
2711 goto err_disable_msix;
2714 ena_com_set_admin_polling_mode(ena_dev, false);
2716 ena_com_admin_aenq_enable(ena_dev);
2721 ena_disable_msix(adapter);
2726 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2728 struct net_device *netdev = adapter->netdev;
2729 struct ena_com_dev *ena_dev = adapter->ena_dev;
2732 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2735 netif_carrier_off(netdev);
2737 del_timer_sync(&adapter->timer_service);
2739 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2740 adapter->dev_up_before_reset = dev_up;
2742 ena_com_set_admin_running_state(ena_dev, false);
2744 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2745 ena_down(adapter);
2747 /* Stop the device from sending AENQ events (in case the reset flag is
2748 * set and the device is up, ena_down() has already reset the device).
2750 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2751 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2753 ena_free_mgmnt_irq(adapter);
2755 ena_disable_msix(adapter);
2757 ena_com_abort_admin_commands(ena_dev);
2759 ena_com_wait_for_abort_completion(ena_dev);
2761 ena_com_admin_destroy(ena_dev);
2763 ena_com_mmio_reg_read_request_destroy(ena_dev);
2765 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2767 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2768 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2771 static int ena_restore_device(struct ena_adapter *adapter)
2773 struct ena_com_dev_get_features_ctx get_feat_ctx;
2774 struct ena_com_dev *ena_dev = adapter->ena_dev;
2775 struct pci_dev *pdev = adapter->pdev;
2779 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2780 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
2782 dev_err(&pdev->dev, "Can not initialize device\n");
2785 adapter->wd_state = wd_state;
2787 rc = ena_device_validate_params(adapter, &get_feat_ctx);
2789 dev_err(&pdev->dev, "Validation of device parameters failed\n");
2790 goto err_device_destroy;
2793 rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2794 adapter->num_queues);
2796 dev_err(&pdev->dev, "Enable MSI-X failed\n");
2797 goto err_device_destroy;
2799 /* If the interface was up before the reset bring it up */
2800 if (adapter->dev_up_before_reset) {
2801 rc = ena_up(adapter);
2803 dev_err(&pdev->dev, "Failed to create I/O queues\n");
2804 goto err_disable_msix;
2808 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2810 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2811 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2812 netif_carrier_on(adapter->netdev);
2814 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2816 "Device reset completed successfully, Driver info: %s\n",
2821 ena_free_mgmnt_irq(adapter);
2822 ena_disable_msix(adapter);
2824 ena_com_abort_admin_commands(ena_dev);
2825 ena_com_wait_for_abort_completion(ena_dev);
2826 ena_com_admin_destroy(ena_dev);
2827 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
2828 ena_com_mmio_reg_read_request_destroy(ena_dev);
2830 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2831 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2833 "Reset attempt failed. Can not reset the device\n");
2838 static void ena_fw_reset_device(struct work_struct *work)
2840 struct ena_adapter *adapter =
2841 container_of(work, struct ena_adapter, reset_task);
2842 struct pci_dev *pdev = adapter->pdev;
2844 if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2846 "device reset schedule while reset bit is off\n");
2850 ena_destroy_device(adapter, false);
2851 ena_restore_device(adapter);
2855 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2856 struct ena_ring *rx_ring)
2858 if (likely(rx_ring->first_interrupt))
2859 return 0;
2861 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2862 return 0;
2864 rx_ring->no_interrupt_event_cnt++;
2866 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2867 netif_err(adapter, rx_err, adapter->netdev,
2868 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2870 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2871 smp_mb__before_atomic();
2872 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2879 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2880 struct ena_ring *tx_ring)
2882 struct ena_tx_buffer *tx_buf;
2883 unsigned long last_jiffies;
2887 for (i = 0; i < tx_ring->ring_size; i++) {
2888 tx_buf = &tx_ring->tx_buffer_info[i];
2889 last_jiffies = tx_buf->last_jiffies;
2891 if (last_jiffies == 0)
2892 /* no pending Tx at this location */
2893 continue;
2895 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
2896 2 * adapter->missing_tx_completion_to))) {
2897 /* If after the grace period the interrupt is still not
2898 * received, we schedule a reset
2900 netif_err(adapter, tx_err, adapter->netdev,
2901 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
2903 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2904 smp_mb__before_atomic();
2905 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2909 if (unlikely(time_is_before_jiffies(last_jiffies +
2910 adapter->missing_tx_completion_to))) {
2911 if (!tx_buf->print_once)
2912 netif_notice(adapter, tx_err, adapter->netdev,
2913 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2916 tx_buf->print_once = 1;
2921 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
2922 netif_err(adapter, tx_err, adapter->netdev,
2923 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2924 missed_tx,
2925 adapter->missing_tx_completion_threshold);
2926 adapter->reset_reason =
2927 ENA_REGS_RESET_MISS_TX_CMPL;
2928 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2932 u64_stats_update_begin(&tx_ring->syncp);
2933 tx_ring->tx_stats.missed_tx = missed_tx;
2934 u64_stats_update_end(&tx_ring->syncp);
2939 static void check_for_missing_completions(struct ena_adapter *adapter)
2941 struct ena_ring *tx_ring;
2942 struct ena_ring *rx_ring;
2945 /* Make sure the driver doesn't turn the device off in another process */
2946 smp_rmb();
2948 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2949 return;
2951 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2952 return;
2954 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
2955 return;
2957 budget = ENA_MONITORED_TX_QUEUES;
2959 for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2960 tx_ring = &adapter->tx_ring[i];
2961 rx_ring = &adapter->rx_ring[i];
2963 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
2967 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
2976 adapter->last_monitored_tx_qid = i % adapter->num_queues;
2979 /* trigger napi schedule after 2 consecutive detections */
2980 #define EMPTY_RX_REFILL 2
2981 /* For the rare case where the device runs out of Rx descriptors and the
2982 * napi handler failed to refill new Rx descriptors (due to a lack of memory, for example).
2984 * This case will lead to a deadlock:
2985 * The device won't send interrupts since all the new Rx packets will be dropped
2986 * The napi handler won't allocate new Rx descriptors so the device won't
2987 * be able to send new packets.
2989 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2990 * It is recommended to have at least 512MB, with a minimum of 128MB for
2991 * a constrained environment.
2993 * When such a situation is detected - reschedule napi. */
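/* An illustrative (not driver-mandated) way to apply the recommendation
 * above, reserving 512MB for the allocator:
 *   sysctl -w vm.min_free_kbytes=524288
 */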
2995 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2997 struct ena_ring *rx_ring;
2998 int i, refill_required;
3000 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3001 return;
3003 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3004 return;
3006 for (i = 0; i < adapter->num_queues; i++) {
3007 rx_ring = &adapter->rx_ring[i];
3009 refill_required =
3010 ena_com_free_desc(rx_ring->ena_com_io_sq);
3011 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3012 rx_ring->empty_rx_queue++;
3014 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3015 u64_stats_update_begin(&rx_ring->syncp);
3016 rx_ring->rx_stats.empty_rx_ring++;
3017 u64_stats_update_end(&rx_ring->syncp);
3019 netif_err(adapter, drv, adapter->netdev,
3020 "trigger refill for ring %d\n", i);
3022 napi_schedule(rx_ring->napi);
3023 rx_ring->empty_rx_queue = 0;
3026 rx_ring->empty_rx_queue = 0;
3031 /* Check for keep alive expiration */
3032 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3034 unsigned long keep_alive_expired;
3036 if (!adapter->wd_state)
3037 return;
3039 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3040 return;
3042 keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
3043 adapter->keep_alive_timeout);
3044 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3045 netif_err(adapter, drv, adapter->netdev,
3046 "Keep alive watchdog timeout.\n");
3047 u64_stats_update_begin(&adapter->syncp);
3048 adapter->dev_stats.wd_expired++;
3049 u64_stats_update_end(&adapter->syncp);
3050 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3051 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3055 static void check_for_admin_com_state(struct ena_adapter *adapter)
3057 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3058 netif_err(adapter, drv, adapter->netdev,
3059 "ENA admin queue is not in running state!\n");
3060 u64_stats_update_begin(&adapter->syncp);
3061 adapter->dev_stats.admin_q_pause++;
3062 u64_stats_update_end(&adapter->syncp);
3063 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3064 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3068 static void ena_update_hints(struct ena_adapter *adapter,
3069 struct ena_admin_ena_hw_hints *hints)
3071 struct net_device *netdev = adapter->netdev;
3073 if (hints->admin_completion_tx_timeout)
3074 adapter->ena_dev->admin_queue.completion_timeout =
3075 hints->admin_completion_tx_timeout * 1000;
3077 if (hints->mmio_read_timeout)
3078 /* convert to usec */
3079 adapter->ena_dev->mmio_read.reg_read_to =
3080 hints->mmio_read_timeout * 1000;
3082 if (hints->missed_tx_completion_count_threshold_to_reset)
3083 adapter->missing_tx_completion_threshold =
3084 hints->missed_tx_completion_count_threshold_to_reset;
3086 if (hints->missing_tx_completion_timeout) {
3087 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3088 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3090 adapter->missing_tx_completion_to =
3091 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3094 if (hints->netdev_wd_timeout)
3095 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3097 if (hints->driver_watchdog_timeout) {
3098 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3099 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3101 adapter->keep_alive_timeout =
3102 msecs_to_jiffies(hints->driver_watchdog_timeout);
3106 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3107 struct net_device *netdev)
3109 host_info->supported_network_features[0] =
3110 netdev->features & GENMASK_ULL(31, 0);
3111 host_info->supported_network_features[1] =
3112 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
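/* Illustrative split: netdev->features == 0x0000000100004862ULL reports
 * word[0] = 0x00004862 and word[1] = 0x00000001.
 */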
3115 static void ena_timer_service(struct timer_list *t)
3117 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3118 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3119 struct ena_admin_host_info *host_info =
3120 adapter->ena_dev->host_attr.host_info;
3122 check_for_missing_keep_alive(adapter);
3124 check_for_admin_com_state(adapter);
3126 check_for_missing_completions(adapter);
3128 check_for_empty_rx_ring(adapter);
3130 if (debug_area)
3131 ena_dump_stats_to_buf(adapter, debug_area);
3133 if (host_info)
3134 ena_update_host_info(host_info, adapter->netdev);
3136 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3137 netif_err(adapter, drv, adapter->netdev,
3138 "Trigger reset is on\n");
3139 ena_dump_stats_to_dmesg(adapter);
3140 queue_work(ena_wq, &adapter->reset_task);
3144 /* Reset the timer */
3145 mod_timer(&adapter->timer_service, jiffies + HZ);
3148 static int ena_calc_io_queue_num(struct pci_dev *pdev,
3149 struct ena_com_dev *ena_dev,
3150 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3152 int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
3154 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3155 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3156 &get_feat_ctx->max_queue_ext.max_queue_ext;
3157 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
3158 max_queue_ext->max_rx_cq_num);
3160 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3161 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3163 struct ena_admin_queue_feature_desc *max_queues =
3164 &get_feat_ctx->max_queues;
3165 io_tx_sq_num = max_queues->max_sq_num;
3166 io_tx_cq_num = max_queues->max_cq_num;
3167 io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
3170 /* In case of LLQ use the llq fields for the tx SQ/CQ */
3171 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3172 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
3174 io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3175 io_queue_num = min_t(int, io_queue_num, io_rx_num);
3176 io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
3177 io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
3178 /* 1 IRQ for mgmnt and 1 IRQ for each IO queue */
3179 io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
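/* Worked example (illustrative numbers): 16 online CPUs, a device
 * exposing 8 rx/8 tx queues and 9 MSI-X vectors yield
 * io_queue_num = min(16, 8, 8, 8, 9 - 1) = 8.
 */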
3180 if (unlikely(!io_queue_num)) {
3181 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3185 return io_queue_num;
3188 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3189 struct ena_com_dev *ena_dev,
3190 struct ena_admin_feature_llq_desc *llq,
3191 struct ena_llq_configurations *llq_default_configurations)
3195 u32 llq_feature_mask;
3197 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3198 if (!(ena_dev->supported_features & llq_feature_mask)) {
3200 "LLQ is not supported Fallback to host mode policy.\n");
3201 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3205 has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3207 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3210 "Failed to configure the device mode. Fallback to host mode policy.\n");
3211 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3215 /* Nothing to config, exit */
3216 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3217 return 0;
3219 if (!has_mem_bar) {
3220 dev_err(&pdev->dev,
3221 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3222 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3226 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3227 pci_resource_start(pdev, ENA_MEM_BAR),
3228 pci_resource_len(pdev, ENA_MEM_BAR));
3230 if (!ena_dev->mem_bar)
3231 return -EFAULT;
3233 return 0;
3236 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3237 struct net_device *netdev)
3239 netdev_features_t dev_features = 0;
3241 /* Set offload features */
3242 if (feat->offload.tx &
3243 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3244 dev_features |= NETIF_F_IP_CSUM;
3246 if (feat->offload.tx &
3247 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3248 dev_features |= NETIF_F_IPV6_CSUM;
3250 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3251 dev_features |= NETIF_F_TSO;
3253 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3254 dev_features |= NETIF_F_TSO6;
3256 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3257 dev_features |= NETIF_F_TSO_ECN;
3259 if (feat->offload.rx_supported &
3260 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3261 dev_features |= NETIF_F_RXCSUM;
3263 if (feat->offload.rx_supported &
3264 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3265 dev_features |= NETIF_F_RXCSUM;
3273 netdev->hw_features |= netdev->features;
3274 netdev->vlan_features |= netdev->features;
3277 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3278 struct ena_com_dev_get_features_ctx *feat)
3280 struct net_device *netdev = adapter->netdev;
3282 /* Copy mac address */
3283 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3284 eth_hw_addr_random(netdev);
3285 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3287 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3288 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3291 /* Set offload features */
3292 ena_set_dev_offloads(feat, netdev);
3294 adapter->max_mtu = feat->dev_attr.max_mtu;
3295 netdev->max_mtu = adapter->max_mtu;
3296 netdev->min_mtu = ENA_MIN_MTU;
3299 static int ena_rss_init_default(struct ena_adapter *adapter)
3301 struct ena_com_dev *ena_dev = adapter->ena_dev;
3302 struct device *dev = &adapter->pdev->dev;
3306 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3308 dev_err(dev, "Cannot init indirect table\n");
3312 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3313 val = ethtool_rxfh_indir_default(i, adapter->num_queues);
3314 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3315 ENA_IO_RXQ_IDX(val));
3316 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3317 dev_err(dev, "Cannot fill indirect table\n");
3318 goto err_fill_indir;
3322 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3323 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3324 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3325 dev_err(dev, "Cannot fill hash function\n");
3326 goto err_fill_indir;
3329 rc = ena_com_set_default_hash_ctrl(ena_dev);
3330 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3331 dev_err(dev, "Cannot fill hash control\n");
3332 goto err_fill_indir;
3338 ena_com_rss_destroy(ena_dev);
3344 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3346 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3348 pci_release_selected_regions(pdev, release_bars);
3351 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3353 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3354 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3355 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3356 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3357 llq_config->llq_ring_entry_size_value = 128;
3360 static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
3362 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
3363 struct ena_com_dev *ena_dev = ctx->ena_dev;
3364 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
3365 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
3366 u32 max_tx_queue_size;
3367 u32 max_rx_queue_size;
3369 if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3370 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3371 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
3372 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
3373 max_queue_ext->max_rx_sq_depth);
3374 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
3376 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3377 max_tx_queue_size = min_t(u32, max_tx_queue_size,
3378 llq->max_llq_depth);
3380 max_tx_queue_size = min_t(u32, max_tx_queue_size,
3381 max_queue_ext->max_tx_sq_depth);
3383 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3384 max_queue_ext->max_per_packet_tx_descs);
3385 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3386 max_queue_ext->max_per_packet_rx_descs);
3388 struct ena_admin_queue_feature_desc *max_queues =
3389 &ctx->get_feat_ctx->max_queues;
3390 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
3391 max_queues->max_sq_depth);
3392 max_tx_queue_size = max_queues->max_cq_depth;
3394 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3395 max_tx_queue_size = min_t(u32, max_tx_queue_size,
3396 llq->max_llq_depth);
3398 max_tx_queue_size = min_t(u32, max_tx_queue_size,
3399 max_queues->max_sq_depth);
3401 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3402 max_queues->max_packet_tx_descs);
3403 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3404 max_queues->max_packet_rx_descs);
3407 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
3408 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
3410 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, max_tx_queue_size);
3412 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, max_rx_queue_size);
3415 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
3416 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
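/* Worked example (illustrative, assuming a 1024-entry default ring):
 * a device max CQ depth of 1000 rounds down to 512, the 1024 default
 * then clamps to 512, and the final rounddown_pow_of_two() keeps it
 * at 512.
 */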
3418 ctx->max_tx_queue_size = max_tx_queue_size;
3419 ctx->max_rx_queue_size = max_rx_queue_size;
3420 ctx->tx_queue_size = tx_queue_size;
3421 ctx->rx_queue_size = rx_queue_size;
3426 /* ena_probe - Device Initialization Routine
3427 * @pdev: PCI device information struct
3428 * @ent: entry in ena_pci_tbl
3430 * Returns 0 on success, negative on failure
3432 * ena_probe initializes an adapter identified by a pci_dev structure.
3433 * The OS initialization, configuration of the adapter private structure,
3434 * and a hardware reset occur.
3436 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3438 struct ena_com_dev_get_features_ctx get_feat_ctx;
3439 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
3440 struct ena_llq_configurations llq_config;
3441 struct ena_com_dev *ena_dev = NULL;
3442 struct ena_adapter *adapter;
3443 int io_queue_num, bars, rc;
3444 struct net_device *netdev;
3445 static int adapters_found;
3446 char *queue_type_str;
3449 dev_dbg(&pdev->dev, "%s\n", __func__);
3451 dev_info_once(&pdev->dev, "%s", version);
3453 rc = pci_enable_device_mem(pdev);
3455 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
3459 pci_set_master(pdev);
3461 ena_dev = vzalloc(sizeof(*ena_dev));
3462 if (!ena_dev) {
3463 rc = -ENOMEM;
3464 goto err_disable_device;
3467 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3468 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
3470 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
3472 goto err_free_ena_dev;
3475 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
3476 pci_resource_start(pdev, ENA_REG_BAR),
3477 pci_resource_len(pdev, ENA_REG_BAR));
3478 if (!ena_dev->reg_bar) {
3479 dev_err(&pdev->dev, "failed to remap regs bar\n");
3481 goto err_free_region;
3484 ena_dev->dmadev = &pdev->dev;
3486 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
3488 dev_err(&pdev->dev, "ena device init failed\n");
3491 goto err_free_region;
3494 set_default_llq_configurations(&llq_config);
3496 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3499 dev_err(&pdev->dev, "ena device init failed\n");
3500 goto err_device_destroy;
3503 calc_queue_ctx.ena_dev = ena_dev;
3504 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
3505 calc_queue_ctx.pdev = pdev;
3507 /* Initial Tx and Rx interrupt delay, assuming 1 usec granularity.
3508 * Updated during device initialization with the real granularity
3510 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
3511 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
3512 io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
3513 rc = ena_calc_queue_size(&calc_queue_ctx);
3514 if (rc || io_queue_num <= 0) {
3515 rc = -EFAULT;
3516 goto err_device_destroy;
3519 dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size: %d LLQ is %s\n",
3521 calc_queue_ctx.rx_queue_size,
3522 calc_queue_ctx.tx_queue_size,
3523 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
3524 "ENABLED" : "DISABLED");
3526 /* dev zeroed in init_etherdev */
3527 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
3529 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
3531 goto err_device_destroy;
3534 SET_NETDEV_DEV(netdev, &pdev->dev);
3536 adapter = netdev_priv(netdev);
3537 pci_set_drvdata(pdev, adapter);
3539 adapter->ena_dev = ena_dev;
3540 adapter->netdev = netdev;
3541 adapter->pdev = pdev;
3543 ena_set_conf_feat_params(adapter, &get_feat_ctx);
3545 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3546 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3548 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3549 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3550 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3551 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3552 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3553 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3555 adapter->num_queues = io_queue_num;
3556 adapter->last_monitored_tx_qid = 0;
3558 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
3559 adapter->wd_state = wd_state;
3561 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
3563 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
3566 "Failed to query interrupt moderation feature\n");
3567 goto err_netdev_destroy;
3569 ena_init_io_rings(adapter);
3571 netdev->netdev_ops = &ena_netdev_ops;
3572 netdev->watchdog_timeo = TX_TIMEOUT;
3573 ena_set_ethtool_ops(netdev);
3575 netdev->priv_flags |= IFF_UNICAST_FLT;
3577 u64_stats_init(&adapter->syncp);
3579 rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3582 "Failed to enable and set the admin interrupts\n");
3583 goto err_worker_destroy;
3585 rc = ena_rss_init_default(adapter);
3586 if (rc && (rc != -EOPNOTSUPP)) {
3587 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
3591 ena_config_debug_area(adapter);
3593 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
3595 netif_carrier_off(netdev);
3597 rc = register_netdev(netdev);
3599 dev_err(&pdev->dev, "Cannot register net device\n");
3603 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
3605 adapter->last_keep_alive_jiffies = jiffies;
3606 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
3607 adapter->missing_tx_completion_to = TX_TIMEOUT;
3608 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
3610 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
3612 timer_setup(&adapter->timer_service, ena_timer_service, 0);
3613 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3615 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3616 queue_type_str = "Regular";
3618 queue_type_str = "Low Latency";
3620 dev_info(&pdev->dev,
3621 "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
3622 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
3623 netdev->dev_addr, io_queue_num, queue_type_str);
3625 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3632 ena_com_delete_debug_area(ena_dev);
3633 ena_com_rss_destroy(ena_dev);
3635 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3636 /* stop submitting admin commands on a device that was reset */
3637 ena_com_set_admin_running_state(ena_dev, false);
3638 ena_free_mgmnt_irq(adapter);
3639 ena_disable_msix(adapter);
3641 ena_com_destroy_interrupt_moderation(ena_dev);
3642 del_timer(&adapter->timer_service);
3644 free_netdev(netdev);
3646 ena_com_delete_host_info(ena_dev);
3647 ena_com_admin_destroy(ena_dev);
3649 ena_release_bars(ena_dev, pdev);
3653 pci_disable_device(pdev);
3657 /*****************************************************************************/
3659 /* ena_remove - Device Removal Routine
3660 * @pdev: PCI device information struct
3662 * ena_remove is called by the PCI subsystem to alert the driver
3663 * that it should release a PCI device.
3665 static void ena_remove(struct pci_dev *pdev)
3667 struct ena_adapter *adapter = pci_get_drvdata(pdev);
3668 struct ena_com_dev *ena_dev;
3669 struct net_device *netdev;
3671 ena_dev = adapter->ena_dev;
3672 netdev = adapter->netdev;
3674 #ifdef CONFIG_RFS_ACCEL
3675 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
3676 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
3677 netdev->rx_cpu_rmap = NULL;
3679 #endif /* CONFIG_RFS_ACCEL */
3680 del_timer_sync(&adapter->timer_service);
3682 cancel_work_sync(&adapter->reset_task);
3685 ena_destroy_device(adapter, true);
3688 unregister_netdev(netdev);
3690 free_netdev(netdev);
3692 ena_com_rss_destroy(ena_dev);
3694 ena_com_delete_debug_area(ena_dev);
3696 ena_com_delete_host_info(ena_dev);
3698 ena_release_bars(ena_dev, pdev);
3700 pci_disable_device(pdev);
3702 ena_com_destroy_interrupt_moderation(ena_dev);
3708 /* ena_suspend - PM suspend callback
3709 * @pdev: PCI device information struct
3710 * @state: power state
3712 static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
3714 struct ena_adapter *adapter = pci_get_drvdata(pdev);
3716 u64_stats_update_begin(&adapter->syncp);
3717 adapter->dev_stats.suspend++;
3718 u64_stats_update_end(&adapter->syncp);
3721 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3723 "ignoring device reset request as the device is being suspended\n");
3724 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3726 ena_destroy_device(adapter, true);
3731 /* ena_resume - PM resume callback
3732 * @pdev: PCI device information struct
3735 static int ena_resume(struct pci_dev *pdev)
3737 struct ena_adapter *adapter = pci_get_drvdata(pdev);
3740 u64_stats_update_begin(&adapter->syncp);
3741 adapter->dev_stats.resume++;
3742 u64_stats_update_end(&adapter->syncp);
3745 rc = ena_restore_device(adapter);
3751 static struct pci_driver ena_pci_driver = {
3752 .name = DRV_MODULE_NAME,
3753 .id_table = ena_pci_tbl,
3754 .probe = ena_probe,
3755 .remove = ena_remove,
3757 .suspend = ena_suspend,
3758 .resume = ena_resume,
3760 .sriov_configure = pci_sriov_configure_simple,
3763 static int __init ena_init(void)
3765 pr_info("%s", version);
3767 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
3769 pr_err("Failed to create workqueue\n");
3773 return pci_register_driver(&ena_pci_driver);
3776 static void __exit ena_cleanup(void)
3778 pci_unregister_driver(&ena_pci_driver);
3781 destroy_workqueue(ena_wq);
3786 /******************************************************************************
3787 ******************************** AENQ Handlers *******************************
3788 *****************************************************************************/
3789 /* ena_update_on_link_change:
3790 * Notify the network interface about the change in link status
3792 static void ena_update_on_link_change(void *adapter_data,
3793 struct ena_admin_aenq_entry *aenq_e)
3795 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3796 struct ena_admin_aenq_link_change_desc *aenq_desc =
3797 (struct ena_admin_aenq_link_change_desc *)aenq_e;
3798 int status = aenq_desc->flags &
3799 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3801 if (status) {
3802 netdev_dbg(adapter->netdev, "%s\n", __func__);
3803 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3804 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
3805 netif_carrier_on(adapter->netdev);
3806 } else {
3807 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3808 netif_carrier_off(adapter->netdev);
3812 static void ena_keep_alive_wd(void *adapter_data,
3813 struct ena_admin_aenq_entry *aenq_e)
3815 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3816 struct ena_admin_aenq_keep_alive_desc *desc;
3819 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3820 adapter->last_keep_alive_jiffies = jiffies;
3822 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
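/* Illustrative composition: rx_drops_high == 0x1 and rx_drops_low == 0x2
 * yield a 64-bit drop count of 0x100000002.
 */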
3824 u64_stats_update_begin(&adapter->syncp);
3825 adapter->dev_stats.rx_drops = rx_drops;
3826 u64_stats_update_end(&adapter->syncp);
3829 static void ena_notification(void *adapter_data,
3830 struct ena_admin_aenq_entry *aenq_e)
3832 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3833 struct ena_admin_ena_hw_hints *hints;
3835 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3836 "Invalid group(%x) expected %x\n",
3837 aenq_e->aenq_common_desc.group,
3838 ENA_ADMIN_NOTIFICATION);
3840 switch (aenq_e->aenq_common_desc.syndrom) {
3841 case ENA_ADMIN_UPDATE_HINTS:
3842 hints = (struct ena_admin_ena_hw_hints *)
3843 (&aenq_e->inline_data_w4);
3844 ena_update_hints(adapter, hints);
3845 break;
3846 default:
3847 netif_err(adapter, drv, adapter->netdev,
3848 "Invalid aenq notification syndrome %d\n",
3849 aenq_e->aenq_common_desc.syndrom);
3853 /* This handler will be called for unknown event groups or unimplemented handlers */
3854 static void unimplemented_aenq_handler(void *data,
3855 struct ena_admin_aenq_entry *aenq_e)
3857 struct ena_adapter *adapter = (struct ena_adapter *)data;
3859 netif_err(adapter, drv, adapter->netdev,
3860 "Unknown event was received or event with unimplemented handler\n");
3863 static struct ena_aenq_handlers aenq_handlers = {
3864 .handlers = {
3865 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3866 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3867 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3868 },
3869 .unimplemented_handler = unimplemented_aenq_handler
3872 module_init(ena_init);
3873 module_exit(ena_cleanup);