drivers/net/ethernet/amazon/ena/ena_netdev.c
1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #ifdef CONFIG_RFS_ACCEL
36 #include <linux/cpu_rmap.h>
37 #endif /* CONFIG_RFS_ACCEL */
38 #include <linux/ethtool.h>
39 #include <linux/if_vlan.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/numa.h>
43 #include <linux/pci.h>
44 #include <linux/utsname.h>
45 #include <linux/version.h>
46 #include <linux/vmalloc.h>
47 #include <net/ip.h>
48
49 #include "ena_netdev.h"
50 #include "ena_pci_id_tbl.h"
51
52 static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
53
54 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
55 MODULE_DESCRIPTION(DEVICE_NAME);
56 MODULE_LICENSE("GPL");
57 MODULE_VERSION(DRV_MODULE_VERSION);
58
59 /* Time in jiffies before concluding the transmitter is hung. */
60 #define TX_TIMEOUT  (5 * HZ)
61
62 #define ENA_NAPI_BUDGET 64
63
64 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
65                 NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
66 static int debug = -1;
67 module_param(debug, int, 0);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
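/* Note: the debug value is presumably passed to
 * netif_msg_init(debug, DEFAULT_MSG_ENABLE) in the probe path (outside
 * this excerpt). With the standard netif_msg_init() semantics, the -1
 * default falls back to DEFAULT_MSG_ENABLE, and a value N enables the N
 * lowest NETIF_MSG_* categories, i.e. (1 << N) - 1 (so e.g. debug=5
 * enables the five lowest categories).
 */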
69
70 static struct ena_aenq_handlers aenq_handlers;
71
72 static struct workqueue_struct *ena_wq;
73
74 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
75
76 static int ena_rss_init_default(struct ena_adapter *adapter);
77 static void check_for_admin_com_state(struct ena_adapter *adapter);
78 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
79 static int ena_restore_device(struct ena_adapter *adapter);
80
81 static void ena_tx_timeout(struct net_device *dev)
82 {
83         struct ena_adapter *adapter = netdev_priv(dev);
84
85         /* Change the state of the device to trigger a reset.
86          * Check that a reset is not already in progress or triggered.
87          */
88
89         if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
90                 return;
91
92         adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
93         u64_stats_update_begin(&adapter->syncp);
94         adapter->dev_stats.tx_timeout++;
95         u64_stats_update_end(&adapter->syncp);
96
97         netif_err(adapter, tx_err, dev, "Transmit timed out\n");
98 }
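/* Note: test_and_set_bit() makes the trigger idempotent, so repeated
 * watchdog callbacks while a reset is pending are only counted once. The
 * reset itself is assumed to be carried out asynchronously by the driver's
 * reset/watchdog path (see ena_destroy_device()/ena_restore_device(),
 * forward-declared above), not inside this handler.
 */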
99
100 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
101 {
102         int i;
103
104         for (i = 0; i < adapter->num_queues; i++)
105                 adapter->rx_ring[i].mtu = mtu;
106 }
107
108 static int ena_change_mtu(struct net_device *dev, int new_mtu)
109 {
110         struct ena_adapter *adapter = netdev_priv(dev);
111         int ret;
112
113         ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
114         if (!ret) {
115                 netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
116                 update_rx_ring_mtu(adapter, new_mtu);
117                 dev->mtu = new_mtu;
118         } else {
119                 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
120                           new_mtu);
121         }
122
123         return ret;
124 }
125
126 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
127 {
128 #ifdef CONFIG_RFS_ACCEL
129         u32 i;
130         int rc;
131
132         adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
133         if (!adapter->netdev->rx_cpu_rmap)
134                 return -ENOMEM;
135         for (i = 0; i < adapter->num_queues; i++) {
136                 int irq_idx = ENA_IO_IRQ_IDX(i);
137
138                 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
139                                       pci_irq_vector(adapter->pdev, irq_idx));
140                 if (rc) {
141                         free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
142                         adapter->netdev->rx_cpu_rmap = NULL;
143                         return rc;
144                 }
145         }
146 #endif /* CONFIG_RFS_ACCEL */
147         return 0;
148 }
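/* Note: the CPU rmap above is only built when CONFIG_RFS_ACCEL is set;
 * otherwise the function compiles down to "return 0". The rmap lets
 * accelerated RFS steer flows toward the CPU that services each queue's
 * MSI-X vector (looked up via pci_irq_vector()).
 */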
149
150 static void ena_init_io_rings_common(struct ena_adapter *adapter,
151                                      struct ena_ring *ring, u16 qid)
152 {
153         ring->qid = qid;
154         ring->pdev = adapter->pdev;
155         ring->dev = &adapter->pdev->dev;
156         ring->netdev = adapter->netdev;
157         ring->napi = &adapter->ena_napi[qid].napi;
158         ring->adapter = adapter;
159         ring->ena_dev = adapter->ena_dev;
160         ring->per_napi_packets = 0;
161         ring->per_napi_bytes = 0;
162         ring->cpu = 0;
163         ring->first_interrupt = false;
164         ring->no_interrupt_event_cnt = 0;
165         u64_stats_init(&ring->syncp);
166 }
167
168 static void ena_init_io_rings(struct ena_adapter *adapter)
169 {
170         struct ena_com_dev *ena_dev;
171         struct ena_ring *txr, *rxr;
172         int i;
173
174         ena_dev = adapter->ena_dev;
175
176         for (i = 0; i < adapter->num_queues; i++) {
177                 txr = &adapter->tx_ring[i];
178                 rxr = &adapter->rx_ring[i];
179
180                 /* TX/RX common ring state */
181                 ena_init_io_rings_common(adapter, txr, i);
182                 ena_init_io_rings_common(adapter, rxr, i);
183
184                 /* TX specific ring state */
185                 txr->ring_size = adapter->requested_tx_ring_size;
186                 txr->tx_max_header_size = ena_dev->tx_max_header_size;
187                 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
188                 txr->sgl_size = adapter->max_tx_sgl_size;
189                 txr->smoothed_interval =
190                         ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
191
192                 /* RX specific ring state */
193                 rxr->ring_size = adapter->requested_rx_ring_size;
194                 rxr->rx_copybreak = adapter->rx_copybreak;
195                 rxr->sgl_size = adapter->max_rx_sgl_size;
196                 rxr->smoothed_interval =
197                         ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
198                 rxr->empty_rx_queue = 0;
199                 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
200         }
201 }
202
203 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
204  * @adapter: network interface device structure
205  * @qid: queue index
206  *
207  * Return 0 on success, negative on failure
208  */
209 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
210 {
211         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
212         struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
213         int size, i, node;
214
215         if (tx_ring->tx_buffer_info) {
216                 netif_err(adapter, ifup,
217                           adapter->netdev, "tx_buffer_info is not NULL");
218                 return -EEXIST;
219         }
220
221         size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
222         node = cpu_to_node(ena_irq->cpu);
223
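        /* Prefer memory on the NUMA node of the CPU that services this
         * queue's IRQ; if node-local memory is unavailable, fall back to
         * an allocation from any node rather than failing queue setup.
         */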
224         tx_ring->tx_buffer_info = vzalloc_node(size, node);
225         if (!tx_ring->tx_buffer_info) {
226                 tx_ring->tx_buffer_info = vzalloc(size);
227                 if (!tx_ring->tx_buffer_info)
228                         goto err_tx_buffer_info;
229         }
230
231         size = sizeof(u16) * tx_ring->ring_size;
232         tx_ring->free_ids = vzalloc_node(size, node);
233         if (!tx_ring->free_ids) {
234                 tx_ring->free_ids = vzalloc(size);
235                 if (!tx_ring->free_ids)
236                         goto err_tx_free_ids;
237         }
238
239         size = tx_ring->tx_max_header_size;
240         tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
241         if (!tx_ring->push_buf_intermediate_buf) {
242                 tx_ring->push_buf_intermediate_buf = vzalloc(size);
243                 if (!tx_ring->push_buf_intermediate_buf)
244                         goto err_push_buf_intermediate_buf;
245         }
246
247         /* Req id ring for TX out of order completions */
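        /* free_ids starts out as the identity mapping; request ids are
         * handed out by the transmit path (outside this excerpt) in
         * next_to_use order and returned at next_to_clean in
         * ena_clean_tx_irq(), allowing completions to arrive out of order.
         */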
248         for (i = 0; i < tx_ring->ring_size; i++)
249                 tx_ring->free_ids[i] = i;
250
251         /* Reset tx statistics */
252         memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
253
254         tx_ring->next_to_use = 0;
255         tx_ring->next_to_clean = 0;
256         tx_ring->cpu = ena_irq->cpu;
257         return 0;
258
259 err_push_buf_intermediate_buf:
260         vfree(tx_ring->free_ids);
261         tx_ring->free_ids = NULL;
262 err_tx_free_ids:
263         vfree(tx_ring->tx_buffer_info);
264         tx_ring->tx_buffer_info = NULL;
265 err_tx_buffer_info:
266         return -ENOMEM;
267 }
268
269 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
270  * @adapter: network interface device structure
271  * @qid: queue index
272  *
273  * Free all transmit software resources
274  */
275 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
276 {
277         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
278
279         vfree(tx_ring->tx_buffer_info);
280         tx_ring->tx_buffer_info = NULL;
281
282         vfree(tx_ring->free_ids);
283         tx_ring->free_ids = NULL;
284
285         vfree(tx_ring->push_buf_intermediate_buf);
286         tx_ring->push_buf_intermediate_buf = NULL;
287 }
288
289 /* ena_setup_all_tx_resources - allocate I/O Tx resources for all queues
290  * @adapter: private structure
291  *
292  * Return 0 on success, negative on failure
293  */
294 static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
295 {
296         int i, rc = 0;
297
298         for (i = 0; i < adapter->num_queues; i++) {
299                 rc = ena_setup_tx_resources(adapter, i);
300                 if (rc)
301                         goto err_setup_tx;
302         }
303
304         return 0;
305
306 err_setup_tx:
307
308         netif_err(adapter, ifup, adapter->netdev,
309                   "Tx queue %d: allocation failed\n", i);
310
311         /* rewind the index freeing the rings as we go */
312         while (i--)
313                 ena_free_tx_resources(adapter, i);
314         return rc;
315 }
316
317 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
318  * @adapter: board private structure
319  *
320  * Free all transmit software resources
321  */
322 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
323 {
324         int i;
325
326         for (i = 0; i < adapter->num_queues; i++)
327                 ena_free_tx_resources(adapter, i);
328 }
329
330 static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
331 {
332         if (likely(req_id < rx_ring->ring_size))
333                 return 0;
334
335         netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
336                   "Invalid rx req_id: %hu\n", req_id);
337
338         u64_stats_update_begin(&rx_ring->syncp);
339         rx_ring->rx_stats.bad_req_id++;
340         u64_stats_update_end(&rx_ring->syncp);
341
342         /* Trigger device reset */
343         rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
344         set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
345         return -EFAULT;
346 }
347
348 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
349  * @adapter: network interface device structure
350  * @qid: queue index
351  *
352  * Returns 0 on success, negative on failure
353  */
354 static int ena_setup_rx_resources(struct ena_adapter *adapter,
355                                   u32 qid)
356 {
357         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
358         struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
359         int size, node, i;
360
361         if (rx_ring->rx_buffer_info) {
362                 netif_err(adapter, ifup, adapter->netdev,
363                           "rx_buffer_info is not NULL");
364                 return -EEXIST;
365         }
366
367         /* Allocate an extra element so that in the rx path
368          * we can always prefetch rx_info + 1
369          */
370         size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
371         node = cpu_to_node(ena_irq->cpu);
372
373         rx_ring->rx_buffer_info = vzalloc_node(size, node);
374         if (!rx_ring->rx_buffer_info) {
375                 rx_ring->rx_buffer_info = vzalloc(size);
376                 if (!rx_ring->rx_buffer_info)
377                         return -ENOMEM;
378         }
379
380         size = sizeof(u16) * rx_ring->ring_size;
381         rx_ring->free_ids = vzalloc_node(size, node);
382         if (!rx_ring->free_ids) {
383                 rx_ring->free_ids = vzalloc(size);
384                 if (!rx_ring->free_ids) {
385                         vfree(rx_ring->rx_buffer_info);
386                         rx_ring->rx_buffer_info = NULL;
387                         return -ENOMEM;
388                 }
389         }
390
391         /* Req id ring for receiving RX pkts out of order */
392         for (i = 0; i < rx_ring->ring_size; i++)
393                 rx_ring->free_ids[i] = i;
394
395         /* Reset rx statistics */
396         memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
397
398         rx_ring->next_to_clean = 0;
399         rx_ring->next_to_use = 0;
400         rx_ring->cpu = ena_irq->cpu;
401
402         return 0;
403 }
404
405 /* ena_free_rx_resources - Free I/O Rx Resources
406  * @adapter: network interface device structure
407  * @qid: queue index
408  *
409  * Free all receive software resources
410  */
411 static void ena_free_rx_resources(struct ena_adapter *adapter,
412                                   u32 qid)
413 {
414         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
415
416         vfree(rx_ring->rx_buffer_info);
417         rx_ring->rx_buffer_info = NULL;
418
419         vfree(rx_ring->free_ids);
420         rx_ring->free_ids = NULL;
421 }
422
423 /* ena_setup_all_rx_resources - allocate I/O Rx resources for all queues
424  * @adapter: board private structure
425  *
426  * Return 0 on success, negative on failure
427  */
428 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
429 {
430         int i, rc = 0;
431
432         for (i = 0; i < adapter->num_queues; i++) {
433                 rc = ena_setup_rx_resources(adapter, i);
434                 if (rc)
435                         goto err_setup_rx;
436         }
437
438         return 0;
439
440 err_setup_rx:
441
442         netif_err(adapter, ifup, adapter->netdev,
443                   "Rx queue %d: allocation failed\n", i);
444
445         /* rewind the index freeing the rings as we go */
446         while (i--)
447                 ena_free_rx_resources(adapter, i);
448         return rc;
449 }
450
451 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
452  * @adapter: board private structure
453  *
454  * Free all receive software resources
455  */
456 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
457 {
458         int i;
459
460         for (i = 0; i < adapter->num_queues; i++)
461                 ena_free_rx_resources(adapter, i);
462 }
463
464 static int ena_alloc_rx_page(struct ena_ring *rx_ring,
465                                     struct ena_rx_buffer *rx_info, gfp_t gfp)
466 {
467         struct ena_com_buf *ena_buf;
468         struct page *page;
469         dma_addr_t dma;
470
471         /* if the previously allocated page has not been used yet */
472         if (unlikely(rx_info->page))
473                 return 0;
474
475         page = alloc_page(gfp);
476         if (unlikely(!page)) {
477                 u64_stats_update_begin(&rx_ring->syncp);
478                 rx_ring->rx_stats.page_alloc_fail++;
479                 u64_stats_update_end(&rx_ring->syncp);
480                 return -ENOMEM;
481         }
482
483         dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
484                            DMA_FROM_DEVICE);
485         if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
486                 u64_stats_update_begin(&rx_ring->syncp);
487                 rx_ring->rx_stats.dma_mapping_err++;
488                 u64_stats_update_end(&rx_ring->syncp);
489
490                 __free_page(page);
491                 return -EIO;
492         }
493         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
494                   "alloc page %p, rx_info %p\n", page, rx_info);
495
496         rx_info->page = page;
497         rx_info->page_offset = 0;
498         ena_buf = &rx_info->ena_buf;
499         ena_buf->paddr = dma;
500         ena_buf->len = ENA_PAGE_SIZE;
501
502         return 0;
503 }
504
505 static void ena_free_rx_page(struct ena_ring *rx_ring,
506                              struct ena_rx_buffer *rx_info)
507 {
508         struct page *page = rx_info->page;
509         struct ena_com_buf *ena_buf = &rx_info->ena_buf;
510
511         if (unlikely(!page)) {
512                 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
513                            "Trying to free unallocated buffer\n");
514                 return;
515         }
516
517         dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
518                        DMA_FROM_DEVICE);
519
520         __free_page(page);
521         rx_info->page = NULL;
522 }
523
524 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
525 {
526         u16 next_to_use, req_id;
527         u32 i;
528         int rc;
529
530         next_to_use = rx_ring->next_to_use;
531
532         for (i = 0; i < num; i++) {
533                 struct ena_rx_buffer *rx_info;
534
535                 req_id = rx_ring->free_ids[next_to_use];
536                 rc = validate_rx_req_id(rx_ring, req_id);
537                 if (unlikely(rc < 0))
538                         break;
539
540                 rx_info = &rx_ring->rx_buffer_info[req_id];
541
542
543                 rc = ena_alloc_rx_page(rx_ring, rx_info,
544                                        GFP_ATOMIC | __GFP_COMP);
545                 if (unlikely(rc < 0)) {
546                         netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
547                                    "failed to alloc buffer for rx queue %d\n",
548                                    rx_ring->qid);
549                         break;
550                 }
551                 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
552                                                 &rx_info->ena_buf,
553                                                 req_id);
554                 if (unlikely(rc)) {
555                         netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
556                                    "failed to add buffer for rx queue %d\n",
557                                    rx_ring->qid);
558                         break;
559                 }
560                 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
561                                                    rx_ring->ring_size);
562         }
563
564         if (unlikely(i < num)) {
565                 u64_stats_update_begin(&rx_ring->syncp);
566                 rx_ring->rx_stats.refil_partial++;
567                 u64_stats_update_end(&rx_ring->syncp);
568                 netdev_warn(rx_ring->netdev,
569                             "refilled rx qid %d with only %d buffers (from %d)\n",
570                             rx_ring->qid, i, num);
571         }
572
573         /* ena_com_write_sq_doorbell issues a wmb() */
574         if (likely(i))
575                 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
576
577         rx_ring->next_to_use = next_to_use;
578
579         return i;
580 }
581
582 static void ena_free_rx_bufs(struct ena_adapter *adapter,
583                              u32 qid)
584 {
585         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
586         u32 i;
587
588         for (i = 0; i < rx_ring->ring_size; i++) {
589                 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
590
591                 if (rx_info->page)
592                         ena_free_rx_page(rx_ring, rx_info);
593         }
594 }
595
596 /* ena_refill_all_rx_bufs - allocate Rx buffers for all queues
597  * @adapter: board private structure
598  */
599 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
600 {
601         struct ena_ring *rx_ring;
602         int i, rc, bufs_num;
603
604         for (i = 0; i < adapter->num_queues; i++) {
605                 rx_ring = &adapter->rx_ring[i];
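                /* Fill to ring_size - 1, presumably the usual convention
                 * of leaving one descriptor unused so that a completely
                 * full ring can be distinguished from an empty one.
                 */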
606                 bufs_num = rx_ring->ring_size - 1;
607                 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
608
609                 if (unlikely(rc != bufs_num))
610                         netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
611                                    "refilling Queue %d failed. allocated %d buffers from: %d\n",
612                                    i, rc, bufs_num);
613         }
614 }
615
616 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
617 {
618         int i;
619
620         for (i = 0; i < adapter->num_queues; i++)
621                 ena_free_rx_bufs(adapter, i);
622 }
623
624 static void ena_unmap_tx_skb(struct ena_ring *tx_ring,
625                                     struct ena_tx_buffer *tx_info)
626 {
627         struct ena_com_buf *ena_buf;
628         u32 cnt;
629         int i;
630
631         ena_buf = tx_info->bufs;
632         cnt = tx_info->num_of_bufs;
633
634         if (unlikely(!cnt))
635                 return;
636
637         if (tx_info->map_linear_data) {
638                 dma_unmap_single(tx_ring->dev,
639                                  dma_unmap_addr(ena_buf, paddr),
640                                  dma_unmap_len(ena_buf, len),
641                                  DMA_TO_DEVICE);
642                 ena_buf++;
643                 cnt--;
644         }
645
646         /* unmap remaining mapped pages */
647         for (i = 0; i < cnt; i++) {
648                 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
649                                dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
650                 ena_buf++;
651         }
652 }
653
654 /* ena_free_tx_bufs - Free Tx Buffers per Queue
655  * @tx_ring: TX ring whose buffers are to be freed
656  */
657 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
658 {
659         bool print_once = true;
660         u32 i;
661
662         for (i = 0; i < tx_ring->ring_size; i++) {
663                 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
664
665                 if (!tx_info->skb)
666                         continue;
667
668                 if (print_once) {
669                         netdev_notice(tx_ring->netdev,
670                                       "free uncompleted tx skb qid %d idx 0x%x\n",
671                                       tx_ring->qid, i);
672                         print_once = false;
673                 } else {
674                         netdev_dbg(tx_ring->netdev,
675                                    "free uncompleted tx skb qid %d idx 0x%x\n",
676                                    tx_ring->qid, i);
677                 }
678
679                 ena_unmap_tx_skb(tx_ring, tx_info);
680
681                 dev_kfree_skb_any(tx_info->skb);
682         }
683         netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
684                                                   tx_ring->qid));
685 }
686
687 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
688 {
689         struct ena_ring *tx_ring;
690         int i;
691
692         for (i = 0; i < adapter->num_queues; i++) {
693                 tx_ring = &adapter->tx_ring[i];
694                 ena_free_tx_bufs(tx_ring);
695         }
696 }
697
698 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
699 {
700         u16 ena_qid;
701         int i;
702
703         for (i = 0; i < adapter->num_queues; i++) {
704                 ena_qid = ENA_IO_TXQ_IDX(i);
705                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
706         }
707 }
708
709 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
710 {
711         u16 ena_qid;
712         int i;
713
714         for (i = 0; i < adapter->num_queues; i++) {
715                 ena_qid = ENA_IO_RXQ_IDX(i);
716                 cancel_work_sync(&adapter->ena_napi[i].dim.work);
717                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
718         }
719 }
720
721 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
722 {
723         ena_destroy_all_tx_queues(adapter);
724         ena_destroy_all_rx_queues(adapter);
725 }
726
727 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
728 {
729         struct ena_tx_buffer *tx_info = NULL;
730
731         if (likely(req_id < tx_ring->ring_size)) {
732                 tx_info = &tx_ring->tx_buffer_info[req_id];
733                 if (likely(tx_info->skb))
734                         return 0;
735         }
736
737         if (tx_info)
738                 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
739                           "tx_info doesn't have valid skb\n");
740         else
741                 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
742                           "Invalid req_id: %hu\n", req_id);
743
744         u64_stats_update_begin(&tx_ring->syncp);
745         tx_ring->tx_stats.bad_req_id++;
746         u64_stats_update_end(&tx_ring->syncp);
747
748         /* Trigger device reset */
749         tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
750         set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
751         return -EFAULT;
752 }
753
754 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
755 {
756         struct netdev_queue *txq;
757         bool above_thresh;
758         u32 tx_bytes = 0;
759         u32 total_done = 0;
760         u16 next_to_clean;
761         u16 req_id;
762         int tx_pkts = 0;
763         int rc;
764
765         next_to_clean = tx_ring->next_to_clean;
766         txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
767
768         while (tx_pkts < budget) {
769                 struct ena_tx_buffer *tx_info;
770                 struct sk_buff *skb;
771
772                 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
773                                                 &req_id);
774                 if (rc)
775                         break;
776
777                 rc = validate_tx_req_id(tx_ring, req_id);
778                 if (rc)
779                         break;
780
781                 tx_info = &tx_ring->tx_buffer_info[req_id];
782                 skb = tx_info->skb;
783
784                 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
785                 prefetch(&skb->end);
786
787                 tx_info->skb = NULL;
788                 tx_info->last_jiffies = 0;
789
790                 ena_unmap_tx_skb(tx_ring, tx_info);
791
792                 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
793                           "tx_poll: q %d skb %p completed\n", tx_ring->qid,
794                           skb);
795
796                 tx_bytes += skb->len;
797                 dev_kfree_skb(skb);
798                 tx_pkts++;
799                 total_done += tx_info->tx_descs;
800
801                 tx_ring->free_ids[next_to_clean] = req_id;
802                 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
803                                                      tx_ring->ring_size);
804         }
805
806         tx_ring->next_to_clean = next_to_clean;
807         ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
808         ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
809
810         netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
811
812         netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
813                   "tx_poll: q %d done. total pkts: %d\n",
814                   tx_ring->qid, tx_pkts);
815
816         /* need to make the ring's circular update visible to
817          * ena_start_xmit() before checking for netif_queue_stopped().
818          */
819         smp_mb();
820
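        /* The queue-stopped check is done twice: once lockless below and
         * again under the tx lock, closing the race with the transmit path
         * stopping the queue between the check and the wakeup.
         */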
821         above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
822                                                     ENA_TX_WAKEUP_THRESH);
823         if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
824                 __netif_tx_lock(txq, smp_processor_id());
825                 above_thresh =
826                         ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
827                                                      ENA_TX_WAKEUP_THRESH);
828                 if (netif_tx_queue_stopped(txq) && above_thresh) {
829                         netif_tx_wake_queue(txq);
830                         u64_stats_update_begin(&tx_ring->syncp);
831                         tx_ring->tx_stats.queue_wakeup++;
832                         u64_stats_update_end(&tx_ring->syncp);
833                 }
834                 __netif_tx_unlock(txq);
835         }
836
837         tx_ring->per_napi_bytes += tx_bytes;
838         tx_ring->per_napi_packets += tx_pkts;
839
840         return tx_pkts;
841 }
842
843 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
844 {
845         struct sk_buff *skb;
846
847         if (frags)
848                 skb = napi_get_frags(rx_ring->napi);
849         else
850                 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
851                                                 rx_ring->rx_copybreak);
852
853         if (unlikely(!skb)) {
854                 u64_stats_update_begin(&rx_ring->syncp);
855                 rx_ring->rx_stats.skb_alloc_fail++;
856                 u64_stats_update_end(&rx_ring->syncp);
857                 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
858                           "Failed to allocate skb. frags: %d\n", frags);
859                 return NULL;
860         }
861
862         return skb;
863 }
864
865 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
866                                   struct ena_com_rx_buf_info *ena_bufs,
867                                   u32 descs,
868                                   u16 *next_to_clean)
869 {
870         struct sk_buff *skb;
871         struct ena_rx_buffer *rx_info;
872         u16 len, req_id, buf = 0;
873         void *va;
874
875         len = ena_bufs[buf].len;
876         req_id = ena_bufs[buf].req_id;
877         rx_info = &rx_ring->rx_buffer_info[req_id];
878
879         if (unlikely(!rx_info->page)) {
880                 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
881                           "Page is NULL\n");
882                 return NULL;
883         }
884
885         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
886                   "rx_info %p page %p\n",
887                   rx_info, rx_info->page);
888
889         /* save virt address of first buffer */
890         va = page_address(rx_info->page) + rx_info->page_offset;
891         prefetch(va + NET_IP_ALIGN);
892
893         if (len <= rx_ring->rx_copybreak) {
894                 skb = ena_alloc_skb(rx_ring, false);
895                 if (unlikely(!skb))
896                         return NULL;
897
898                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
899                           "rx allocated small packet. len %d. data_len %d\n",
900                           skb->len, skb->data_len);
901
902                 /* sync this buffer for CPU use */
903                 dma_sync_single_for_cpu(rx_ring->dev,
904                                         dma_unmap_addr(&rx_info->ena_buf, paddr),
905                                         len,
906                                         DMA_FROM_DEVICE);
907                 skb_copy_to_linear_data(skb, va, len);
908                 dma_sync_single_for_device(rx_ring->dev,
909                                            dma_unmap_addr(&rx_info->ena_buf, paddr),
910                                            len,
911                                            DMA_FROM_DEVICE);
912
913                 skb_put(skb, len);
914                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
915                 rx_ring->free_ids[*next_to_clean] = req_id;
916                 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
917                                                      rx_ring->ring_size);
918                 return skb;
919         }
920
921         skb = ena_alloc_skb(rx_ring, true);
922         if (unlikely(!skb))
923                 return NULL;
924
925         do {
926                 dma_unmap_page(rx_ring->dev,
927                                dma_unmap_addr(&rx_info->ena_buf, paddr),
928                                ENA_PAGE_SIZE, DMA_FROM_DEVICE);
929
930                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
931                                 rx_info->page_offset, len, ENA_PAGE_SIZE);
932
933                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
934                           "rx skb updated. len %d. data_len %d\n",
935                           skb->len, skb->data_len);
936
937                 rx_info->page = NULL;
938
939                 rx_ring->free_ids[*next_to_clean] = req_id;
940                 *next_to_clean =
941                         ENA_RX_RING_IDX_NEXT(*next_to_clean,
942                                              rx_ring->ring_size);
943                 if (likely(--descs == 0))
944                         break;
945
946                 buf++;
947                 len = ena_bufs[buf].len;
948                 req_id = ena_bufs[buf].req_id;
949                 rx_info = &rx_ring->rx_buffer_info[req_id];
950         } while (1);
951
952         return skb;
953 }
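/* Note on the two receive paths above: packets at or below rx_copybreak
 * are copied into a freshly allocated linear skb and the DMA-mapped page
 * stays attached to rx_info for reuse on the next refill, while larger
 * packets unmap the page and attach it to the skb as a fragment (the page
 * reference is handed to the stack and rx_info->page is cleared).
 */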
954
955 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
956  * @rx_ring: the ring from which the packet was received
957  * @ena_rx_ctx: received packet context/metadata
958  * @skb: skb currently being received and modified
959  */
960 static void ena_rx_checksum(struct ena_ring *rx_ring,
961                                    struct ena_com_rx_ctx *ena_rx_ctx,
962                                    struct sk_buff *skb)
963 {
964         /* Rx csum disabled */
965         if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
966                 skb->ip_summed = CHECKSUM_NONE;
967                 return;
968         }
969
970         /* For fragmented packets the checksum isn't valid */
971         if (ena_rx_ctx->frag) {
972                 skb->ip_summed = CHECKSUM_NONE;
973                 return;
974         }
975
976         /* if IP and error */
977         if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
978                      (ena_rx_ctx->l3_csum_err))) {
979                 /* ipv4 checksum error */
980                 skb->ip_summed = CHECKSUM_NONE;
981                 u64_stats_update_begin(&rx_ring->syncp);
982                 rx_ring->rx_stats.bad_csum++;
983                 u64_stats_update_end(&rx_ring->syncp);
984                 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
985                           "RX IPv4 header checksum error\n");
986                 return;
987         }
988
989         /* if TCP/UDP */
990         if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
991                    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
992                 if (unlikely(ena_rx_ctx->l4_csum_err)) {
993                         /* TCP/UDP checksum error */
994                         u64_stats_update_begin(&rx_ring->syncp);
995                         rx_ring->rx_stats.bad_csum++;
996                         u64_stats_update_end(&rx_ring->syncp);
997                         netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
998                                   "RX L4 checksum error\n");
999                         skb->ip_summed = CHECKSUM_NONE;
1000                         return;
1001                 }
1002
1003                 if (likely(ena_rx_ctx->l4_csum_checked)) {
1004                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1005                         u64_stats_update_begin(&rx_ring->syncp);
1006                         rx_ring->rx_stats.csum_good++;
1007                         u64_stats_update_end(&rx_ring->syncp);
1008                 } else {
1009                         u64_stats_update_begin(&rx_ring->syncp);
1010                         rx_ring->rx_stats.csum_unchecked++;
1011                         u64_stats_update_end(&rx_ring->syncp);
1012                         skb->ip_summed = CHECKSUM_NONE;
1013                 }
1014         } else {
1015                 skb->ip_summed = CHECKSUM_NONE;
1016                 return;
1017         }
1018
1019 }
1020
1021 static void ena_set_rx_hash(struct ena_ring *rx_ring,
1022                             struct ena_com_rx_ctx *ena_rx_ctx,
1023                             struct sk_buff *skb)
1024 {
1025         enum pkt_hash_types hash_type;
1026
1027         if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1028                 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1029                            (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1030
1031                         hash_type = PKT_HASH_TYPE_L4;
1032                 else
1033                         hash_type = PKT_HASH_TYPE_NONE;
1034
1035                 /* Override hash type if the packet is fragmented */
1036                 if (ena_rx_ctx->frag)
1037                         hash_type = PKT_HASH_TYPE_NONE;
1038
1039                 skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1040         }
1041 }
1042
1043 /* ena_clean_rx_irq - Cleanup RX irq
1044  * @rx_ring: RX ring to clean
1045  * @napi: napi handler
1046  * @budget: how many packets driver is allowed to clean
1047  *
1048  * Returns the number of cleaned buffers.
1049  */
1050 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1051                             u32 budget)
1052 {
1053         u16 next_to_clean = rx_ring->next_to_clean;
1054         u32 res_budget, work_done;
1055
1056         struct ena_com_rx_ctx ena_rx_ctx;
1057         struct ena_adapter *adapter;
1058         struct sk_buff *skb;
1059         int refill_required;
1060         int refill_threshold;
1061         int rc = 0;
1062         int total_len = 0;
1063         int rx_copybreak_pkt = 0;
1064         int i;
1065
1066         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1067                   "%s qid %d\n", __func__, rx_ring->qid);
1068         res_budget = budget;
1069
1070         do {
1071                 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1072                 ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1073                 ena_rx_ctx.descs = 0;
1074                 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1075                                     rx_ring->ena_com_io_sq,
1076                                     &ena_rx_ctx);
1077                 if (unlikely(rc))
1078                         goto error;
1079
1080                 if (unlikely(ena_rx_ctx.descs == 0))
1081                         break;
1082
1083                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1084                           "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1085                           rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1086                           ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1087
1088                 /* allocate skb and fill it */
1089                 skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
1090                                  &next_to_clean);
1091
1092                 /* exit if we failed to retrieve a buffer */
1093                 if (unlikely(!skb)) {
1094                         for (i = 0; i < ena_rx_ctx.descs; i++) {
1095                                 rx_ring->free_ids[next_to_clean] =
1096                                         rx_ring->ena_bufs[i].req_id;
1097                                 next_to_clean =
1098                                         ENA_RX_RING_IDX_NEXT(next_to_clean,
1099                                                              rx_ring->ring_size);
1100                         }
1101                         break;
1102                 }
1103
1104                 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1105
1106                 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1107
1108                 skb_record_rx_queue(skb, rx_ring->qid);
1109
1110                 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
1111                         total_len += rx_ring->ena_bufs[0].len;
1112                         rx_copybreak_pkt++;
1113                         napi_gro_receive(napi, skb);
1114                 } else {
1115                         total_len += skb->len;
1116                         napi_gro_frags(napi);
1117                 }
1118
1119                 res_budget--;
1120         } while (likely(res_budget));
1121
1122         work_done = budget - res_budget;
1123         rx_ring->per_napi_bytes += total_len;
1124         rx_ring->per_napi_packets += work_done;
1125         u64_stats_update_begin(&rx_ring->syncp);
1126         rx_ring->rx_stats.bytes += total_len;
1127         rx_ring->rx_stats.cnt += work_done;
1128         rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1129         u64_stats_update_end(&rx_ring->syncp);
1130
1131         rx_ring->next_to_clean = next_to_clean;
1132
1133         refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
1134         refill_threshold =
1135                 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1136                       ENA_RX_REFILL_THRESH_PACKET);
1137
1138         /* Optimization, try to batch new rx buffers */
1139         if (refill_required > refill_threshold) {
1140                 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1141                 ena_refill_rx_bufs(rx_ring, refill_required);
1142         }
1143
1144         return work_done;
1145
1146 error:
1147         adapter = netdev_priv(rx_ring->netdev);
1148
1149         u64_stats_update_begin(&rx_ring->syncp);
1150         rx_ring->rx_stats.bad_desc_num++;
1151         u64_stats_update_end(&rx_ring->syncp);
1152
1153         /* Too many desc from the device. Trigger reset */
1154         adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
1155         set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
1156
1157         return 0;
1158 }
1159
1160 static void ena_dim_work(struct work_struct *w)
1161 {
1162         struct dim *dim = container_of(w, struct dim, work);
1163         struct dim_cq_moder cur_moder =
1164                 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1165         struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
1166
1167         ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
1168         dim->state = DIM_START_MEASURE;
1169 }
1170
1171 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
1172 {
1173         struct dim_sample dim_sample;
1174         struct ena_ring *rx_ring = ena_napi->rx_ring;
1175
1176         if (!rx_ring->per_napi_packets)
1177                 return;
1178
1179         rx_ring->non_empty_napi_events++;
1180
1181         dim_update_sample(rx_ring->non_empty_napi_events,
1182                           rx_ring->rx_stats.cnt,
1183                           rx_ring->rx_stats.bytes,
1184                           &dim_sample);
1185
1186         net_dim(&ena_napi->dim, dim_sample);
1187
1188         rx_ring->per_napi_packets = 0;
1189 }
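/* Note: ena_adjust_adaptive_rx_intr_moderation() feeds cumulative Rx
 * packet/byte counters into the generic net_dim library; when DIM decides
 * to move to a different moderation profile it schedules dim.work, and
 * ena_dim_work() above translates the chosen profile into
 * rx_ring->smoothed_interval, which ena_unmask_interrupt() below writes to
 * the device when re-arming the interrupt.
 */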
1190
1191 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
1192                                         struct ena_ring *rx_ring)
1193 {
1194         struct ena_eth_io_intr_reg intr_reg;
1195
1196         /* Update intr register: rx intr delay,
1197          * tx intr delay and interrupt unmask
1198          */
1199         ena_com_update_intr_reg(&intr_reg,
1200                                 rx_ring->smoothed_interval,
1201                                 tx_ring->smoothed_interval,
1202                                 true);
1203
1204         /* The MSI-X vector is shared:
1205          * both the Tx and Rx CQs hold a pointer to its interrupt register,
1206          * so either one can be used to reach the intr reg
1207          */
1208         ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
1209 }
1210
1211 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1212                                              struct ena_ring *rx_ring)
1213 {
1214         int cpu = get_cpu();
1215         int numa_node;
1216
1217         /* Check only one ring since the 2 rings are running on the same cpu */
1218         if (likely(tx_ring->cpu == cpu))
1219                 goto out;
1220
1221         numa_node = cpu_to_node(cpu);
1222         put_cpu();
1223
1224         if (numa_node != NUMA_NO_NODE) {
1225                 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1226                 ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
1227         }
1228
1229         tx_ring->cpu = cpu;
1230         rx_ring->cpu = cpu;
1231
1232         return;
1233 out:
1234         put_cpu();
1235 }
1236
1237 static int ena_io_poll(struct napi_struct *napi, int budget)
1238 {
1239         struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1240         struct ena_ring *tx_ring, *rx_ring;
1241
1242         u32 tx_work_done;
1243         u32 rx_work_done;
1244         int tx_budget;
1245         int napi_comp_call = 0;
1246         int ret;
1247
1248         tx_ring = ena_napi->tx_ring;
1249         rx_ring = ena_napi->rx_ring;
1250
1251         tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1252
1253         if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1254             test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1255                 napi_complete_done(napi, 0);
1256                 return 0;
1257         }
1258
1259         tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1260         rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1261
1262         /* If the device is about to reset or is down, avoid unmasking
1263          * the interrupt and return 0 so NAPI won't reschedule
1264          */
1265         if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1266                      test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1267                 napi_complete_done(napi, 0);
1268                 ret = 0;
1269
1270         } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1271                 napi_comp_call = 1;
1272
1273                 /* Update NUMA and unmask the interrupt only when scheduled
1274                  * from interrupt context (vs from sk_busy_loop)
1275                  */
1276                 if (napi_complete_done(napi, rx_work_done)) {
1277                         /* We apply adaptive moderation on Rx path only.
1278                          * Tx uses static interrupt moderation.
1279                          */
1280                         if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1281                                 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
1282
1283                         ena_unmask_interrupt(tx_ring, rx_ring);
1284                 }
1285
1286                 ena_update_ring_numa_node(tx_ring, rx_ring);
1287
1288                 ret = rx_work_done;
1289         } else {
1290                 ret = budget;
1291         }
1292
1293         u64_stats_update_begin(&tx_ring->syncp);
1294         tx_ring->tx_stats.napi_comp += napi_comp_call;
1295         tx_ring->tx_stats.tx_poll++;
1296         u64_stats_update_end(&tx_ring->syncp);
1297
1298         return ret;
1299 }
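/* Note: ena_io_poll() follows the usual NAPI contract: returning the full
 * budget keeps the poller scheduled, while completing with
 * napi_complete_done() and returning less than budget ends the polling
 * round, after which the interrupt is explicitly re-armed via
 * ena_unmask_interrupt().
 */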
1300
1301 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1302 {
1303         struct ena_adapter *adapter = (struct ena_adapter *)data;
1304
1305         ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1306
1307         /* Don't call the aenq handler before probe is done */
1308         if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1309                 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1310
1311         return IRQ_HANDLED;
1312 }
1313
1314 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1315  * @irq: interrupt number
1316  * @data: pointer to the queue's private ena_napi structure
1317  */
1318 static irqreturn_t ena_intr_msix_io(int irq, void *data)
1319 {
1320         struct ena_napi *ena_napi = data;
1321
1322         ena_napi->tx_ring->first_interrupt = true;
1323         ena_napi->rx_ring->first_interrupt = true;
1324
1325         napi_schedule_irqoff(&ena_napi->napi);
1326
1327         return IRQ_HANDLED;
1328 }
1329
1330 /* Reserve a single MSI-X vector for management (admin + aenq),
1331  * plus one vector for each potential IO queue.
1332  * The number of potential IO queues is the minimum of what the device
1333  * supports and the number of vCPUs.
1334  */
1335 static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
1336 {
1337         int msix_vecs, irq_cnt;
1338
1339         if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1340                 netif_err(adapter, probe, adapter->netdev,
1341                           "Error, MSI-X is already enabled\n");
1342                 return -EPERM;
1343         }
1344
1345         /* Reserve the max MSI-X vectors we might need */
1346         msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
1347         netif_dbg(adapter, probe, adapter->netdev,
1348                   "trying to enable MSI-X, vectors %d\n", msix_vecs);
1349
1350         irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1351                                         msix_vecs, PCI_IRQ_MSIX);
1352
1353         if (irq_cnt < 0) {
1354                 netif_err(adapter, probe, adapter->netdev,
1355                           "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1356                 return -ENOSPC;
1357         }
1358
1359         if (irq_cnt != msix_vecs) {
1360                 netif_notice(adapter, probe, adapter->netdev,
1361                              "enable only %d MSI-X (out of %d), reduce the number of queues\n",
1362                              irq_cnt, msix_vecs);
1363                 adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1364         }
1365
1366         if (ena_init_rx_cpu_rmap(adapter))
1367                 netif_warn(adapter, probe, adapter->netdev,
1368                            "Failed to map IRQs to CPUs\n");
1369
1370         adapter->msix_vecs = irq_cnt;
1371         set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1372
1373         return 0;
1374 }
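/* Note: the resulting vector layout is one management vector (admin queue
 * and AENQ notifications) followed by one vector per IO queue. If the PCI
 * core grants fewer vectors than requested, the queue count is trimmed to
 * irq_cnt - ENA_ADMIN_MSIX_VEC so that every remaining queue still owns a
 * dedicated vector.
 */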
1375
1376 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1377 {
1378         u32 cpu;
1379
1380         snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1381                  ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1382                  pci_name(adapter->pdev));
1383         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
1384                 ena_intr_msix_mgmnt;
1385         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1386         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1387                 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1388         cpu = cpumask_first(cpu_online_mask);
1389         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
1390         cpumask_set_cpu(cpu,
1391                         &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
1392 }
1393
1394 static void ena_setup_io_intr(struct ena_adapter *adapter)
1395 {
1396         struct net_device *netdev;
1397         int irq_idx, i, cpu;
1398
1399         netdev = adapter->netdev;
1400
1401         for (i = 0; i < adapter->num_queues; i++) {
1402                 irq_idx = ENA_IO_IRQ_IDX(i);
1403                 cpu = i % num_online_cpus();
1404
1405                 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1406                          "%s-Tx-Rx-%d", netdev->name, i);
1407                 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
1408                 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
1409                 adapter->irq_tbl[irq_idx].vector =
1410                         pci_irq_vector(adapter->pdev, irq_idx);
1411                 adapter->irq_tbl[irq_idx].cpu = cpu;
1412
1413                 cpumask_set_cpu(cpu,
1414                                 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
1415         }
1416 }
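/* Note: IO queues are spread round-robin over the online CPUs
 * (cpu = i % num_online_cpus()); the chosen CPU is recorded both as the
 * affinity hint applied later in ena_request_io_irq() and as the node hint
 * used by ena_setup_tx/rx_resources() for NUMA-local allocations.
 */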
1417
1418 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
1419 {
1420         unsigned long flags = 0;
1421         struct ena_irq *irq;
1422         int rc;
1423
1424         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1425         rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1426                          irq->data);
1427         if (rc) {
1428                 netif_err(adapter, probe, adapter->netdev,
1429                           "failed to request admin irq\n");
1430                 return rc;
1431         }
1432
1433         netif_dbg(adapter, probe, adapter->netdev,
1434                   "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
1435                   irq->affinity_hint_mask.bits[0], irq->vector);
1436
1437         irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1438
1439         return rc;
1440 }
1441
1442 static int ena_request_io_irq(struct ena_adapter *adapter)
1443 {
1444         unsigned long flags = 0;
1445         struct ena_irq *irq;
1446         int rc = 0, i, k;
1447
1448         if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1449                 netif_err(adapter, ifup, adapter->netdev,
1450                           "Failed to request I/O IRQ: MSI-X is not enabled\n");
1451                 return -EINVAL;
1452         }
1453
1454         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1455                 irq = &adapter->irq_tbl[i];
1456                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1457                                  irq->data);
1458                 if (rc) {
1459                         netif_err(adapter, ifup, adapter->netdev,
1460                                   "Failed to request I/O IRQ. index %d rc %d\n",
1461                                    i, rc);
1462                         goto err;
1463                 }
1464
1465                 netif_dbg(adapter, ifup, adapter->netdev,
1466                           "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
1467                           i, irq->affinity_hint_mask.bits[0], irq->vector);
1468
1469                 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1470         }
1471
1472         return rc;
1473
1474 err:
1475         for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
1476                 irq = &adapter->irq_tbl[k];
1477                 free_irq(irq->vector, irq->data);
1478         }
1479
1480         return rc;
1481 }
1482
1483 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
1484 {
1485         struct ena_irq *irq;
1486
1487         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1488         synchronize_irq(irq->vector);
1489         irq_set_affinity_hint(irq->vector, NULL);
1490         free_irq(irq->vector, irq->data);
1491 }
1492
1493 static void ena_free_io_irq(struct ena_adapter *adapter)
1494 {
1495         struct ena_irq *irq;
1496         int i;
1497
1498 #ifdef CONFIG_RFS_ACCEL
1499         if (adapter->msix_vecs >= 1) {
1500                 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
1501                 adapter->netdev->rx_cpu_rmap = NULL;
1502         }
1503 #endif /* CONFIG_RFS_ACCEL */
1504
1505         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1506                 irq = &adapter->irq_tbl[i];
1507                 irq_set_affinity_hint(irq->vector, NULL);
1508                 free_irq(irq->vector, irq->data);
1509         }
1510 }
1511
1512 static void ena_disable_msix(struct ena_adapter *adapter)
1513 {
1514         if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
1515                 pci_free_irq_vectors(adapter->pdev);
1516 }
1517
1518 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
1519 {
1520         int i;
1521
1522         if (!netif_running(adapter->netdev))
1523                 return;
1524
1525         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
1526                 synchronize_irq(adapter->irq_tbl[i].vector);
1527 }
1528
1529 static void ena_del_napi(struct ena_adapter *adapter)
1530 {
1531         int i;
1532
1533         for (i = 0; i < adapter->num_queues; i++)
1534                 netif_napi_del(&adapter->ena_napi[i].napi);
1535 }
1536
1537 static void ena_init_napi(struct ena_adapter *adapter)
1538 {
1539         struct ena_napi *napi;
1540         int i;
1541
1542         for (i = 0; i < adapter->num_queues; i++) {
1543                 napi = &adapter->ena_napi[i];
1544
1545                 netif_napi_add(adapter->netdev,
1546                                &adapter->ena_napi[i].napi,
1547                                ena_io_poll,
1548                                ENA_NAPI_BUDGET);
1549                 napi->rx_ring = &adapter->rx_ring[i];
1550                 napi->tx_ring = &adapter->tx_ring[i];
1551                 napi->qid = i;
1552         }
1553 }
1554
1555 static void ena_napi_disable_all(struct ena_adapter *adapter)
1556 {
1557         int i;
1558
1559         for (i = 0; i < adapter->num_queues; i++)
1560                 napi_disable(&adapter->ena_napi[i].napi);
1561 }
1562
1563 static void ena_napi_enable_all(struct ena_adapter *adapter)
1564 {
1565         int i;
1566
1567         for (i = 0; i < adapter->num_queues; i++)
1568                 napi_enable(&adapter->ena_napi[i].napi);
1569 }
1570
1571 static void ena_restore_ethtool_params(struct ena_adapter *adapter)
1572 {
1573         adapter->tx_usecs = 0;
1574         adapter->rx_usecs = 0;
1575         adapter->tx_frames = 1;
1576         adapter->rx_frames = 1;
1577 }
1578
1579 /* Configure the Rx forwarding */
1580 static int ena_rss_configure(struct ena_adapter *adapter)
1581 {
1582         struct ena_com_dev *ena_dev = adapter->ena_dev;
1583         int rc;
1584
1585         /* In case the RSS table wasn't initialized by probe */
1586         if (!ena_dev->rss.tbl_log_size) {
1587                 rc = ena_rss_init_default(adapter);
1588                 if (rc && (rc != -EOPNOTSUPP)) {
1589                         netif_err(adapter, ifup, adapter->netdev,
1590                                   "Failed to init RSS rc: %d\n", rc);
1591                         return rc;
1592                 }
1593         }
1594
1595         /* Set indirect table */
1596         rc = ena_com_indirect_table_set(ena_dev);
1597         if (unlikely(rc && rc != -EOPNOTSUPP))
1598                 return rc;
1599
1600         /* Configure hash function (if supported) */
1601         rc = ena_com_set_hash_function(ena_dev);
1602         if (unlikely(rc && (rc != -EOPNOTSUPP)))
1603                 return rc;
1604
1605         /* Configure hash inputs (if supported) */
1606         rc = ena_com_set_hash_ctrl(ena_dev);
1607         if (unlikely(rc && (rc != -EOPNOTSUPP)))
1608                 return rc;
1609
1610         return 0;
1611 }
1612
1613 static int ena_up_complete(struct ena_adapter *adapter)
1614 {
1615         int rc;
1616
1617         rc = ena_rss_configure(adapter);
1618         if (rc)
1619                 return rc;
1620
1621         ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1622
1623         ena_refill_all_rx_bufs(adapter);
1624
1625         /* enable transmits */
1626         netif_tx_start_all_queues(adapter->netdev);
1627
1628         ena_restore_ethtool_params(adapter);
1629
1630         ena_napi_enable_all(adapter);
1631
1632         return 0;
1633 }
1634
1635 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1636 {
1637         struct ena_com_create_io_ctx ctx;
1638         struct ena_com_dev *ena_dev;
1639         struct ena_ring *tx_ring;
1640         u32 msix_vector;
1641         u16 ena_qid;
1642         int rc;
1643
1644         ena_dev = adapter->ena_dev;
1645
1646         tx_ring = &adapter->tx_ring[qid];
1647         msix_vector = ENA_IO_IRQ_IDX(qid);
1648         ena_qid = ENA_IO_TXQ_IDX(qid);
1649
1650         memset(&ctx, 0x0, sizeof(ctx));
1651
1652         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1653         ctx.qid = ena_qid;
1654         ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1655         ctx.msix_vector = msix_vector;
1656         ctx.queue_size = tx_ring->ring_size;
1657         ctx.numa_node = cpu_to_node(tx_ring->cpu);
1658
1659         rc = ena_com_create_io_queue(ena_dev, &ctx);
1660         if (rc) {
1661                 netif_err(adapter, ifup, adapter->netdev,
1662                           "Failed to create I/O TX queue num %d rc: %d\n",
1663                           qid, rc);
1664                 return rc;
1665         }
1666
1667         rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1668                                      &tx_ring->ena_com_io_sq,
1669                                      &tx_ring->ena_com_io_cq);
1670         if (rc) {
1671                 netif_err(adapter, ifup, adapter->netdev,
1672                           "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
1673                           qid, rc);
1674                 ena_com_destroy_io_queue(ena_dev, ena_qid);
1675                 return rc;
1676         }
1677
1678         ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
1679         return rc;
1680 }
1681
1682 static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
1683 {
1684         struct ena_com_dev *ena_dev = adapter->ena_dev;
1685         int rc, i;
1686
1687         for (i = 0; i < adapter->num_queues; i++) {
1688                 rc = ena_create_io_tx_queue(adapter, i);
1689                 if (rc)
1690                         goto create_err;
1691         }
1692
1693         return 0;
1694
1695 create_err:
1696         while (i--)
1697                 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1698
1699         return rc;
1700 }
1701
1702 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1703 {
1704         struct ena_com_dev *ena_dev;
1705         struct ena_com_create_io_ctx ctx;
1706         struct ena_ring *rx_ring;
1707         u32 msix_vector;
1708         u16 ena_qid;
1709         int rc;
1710
1711         ena_dev = adapter->ena_dev;
1712
1713         rx_ring = &adapter->rx_ring[qid];
1714         msix_vector = ENA_IO_IRQ_IDX(qid);
1715         ena_qid = ENA_IO_RXQ_IDX(qid);
1716
1717         memset(&ctx, 0x0, sizeof(ctx));
1718
1719         ctx.qid = ena_qid;
1720         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1721         ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1722         ctx.msix_vector = msix_vector;
1723         ctx.queue_size = rx_ring->ring_size;
1724         ctx.numa_node = cpu_to_node(rx_ring->cpu);
1725
1726         rc = ena_com_create_io_queue(ena_dev, &ctx);
1727         if (rc) {
1728                 netif_err(adapter, ifup, adapter->netdev,
1729                           "Failed to create I/O RX queue num %d rc: %d\n",
1730                           qid, rc);
1731                 return rc;
1732         }
1733
1734         rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1735                                      &rx_ring->ena_com_io_sq,
1736                                      &rx_ring->ena_com_io_cq);
1737         if (rc) {
1738                 netif_err(adapter, ifup, adapter->netdev,
1739                           "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
1740                           qid, rc);
1741                 ena_com_destroy_io_queue(ena_dev, ena_qid);
1742                 return rc;
1743         }
1744
1745         ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
1746
1747         return rc;
1748 }
1749
1750 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
1751 {
1752         struct ena_com_dev *ena_dev = adapter->ena_dev;
1753         int rc, i;
1754
1755         for (i = 0; i < adapter->num_queues; i++) {
1756                 rc = ena_create_io_rx_queue(adapter, i);
1757                 if (rc)
1758                         goto create_err;
1759                 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
1760         }
1761
1762         return 0;
1763
1764 create_err:
1765         while (i--) {
1766                 cancel_work_sync(&adapter->ena_napi[i].dim.work);
1767                 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1768         }
1769
1770         return rc;
1771 }
1772
1773 static void set_io_rings_size(struct ena_adapter *adapter,
1774                                      int new_tx_size, int new_rx_size)
1775 {
1776         int i;
1777
1778         for (i = 0; i < adapter->num_queues; i++) {
1779                 adapter->tx_ring[i].ring_size = new_tx_size;
1780                 adapter->rx_ring[i].ring_size = new_rx_size;
1781         }
1782 }
1783
1784 /* This function allows queue allocation to back off when the system is
1785  * low on memory. If there is not enough memory to allocate I/O queues,
1786  * the driver will retry with smaller queue sizes.
1787  *
1788  * The backoff algorithm is as follows:
1789  *  1. Try to allocate the TX and RX queues.
1790  *  1.1. If successful, return success.
1791  *
1792  *  2. Halve the size of the larger of the RX and TX queues (or both if they are the same size).
1793  *
1794  *  3. If the new TX or RX size is smaller than 256 (ENA_MIN_RING_SIZE)
1795  *  3.1. return failure.
1796  *  4. else
1797  *  4.1. go back to step 1.
1798  */
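/* For illustration: with requested sizes of TX=1024 and RX=1024 and repeated
 * -ENOMEM failures, the retry sequence would be 512/512, then 256/256; a
 * further halving would drop below ENA_MIN_RING_SIZE, so the function would
 * then give up and return the error.
 */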
1799 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
1800 {
1801         int rc, cur_rx_ring_size, cur_tx_ring_size;
1802         int new_rx_ring_size, new_tx_ring_size;
1803
1804         /* current queue sizes might be set to smaller than the requested
1805          * ones due to past queue allocation failures.
1806          */
1807         set_io_rings_size(adapter, adapter->requested_tx_ring_size,
1808                           adapter->requested_rx_ring_size);
1809
1810         while (1) {
1811                 rc = ena_setup_all_tx_resources(adapter);
1812                 if (rc)
1813                         goto err_setup_tx;
1814
1815                 rc = ena_create_all_io_tx_queues(adapter);
1816                 if (rc)
1817                         goto err_create_tx_queues;
1818
1819                 rc = ena_setup_all_rx_resources(adapter);
1820                 if (rc)
1821                         goto err_setup_rx;
1822
1823                 rc = ena_create_all_io_rx_queues(adapter);
1824                 if (rc)
1825                         goto err_create_rx_queues;
1826
1827                 return 0;
1828
1829 err_create_rx_queues:
1830                 ena_free_all_io_rx_resources(adapter);
1831 err_setup_rx:
1832                 ena_destroy_all_tx_queues(adapter);
1833 err_create_tx_queues:
1834                 ena_free_all_io_tx_resources(adapter);
1835 err_setup_tx:
1836                 if (rc != -ENOMEM) {
1837                         netif_err(adapter, ifup, adapter->netdev,
1838                                   "Queue creation failed with error code %d\n",
1839                                   rc);
1840                         return rc;
1841                 }
1842
1843                 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
1844                 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
1845
1846                 netif_err(adapter, ifup, adapter->netdev,
1847                           "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
1848                           cur_tx_ring_size, cur_rx_ring_size);
1849
1850                 new_tx_ring_size = cur_tx_ring_size;
1851                 new_rx_ring_size = cur_rx_ring_size;
1852
1853                 /* Decrease the size of the larger queue, or
1854                  * decrease both if they are the same size.
1855                  */
1856                 if (cur_rx_ring_size <= cur_tx_ring_size)
1857                         new_tx_ring_size = cur_tx_ring_size / 2;
1858                 if (cur_rx_ring_size >= cur_tx_ring_size)
1859                         new_rx_ring_size = cur_rx_ring_size / 2;
1860
1861                 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
1862                     new_rx_ring_size < ENA_MIN_RING_SIZE) {
1863                         netif_err(adapter, ifup, adapter->netdev,
1864                                   "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
1865                                   ENA_MIN_RING_SIZE);
1866                         return rc;
1867                 }
1868
1869                 netif_err(adapter, ifup, adapter->netdev,
1870                           "Retrying queue creation with sizes TX=%d, RX=%d\n",
1871                           new_tx_ring_size,
1872                           new_rx_ring_size);
1873
1874                 set_io_rings_size(adapter, new_tx_ring_size,
1875                                   new_rx_ring_size);
1876         }
1877 }
1878
1879 static int ena_up(struct ena_adapter *adapter)
1880 {
1881         int rc, i;
1882
1883         netdev_dbg(adapter->netdev, "%s\n", __func__);
1884
1885         ena_setup_io_intr(adapter);
1886
1887         /* napi poll functions should be initialized before running
1888          * request_irq(), to handle a rare condition where there is a pending
1889          * interrupt, which causes the ISR to fire immediately while the poll
1890          * function is not yet set, leading to a NULL dereference
1891          */
1892         ena_init_napi(adapter);
1893
1894         rc = ena_request_io_irq(adapter);
1895         if (rc)
1896                 goto err_req_irq;
1897
1898         rc = create_queues_with_size_backoff(adapter);
1899         if (rc)
1900                 goto err_create_queues_with_backoff;
1901
1902         rc = ena_up_complete(adapter);
1903         if (rc)
1904                 goto err_up;
1905
1906         if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1907                 netif_carrier_on(adapter->netdev);
1908
1909         u64_stats_update_begin(&adapter->syncp);
1910         adapter->dev_stats.interface_up++;
1911         u64_stats_update_end(&adapter->syncp);
1912
1913         set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1914
1915         /* Enable completion queues interrupt */
1916         for (i = 0; i < adapter->num_queues; i++)
1917                 ena_unmask_interrupt(&adapter->tx_ring[i],
1918                                      &adapter->rx_ring[i]);
1919
1920         /* schedule napi in case we had pending packets
1921          * from the last time we disabled napi
1922          */
1923         for (i = 0; i < adapter->num_queues; i++)
1924                 napi_schedule(&adapter->ena_napi[i].napi);
1925
1926         return rc;
1927
1928 err_up:
1929         ena_destroy_all_tx_queues(adapter);
1930         ena_free_all_io_tx_resources(adapter);
1931         ena_destroy_all_rx_queues(adapter);
1932         ena_free_all_io_rx_resources(adapter);
1933 err_create_queues_with_backoff:
1934         ena_free_io_irq(adapter);
1935 err_req_irq:
1936         ena_del_napi(adapter);
1937
1938         return rc;
1939 }
1940
1941 static void ena_down(struct ena_adapter *adapter)
1942 {
1943         netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
1944
1945         clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1946
1947         u64_stats_update_begin(&adapter->syncp);
1948         adapter->dev_stats.interface_down++;
1949         u64_stats_update_end(&adapter->syncp);
1950
1951         netif_carrier_off(adapter->netdev);
1952         netif_tx_disable(adapter->netdev);
1953
1954         /* After this point the napi handler won't enable the tx queue */
1955         ena_napi_disable_all(adapter);
1956
1957         /* After the queues are destroyed there won't be any new interrupts */
1958
1959         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
1960                 int rc;
1961
1962                 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
1963                 if (rc)
1964                         dev_err(&adapter->pdev->dev, "Device reset failed\n");
1965                 /* stop submitting admin commands on a device that was reset */
1966                 ena_com_set_admin_running_state(adapter->ena_dev, false);
1967         }
1968
1969         ena_destroy_all_io_queues(adapter);
1970
1971         ena_disable_io_intr_sync(adapter);
1972         ena_free_io_irq(adapter);
1973         ena_del_napi(adapter);
1974
1975         ena_free_all_tx_bufs(adapter);
1976         ena_free_all_rx_bufs(adapter);
1977         ena_free_all_io_tx_resources(adapter);
1978         ena_free_all_io_rx_resources(adapter);
1979 }
1980
1981 /* ena_open - Called when a network interface is made active
1982  * @netdev: network interface device structure
1983  *
1984  * Returns 0 on success, negative value on failure
1985  *
1986  * The open entry point is called when a network interface is made
1987  * active by the system (IFF_UP).  At this point all resources needed
1988  * for transmit and receive operations are allocated, the interrupt
1989  * handler is registered with the OS, the watchdog timer is started,
1990  * and the stack is notified that the interface is ready.
1991  */
1992 static int ena_open(struct net_device *netdev)
1993 {
1994         struct ena_adapter *adapter = netdev_priv(netdev);
1995         int rc;
1996
1997         /* Notify the stack of the actual queue counts. */
1998         rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
1999         if (rc) {
2000                 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2001                 return rc;
2002         }
2003
2004         rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
2005         if (rc) {
2006                 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2007                 return rc;
2008         }
2009
2010         rc = ena_up(adapter);
2011         if (rc)
2012                 return rc;
2013
2014         return rc;
2015 }
2016
2017 /* ena_close - Disables a network interface
2018  * @netdev: network interface device structure
2019  *
2020  * Returns 0, this is not allowed to fail
2021  *
2022  * The close entry point is called when an interface is de-activated
2023  * by the OS.  The hardware is still under the drivers control, but
2024  * needs to be disabled.  A global MAC reset is issued to stop the
2025  * hardware, and all transmit and receive resources are freed.
2026  */
2027 static int ena_close(struct net_device *netdev)
2028 {
2029         struct ena_adapter *adapter = netdev_priv(netdev);
2030
2031         netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2032
2033         if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2034                 return 0;
2035
2036         if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2037                 ena_down(adapter);
2038
2039         /* Check the device status and issue a reset if needed */
2040         check_for_admin_com_state(adapter);
2041         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2042                 netif_err(adapter, ifdown, adapter->netdev,
2043                           "Destroy failure, restarting device\n");
2044                 ena_dump_stats_to_dmesg(adapter);
2045                 /* rtnl lock already obtained in dev_ioctl() layer */
2046                 ena_destroy_device(adapter, false);
2047                 ena_restore_device(adapter);
2048         }
2049
2050         return 0;
2051 }
2052
2053 int ena_update_queue_sizes(struct ena_adapter *adapter,
2054                            u32 new_tx_size,
2055                            u32 new_rx_size)
2056 {
2057         bool dev_up;
2058
2059         dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2060         ena_close(adapter->netdev);
2061         adapter->requested_tx_ring_size = new_tx_size;
2062         adapter->requested_rx_ring_size = new_rx_size;
2063         ena_init_io_rings(adapter);
2064         return dev_up ? ena_up(adapter) : 0;
2065 }
2066
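/* Fill the TX context's checksum/TSO metadata from the skb. As an example of
 * the resulting configuration: a TSO IPv4/TCP skb (gso_size != 0) ends up with
 * tso_enable = 1, l3_csum_enable = 1 and l4_proto = TCP, while an skb with
 * neither CHECKSUM_PARTIAL nor a gso_size leaves meta_valid = 0.
 */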
2067 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
2068 {
2069         u32 mss = skb_shinfo(skb)->gso_size;
2070         struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2071         u8 l4_protocol = 0;
2072
2073         if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2074                 ena_tx_ctx->l4_csum_enable = 1;
2075                 if (mss) {
2076                         ena_tx_ctx->tso_enable = 1;
2077                         ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2078                         ena_tx_ctx->l4_csum_partial = 0;
2079                 } else {
2080                         ena_tx_ctx->tso_enable = 0;
2081                         ena_meta->l4_hdr_len = 0;
2082                         ena_tx_ctx->l4_csum_partial = 1;
2083                 }
2084
2085                 switch (ip_hdr(skb)->version) {
2086                 case IPVERSION:
2087                         ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2088                         if (ip_hdr(skb)->frag_off & htons(IP_DF))
2089                                 ena_tx_ctx->df = 1;
2090                         if (mss)
2091                                 ena_tx_ctx->l3_csum_enable = 1;
2092                         l4_protocol = ip_hdr(skb)->protocol;
2093                         break;
2094                 case 6:
2095                         ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2096                         l4_protocol = ipv6_hdr(skb)->nexthdr;
2097                         break;
2098                 default:
2099                         break;
2100                 }
2101
2102                 if (l4_protocol == IPPROTO_TCP)
2103                         ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2104                 else
2105                         ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2106
2107                 ena_meta->mss = mss;
2108                 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2109                 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2110                 ena_tx_ctx->meta_valid = 1;
2111
2112         } else {
2113                 ena_tx_ctx->meta_valid = 0;
2114         }
2115 }
2116
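/* Linearize the skb when it cannot be described by the ring's SG list: for
 * example, an skb whose nr_frags equals sgl_size while its linear header is
 * at least tx_max_header_size long is flattened (and counted in
 * tx_stats.linearize) before being mapped.
 */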
2117 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2118                                        struct sk_buff *skb)
2119 {
2120         int num_frags, header_len, rc;
2121
2122         num_frags = skb_shinfo(skb)->nr_frags;
2123         header_len = skb_headlen(skb);
2124
2125         if (num_frags < tx_ring->sgl_size)
2126                 return 0;
2127
2128         if ((num_frags == tx_ring->sgl_size) &&
2129             (header_len < tx_ring->tx_max_header_size))
2130                 return 0;
2131
2132         u64_stats_update_begin(&tx_ring->syncp);
2133         tx_ring->tx_stats.linearize++;
2134         u64_stats_update_end(&tx_ring->syncp);
2135
2136         rc = skb_linearize(skb);
2137         if (unlikely(rc)) {
2138                 u64_stats_update_begin(&tx_ring->syncp);
2139                 tx_ring->tx_stats.linearize_failed++;
2140                 u64_stats_update_end(&tx_ring->syncp);
2141         }
2142
2143         return rc;
2144 }
2145
2146 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2147                           struct ena_tx_buffer *tx_info,
2148                           struct sk_buff *skb,
2149                           void **push_hdr,
2150                           u16 *header_len)
2151 {
2152         struct ena_adapter *adapter = tx_ring->adapter;
2153         struct ena_com_buf *ena_buf;
2154         dma_addr_t dma;
2155         u32 skb_head_len, frag_len, last_frag;
2156         u16 push_len = 0;
2157         u16 delta = 0;
2158         int i = 0;
2159
2160         skb_head_len = skb_headlen(skb);
2161         tx_info->skb = skb;
2162         ena_buf = tx_info->bufs;
2163
2164         if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2165                 /* When the device is in LLQ mode, the driver copies
2166                  * the header into the device memory space.
2167                  * The ena_com layer assumes the header is in a linear
2168                  * memory space.
2169                  * This assumption might be wrong since part of the header
2170                  * can be in the fragmented buffers.
2171                  * Use skb_header_pointer to make sure the header is in a
2172                  * linear memory space.
2173                  */
2174
2175                 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2176                 *push_hdr = skb_header_pointer(skb, 0, push_len,
2177                                                tx_ring->push_buf_intermediate_buf);
2178                 *header_len = push_len;
2179                 if (unlikely(skb->data != *push_hdr)) {
2180                         u64_stats_update_begin(&tx_ring->syncp);
2181                         tx_ring->tx_stats.llq_buffer_copy++;
2182                         u64_stats_update_end(&tx_ring->syncp);
2183
2184                         delta = push_len - skb_head_len;
2185                 }
2186         } else {
2187                 *push_hdr = NULL;
2188                 *header_len = min_t(u32, skb_head_len,
2189                                     tx_ring->tx_max_header_size);
2190         }
2191
2192         netif_dbg(adapter, tx_queued, adapter->netdev,
2193                   "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2194                   *push_hdr, push_len);
2195
2196         if (skb_head_len > push_len) {
2197                 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2198                                      skb_head_len - push_len, DMA_TO_DEVICE);
2199                 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2200                         goto error_report_dma_error;
2201
2202                 ena_buf->paddr = dma;
2203                 ena_buf->len = skb_head_len - push_len;
2204
2205                 ena_buf++;
2206                 tx_info->num_of_bufs++;
2207                 tx_info->map_linear_data = 1;
2208         } else {
2209                 tx_info->map_linear_data = 0;
2210         }
2211
2212         last_frag = skb_shinfo(skb)->nr_frags;
2213
2214         for (i = 0; i < last_frag; i++) {
2215                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2216
2217                 frag_len = skb_frag_size(frag);
2218
2219                 if (unlikely(delta >= frag_len)) {
2220                         delta -= frag_len;
2221                         continue;
2222                 }
2223
2224                 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2225                                        frag_len - delta, DMA_TO_DEVICE);
2226                 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2227                         goto error_report_dma_error;
2228
2229                 ena_buf->paddr = dma;
2230                 ena_buf->len = frag_len - delta;
2231                 ena_buf++;
2232                 tx_info->num_of_bufs++;
2233                 delta = 0;
2234         }
2235
2236         return 0;
2237
2238 error_report_dma_error:
2239         u64_stats_update_begin(&tx_ring->syncp);
2240         tx_ring->tx_stats.dma_mapping_err++;
2241         u64_stats_update_end(&tx_ring->syncp);
2242         netdev_warn(adapter->netdev, "failed to map skb\n");
2243
2244         tx_info->skb = NULL;
2245
2246         tx_info->num_of_bufs += i;
2247         ena_unmap_tx_skb(tx_ring, tx_info);
2248
2249         return -EINVAL;
2250 }
2251
2252 /* Called with netif_tx_lock. */
2253 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2254 {
2255         struct ena_adapter *adapter = netdev_priv(dev);
2256         struct ena_tx_buffer *tx_info;
2257         struct ena_com_tx_ctx ena_tx_ctx;
2258         struct ena_ring *tx_ring;
2259         struct netdev_queue *txq;
2260         void *push_hdr;
2261         u16 next_to_use, req_id, header_len;
2262         int qid, rc, nb_hw_desc;
2263
2264         netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2265         /* Determine which tx ring the skb will be placed on */
2266         qid = skb_get_queue_mapping(skb);
2267         tx_ring = &adapter->tx_ring[qid];
2268         txq = netdev_get_tx_queue(dev, qid);
2269
2270         rc = ena_check_and_linearize_skb(tx_ring, skb);
2271         if (unlikely(rc))
2272                 goto error_drop_packet;
2273
2274         skb_tx_timestamp(skb);
2275
2276         next_to_use = tx_ring->next_to_use;
2277         req_id = tx_ring->free_ids[next_to_use];
2278         tx_info = &tx_ring->tx_buffer_info[req_id];
2279         tx_info->num_of_bufs = 0;
2280
2281         WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2282
2283         rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2284         if (unlikely(rc))
2285                 goto error_drop_packet;
2286
2287         memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2288         ena_tx_ctx.ena_bufs = tx_info->bufs;
2289         ena_tx_ctx.push_header = push_hdr;
2290         ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2291         ena_tx_ctx.req_id = req_id;
2292         ena_tx_ctx.header_len = header_len;
2293
2294         /* set flags and meta data */
2295         ena_tx_csum(&ena_tx_ctx, skb);
2296
2297         if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) {
2298                 netif_dbg(adapter, tx_queued, dev,
2299                           "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
2300                           qid);
2301                 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2302         }
2303
2304         /* prepare the packet's descriptors for the dma engine */
2305         rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2306                                 &nb_hw_desc);
2307
2308         /* ena_com_prepare_tx() can't fail due to overflow of tx queue,
2309          * since the number of free descriptors in the queue is checked
2310          * after sending the previous packet. In case there isn't enough
2311          * space in the queue for the next packet, it is stopped
2312          * until there is again enough available space in the queue.
2313          * All other failure reasons of ena_com_prepare_tx() are fatal
2314          * and therefore require a device reset.
2315          */
2316         if (unlikely(rc)) {
2317                 netif_err(adapter, tx_queued, dev,
2318                           "failed to prepare tx bufs\n");
2319                 u64_stats_update_begin(&tx_ring->syncp);
2320                 tx_ring->tx_stats.prepare_ctx_err++;
2321                 u64_stats_update_end(&tx_ring->syncp);
2322                 adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
2323                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2324                 goto error_unmap_dma;
2325         }
2326
2327         netdev_tx_sent_queue(txq, skb->len);
2328
2329         u64_stats_update_begin(&tx_ring->syncp);
2330         tx_ring->tx_stats.cnt++;
2331         tx_ring->tx_stats.bytes += skb->len;
2332         u64_stats_update_end(&tx_ring->syncp);
2333
2334         tx_info->tx_descs = nb_hw_desc;
2335         tx_info->last_jiffies = jiffies;
2336         tx_info->print_once = 0;
2337
2338         tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2339                 tx_ring->ring_size);
2340
2341         /* stop the queue when no more space is available; a packet can need up
2342          * to sgl_size + 2 descriptors: one for the meta descriptor and one for the
2343          * header (if the header is larger than tx_max_header_size).
2344          */
2345         if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2346                                                    tx_ring->sgl_size + 2))) {
2347                 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2348                           __func__, qid);
2349
2350                 netif_tx_stop_queue(txq);
2351                 u64_stats_update_begin(&tx_ring->syncp);
2352                 tx_ring->tx_stats.queue_stop++;
2353                 u64_stats_update_end(&tx_ring->syncp);
2354
2355                 /* There is a rare condition where this function decides to
2356                  * stop the queue but meanwhile clean_tx_irq updates
2357                  * next_to_completion and terminates.
2358                  * The queue will remain stopped forever.
2359                  * To solve this issue add an mb() to make sure that the
2360                  * netif_tx_stop_queue() write is visible before checking if
2361                  * there is additional space in the queue.
2362                  */
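                /* Illustrative sequence: the completion path could free
                 * descriptors between the space check above and the
                 * netif_tx_stop_queue() call, see the ring as not yet stopped
                 * and skip waking it; the smp_mb() below orders the stop
                 * before the re-check of free space so this path can wake the
                 * queue itself in that window.
                 */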
2363                 smp_mb();
2364
2365                 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2366                                                  ENA_TX_WAKEUP_THRESH)) {
2367                         netif_tx_wake_queue(txq);
2368                         u64_stats_update_begin(&tx_ring->syncp);
2369                         tx_ring->tx_stats.queue_wakeup++;
2370                         u64_stats_update_end(&tx_ring->syncp);
2371                 }
2372         }
2373
2374         if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
2375                 /* trigger the dma engine. ena_com_write_sq_doorbell()
2376                  * has an mb
2377                  */
2378                 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2379                 u64_stats_update_begin(&tx_ring->syncp);
2380                 tx_ring->tx_stats.doorbells++;
2381                 u64_stats_update_end(&tx_ring->syncp);
2382         }
2383
2384         return NETDEV_TX_OK;
2385
2386 error_unmap_dma:
2387         ena_unmap_tx_skb(tx_ring, tx_info);
2388         tx_info->skb = NULL;
2389
2390 error_drop_packet:
2391         dev_kfree_skb(skb);
2392         return NETDEV_TX_OK;
2393 }
2394
2395 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2396                             struct net_device *sb_dev)
2397 {
2398         u16 qid;
2399         /* We suspect that this is good for in-kernel network services that
2400          * want to loop an incoming skb's rx queue back to tx; with normal
2401          * user-generated traffic we will most probably not get here
2402          */
2403         if (skb_rx_queue_recorded(skb))
2404                 qid = skb_get_rx_queue(skb);
2405         else
2406                 qid = netdev_pick_tx(dev, skb, NULL);
2407
2408         return qid;
2409 }
2410
2411 static void ena_config_host_info(struct ena_com_dev *ena_dev,
2412                                  struct pci_dev *pdev)
2413 {
2414         struct ena_admin_host_info *host_info;
2415         int rc;
2416
2417         /* Allocate only the host info */
2418         rc = ena_com_allocate_host_info(ena_dev);
2419         if (rc) {
2420                 pr_err("Cannot allocate host info\n");
2421                 return;
2422         }
2423
2424         host_info = ena_dev->host_attr.host_info;
2425
2426         host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
2427         host_info->os_type = ENA_ADMIN_OS_LINUX;
2428         host_info->kernel_ver = LINUX_VERSION_CODE;
2429         strlcpy(host_info->kernel_ver_str, utsname()->version,
2430                 sizeof(host_info->kernel_ver_str) - 1);
2431         host_info->os_dist = 0;
2432         strncpy(host_info->os_dist_str, utsname()->release,
2433                 sizeof(host_info->os_dist_str) - 1);
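        /* The driver version is packed into a single 32-bit field using the
         * ENA_ADMIN_HOST_INFO_*_SHIFT offsets; the trailing "K"[0] byte is
         * presumably a module-type tag for the in-kernel driver.
         */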
2434         host_info->driver_version =
2435                 (DRV_MODULE_VER_MAJOR) |
2436                 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2437                 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
2438                 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
2439         host_info->num_cpus = num_online_cpus();
2440
2441         rc = ena_com_set_host_attributes(ena_dev);
2442         if (rc) {
2443                 if (rc == -EOPNOTSUPP)
2444                         pr_warn("Cannot set host attributes\n");
2445                 else
2446                         pr_err("Cannot set host attributes\n");
2447
2448                 goto err;
2449         }
2450
2451         return;
2452
2453 err:
2454         ena_com_delete_host_info(ena_dev);
2455 }
2456
2457 static void ena_config_debug_area(struct ena_adapter *adapter)
2458 {
2459         u32 debug_area_size;
2460         int rc, ss_count;
2461
2462         ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2463         if (ss_count <= 0) {
2464                 netif_err(adapter, drv, adapter->netdev,
2465                           "SS count is negative\n");
2466                 return;
2467         }
2468
2469         /* allocate 32 bytes for each string and 64bit for the value */
2470         debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
2471
2472         rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2473         if (rc) {
2474                 pr_err("Cannot allocate debug area\n");
2475                 return;
2476         }
2477
2478         rc = ena_com_set_host_attributes(adapter->ena_dev);
2479         if (rc) {
2480                 if (rc == -EOPNOTSUPP)
2481                         netif_warn(adapter, drv, adapter->netdev,
2482                                    "Cannot set host attributes\n");
2483                 else
2484                         netif_err(adapter, drv, adapter->netdev,
2485                                   "Cannot set host attributes\n");
2486                 goto err;
2487         }
2488
2489         return;
2490 err:
2491         ena_com_delete_debug_area(adapter->ena_dev);
2492 }
2493
2494 static void ena_get_stats64(struct net_device *netdev,
2495                             struct rtnl_link_stats64 *stats)
2496 {
2497         struct ena_adapter *adapter = netdev_priv(netdev);
2498         struct ena_ring *rx_ring, *tx_ring;
2499         unsigned int start;
2500         u64 rx_drops;
2501         int i;
2502
2503         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2504                 return;
2505
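        /* Per-ring counters are sampled under the u64_stats fetch/retry loops
         * so that 64-bit values are read consistently even on 32-bit hosts.
         */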
2506         for (i = 0; i < adapter->num_queues; i++) {
2507                 u64 bytes, packets;
2508
2509                 tx_ring = &adapter->tx_ring[i];
2510
2511                 do {
2512                         start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
2513                         packets = tx_ring->tx_stats.cnt;
2514                         bytes = tx_ring->tx_stats.bytes;
2515                 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
2516
2517                 stats->tx_packets += packets;
2518                 stats->tx_bytes += bytes;
2519
2520                 rx_ring = &adapter->rx_ring[i];
2521
2522                 do {
2523                         start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
2524                         packets = rx_ring->rx_stats.cnt;
2525                         bytes = rx_ring->rx_stats.bytes;
2526                 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
2527
2528                 stats->rx_packets += packets;
2529                 stats->rx_bytes += bytes;
2530         }
2531
2532         do {
2533                 start = u64_stats_fetch_begin_irq(&adapter->syncp);
2534                 rx_drops = adapter->dev_stats.rx_drops;
2535         } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
2536
2537         stats->rx_dropped = rx_drops;
2538
2539         stats->multicast = 0;
2540         stats->collisions = 0;
2541
2542         stats->rx_length_errors = 0;
2543         stats->rx_crc_errors = 0;
2544         stats->rx_frame_errors = 0;
2545         stats->rx_fifo_errors = 0;
2546         stats->rx_missed_errors = 0;
2547         stats->tx_window_errors = 0;
2548
2549         stats->rx_errors = 0;
2550         stats->tx_errors = 0;
2551 }
2552
2553 static const struct net_device_ops ena_netdev_ops = {
2554         .ndo_open               = ena_open,
2555         .ndo_stop               = ena_close,
2556         .ndo_start_xmit         = ena_start_xmit,
2557         .ndo_select_queue       = ena_select_queue,
2558         .ndo_get_stats64        = ena_get_stats64,
2559         .ndo_tx_timeout         = ena_tx_timeout,
2560         .ndo_change_mtu         = ena_change_mtu,
2561         .ndo_set_mac_address    = NULL,
2562         .ndo_validate_addr      = eth_validate_addr,
2563 };
2564
2565 static int ena_device_validate_params(struct ena_adapter *adapter,
2566                                       struct ena_com_dev_get_features_ctx *get_feat_ctx)
2567 {
2568         struct net_device *netdev = adapter->netdev;
2569         int rc;
2570
2571         rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2572                               adapter->mac_addr);
2573         if (!rc) {
2574                 netif_err(adapter, drv, netdev,
2575                           "Error, mac addresses are different\n");
2576                 return -EINVAL;
2577         }
2578
2579         if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2580                 netif_err(adapter, drv, netdev,
2581                           "Error, device max mtu is smaller than netdev MTU\n");
2582                 return -EINVAL;
2583         }
2584
2585         return 0;
2586 }
2587
2588 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2589                            struct ena_com_dev_get_features_ctx *get_feat_ctx,
2590                            bool *wd_state)
2591 {
2592         struct device *dev = &pdev->dev;
2593         bool readless_supported;
2594         u32 aenq_groups;
2595         int dma_width;
2596         int rc;
2597
2598         rc = ena_com_mmio_reg_read_request_init(ena_dev);
2599         if (rc) {
2600                 dev_err(dev, "failed to init mmio read less\n");
2601                 return rc;
2602         }
2603
2604         /* The PCIe configuration space revision id indicates if mmio reg
2605          * read is disabled
2606          */
2607         readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
2608         ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2609
2610         rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2611         if (rc) {
2612                 dev_err(dev, "Can not reset device\n");
2613                 goto err_mmio_read_less;
2614         }
2615
2616         rc = ena_com_validate_version(ena_dev);
2617         if (rc) {
2618                 dev_err(dev, "device version is too low\n");
2619                 goto err_mmio_read_less;
2620         }
2621
2622         dma_width = ena_com_get_dma_width(ena_dev);
2623         if (dma_width < 0) {
2624                 dev_err(dev, "Invalid dma width value %d", dma_width);
2625                 rc = dma_width;
2626                 goto err_mmio_read_less;
2627         }
2628
2629         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2630         if (rc) {
2631                 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
2632                 goto err_mmio_read_less;
2633         }
2634
2635         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2636         if (rc) {
2637                 dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
2638                         rc);
2639                 goto err_mmio_read_less;
2640         }
2641
2642         /* ENA admin level init */
2643         rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2644         if (rc) {
2645                 dev_err(dev,
2646                         "Can not initialize ena admin queue with device\n");
2647                 goto err_mmio_read_less;
2648         }
2649
2650         /* To enable the msix interrupts the driver needs to know the number
2651          * of queues. So the driver uses polling mode to retrieve this
2652          * information
2653          */
2654         ena_com_set_admin_polling_mode(ena_dev, true);
2655
2656         ena_config_host_info(ena_dev, pdev);
2657
2658         /* Get Device Attributes */
2659         rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2660         if (rc) {
2661                 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2662                 goto err_admin_init;
2663         }
2664
2665         /* Try to turn on all the available aenq groups */
2666         aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2667                 BIT(ENA_ADMIN_FATAL_ERROR) |
2668                 BIT(ENA_ADMIN_WARNING) |
2669                 BIT(ENA_ADMIN_NOTIFICATION) |
2670                 BIT(ENA_ADMIN_KEEP_ALIVE);
2671
2672         aenq_groups &= get_feat_ctx->aenq.supported_groups;
2673
2674         rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2675         if (rc) {
2676                 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
2677                 goto err_admin_init;
2678         }
2679
2680         *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2681
2682         return 0;
2683
2684 err_admin_init:
2685         ena_com_delete_host_info(ena_dev);
2686         ena_com_admin_destroy(ena_dev);
2687 err_mmio_read_less:
2688         ena_com_mmio_reg_read_request_destroy(ena_dev);
2689
2690         return rc;
2691 }
2692
2693 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
2694                                                     int io_vectors)
2695 {
2696         struct ena_com_dev *ena_dev = adapter->ena_dev;
2697         struct device *dev = &adapter->pdev->dev;
2698         int rc;
2699
2700         rc = ena_enable_msix(adapter, io_vectors);
2701         if (rc) {
2702                 dev_err(dev, "Can not reserve msix vectors\n");
2703                 return rc;
2704         }
2705
2706         ena_setup_mgmnt_intr(adapter);
2707
2708         rc = ena_request_mgmnt_irq(adapter);
2709         if (rc) {
2710                 dev_err(dev, "Can not setup management interrupts\n");
2711                 goto err_disable_msix;
2712         }
2713
2714         ena_com_set_admin_polling_mode(ena_dev, false);
2715
2716         ena_com_admin_aenq_enable(ena_dev);
2717
2718         return 0;
2719
2720 err_disable_msix:
2721         ena_disable_msix(adapter);
2722
2723         return rc;
2724 }
2725
2726 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2727 {
2728         struct net_device *netdev = adapter->netdev;
2729         struct ena_com_dev *ena_dev = adapter->ena_dev;
2730         bool dev_up;
2731
2732         if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2733                 return;
2734
2735         netif_carrier_off(netdev);
2736
2737         del_timer_sync(&adapter->timer_service);
2738
2739         dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2740         adapter->dev_up_before_reset = dev_up;
2741         if (!graceful)
2742                 ena_com_set_admin_running_state(ena_dev, false);
2743
2744         if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2745                 ena_down(adapter);
2746
2747         /* Stop the device from sending AENQ events (in case the reset flag is
2748          * set and the device is up, ena_down() has already reset the device)
2749          */
2750         if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2751                 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2752
2753         ena_free_mgmnt_irq(adapter);
2754
2755         ena_disable_msix(adapter);
2756
2757         ena_com_abort_admin_commands(ena_dev);
2758
2759         ena_com_wait_for_abort_completion(ena_dev);
2760
2761         ena_com_admin_destroy(ena_dev);
2762
2763         ena_com_mmio_reg_read_request_destroy(ena_dev);
2764
2765         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2766
2767         clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2768         clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2769 }
2770
2771 static int ena_restore_device(struct ena_adapter *adapter)
2772 {
2773         struct ena_com_dev_get_features_ctx get_feat_ctx;
2774         struct ena_com_dev *ena_dev = adapter->ena_dev;
2775         struct pci_dev *pdev = adapter->pdev;
2776         bool wd_state;
2777         int rc;
2778
2779         set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2780         rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
2781         if (rc) {
2782                 dev_err(&pdev->dev, "Can not initialize device\n");
2783                 goto err;
2784         }
2785         adapter->wd_state = wd_state;
2786
2787         rc = ena_device_validate_params(adapter, &get_feat_ctx);
2788         if (rc) {
2789                 dev_err(&pdev->dev, "Validation of device parameters failed\n");
2790                 goto err_device_destroy;
2791         }
2792
2793         rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2794                                                       adapter->num_queues);
2795         if (rc) {
2796                 dev_err(&pdev->dev, "Enable MSI-X failed\n");
2797                 goto err_device_destroy;
2798         }
2799         /* If the interface was up before the reset bring it up */
2800         if (adapter->dev_up_before_reset) {
2801                 rc = ena_up(adapter);
2802                 if (rc) {
2803                         dev_err(&pdev->dev, "Failed to create I/O queues\n");
2804                         goto err_disable_msix;
2805                 }
2806         }
2807
2808         set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2809
2810         clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2811         if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2812                 netif_carrier_on(adapter->netdev);
2813
2814         mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2815         dev_err(&pdev->dev,
2816                 "Device reset completed successfully, Driver info: %s\n",
2817                 version);
2818
2819         return rc;
2820 err_disable_msix:
2821         ena_free_mgmnt_irq(adapter);
2822         ena_disable_msix(adapter);
2823 err_device_destroy:
2824         ena_com_abort_admin_commands(ena_dev);
2825         ena_com_wait_for_abort_completion(ena_dev);
2826         ena_com_admin_destroy(ena_dev);
2827         ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
2828         ena_com_mmio_reg_read_request_destroy(ena_dev);
2829 err:
2830         clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2831         clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2832         dev_err(&pdev->dev,
2833                 "Reset attempt failed. Can not reset the device\n");
2834
2835         return rc;
2836 }
2837
2838 static void ena_fw_reset_device(struct work_struct *work)
2839 {
2840         struct ena_adapter *adapter =
2841                 container_of(work, struct ena_adapter, reset_task);
2842         struct pci_dev *pdev = adapter->pdev;
2843
2844         if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2845                 dev_err(&pdev->dev,
2846                         "device reset scheduled while reset bit is off\n");
2847                 return;
2848         }
2849         rtnl_lock();
2850         ena_destroy_device(adapter, false);
2851         ena_restore_device(adapter);
2852         rtnl_unlock();
2853 }
2854
2855 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2856                                         struct ena_ring *rx_ring)
2857 {
2858         if (likely(rx_ring->first_interrupt))
2859                 return 0;
2860
2861         if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2862                 return 0;
2863
2864         rx_ring->no_interrupt_event_cnt++;
2865
2866         if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2867                 netif_err(adapter, rx_err, adapter->netdev,
2868                           "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2869                           rx_ring->qid);
2870                 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2871                 smp_mb__before_atomic();
2872                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2873                 return -EIO;
2874         }
2875
2876         return 0;
2877 }
2878
2879 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2880                                           struct ena_ring *tx_ring)
2881 {
2882         struct ena_tx_buffer *tx_buf;
2883         unsigned long last_jiffies;
2884         u32 missed_tx = 0;
2885         int i, rc = 0;
2886
2887         for (i = 0; i < tx_ring->ring_size; i++) {
2888                 tx_buf = &tx_ring->tx_buffer_info[i];
2889                 last_jiffies = tx_buf->last_jiffies;
2890
2891                 if (last_jiffies == 0)
2892                         /* no pending Tx at this location */
2893                         continue;
2894
2895                 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
2896                              2 * adapter->missing_tx_completion_to))) {
2897                         /* If after the graceful period the interrupt is still
2898                          * not received, we schedule a reset
2899                          */
2900                         netif_err(adapter, tx_err, adapter->netdev,
2901                                   "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
2902                                   tx_ring->qid);
2903                         adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2904                         smp_mb__before_atomic();
2905                         set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2906                         return -EIO;
2907                 }
2908
2909                 if (unlikely(time_is_before_jiffies(last_jiffies +
2910                                 adapter->missing_tx_completion_to))) {
2911                         if (!tx_buf->print_once)
2912                                 netif_notice(adapter, tx_err, adapter->netdev,
2913                                              "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2914                                              tx_ring->qid, i);
2915
2916                         tx_buf->print_once = 1;
2917                         missed_tx++;
2918                 }
2919         }
2920
2921         if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
2922                 netif_err(adapter, tx_err, adapter->netdev,
2923                           "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2924                           missed_tx,
2925                           adapter->missing_tx_completion_threshold);
2926                 adapter->reset_reason =
2927                         ENA_REGS_RESET_MISS_TX_CMPL;
2928                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2929                 rc = -EIO;
2930         }
2931
2932         u64_stats_update_begin(&tx_ring->syncp);
2933         tx_ring->tx_stats.missed_tx = missed_tx;
2934         u64_stats_update_end(&tx_ring->syncp);
2935
2936         return rc;
2937 }
2938
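/* Round-robin watchdog over the IO queues: check up to ENA_MONITORED_TX_QUEUES
 * Tx/Rx queue pairs per invocation for missing Tx completions and missing Rx
 * interrupts, resuming from the last monitored queue on the next run.
 */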
2939 static void check_for_missing_completions(struct ena_adapter *adapter)
2940 {
2941         struct ena_ring *tx_ring;
2942         struct ena_ring *rx_ring;
2943         int i, budget, rc;
2944
2945         /* Make sure we don't race with another process bringing the device down */
2946         smp_rmb();
2947
2948         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2949                 return;
2950
2951         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2952                 return;
2953
2954         if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
2955                 return;
2956
2957         budget = ENA_MONITORED_TX_QUEUES;
2958
2959         for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2960                 tx_ring = &adapter->tx_ring[i];
2961                 rx_ring = &adapter->rx_ring[i];
2962
2963                 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
2964                 if (unlikely(rc))
2965                         return;
2966
2967                 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
2968                 if (unlikely(rc))
2969                         return;
2970
2971                 budget--;
2972                 if (!budget)
2973                         break;
2974         }
2975
2976         adapter->last_monitored_tx_qid = i % adapter->num_queues;
2977 }
2978
2979 /* trigger napi schedule after 2 consecutive detections */
2980 #define EMPTY_RX_REFILL 2
2981 /* For the rare case where the device runs out of Rx descriptors and the
2982  * napi handler failed to refill new Rx descriptors (due to a lack of memory
2983  * for example).
2984  * This case will lead to a deadlock:
2985  * The device won't send interrupts since all the new Rx packets will be dropped.
2986  * The napi handler won't allocate new Rx descriptors so the device won't be
2987  * able to send new packets.
2988  *
2989  * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2990  * It is recommended to have at least 512MB, with a minimum of 128MB for a
2991  * constrained environment.
2992  *
2993  * When such a situation is detected - reschedule napi
2994  */
2995 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2996 {
2997         struct ena_ring *rx_ring;
2998         int i, refill_required;
2999
3000         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3001                 return;
3002
3003         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3004                 return;
3005
3006         for (i = 0; i < adapter->num_queues; i++) {
3007                 rx_ring = &adapter->rx_ring[i];
3008
3009                 refill_required =
3010                         ena_com_free_desc(rx_ring->ena_com_io_sq);
3011                 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3012                         rx_ring->empty_rx_queue++;
3013
3014                         if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3015                                 u64_stats_update_begin(&rx_ring->syncp);
3016                                 rx_ring->rx_stats.empty_rx_ring++;
3017                                 u64_stats_update_end(&rx_ring->syncp);
3018
3019                                 netif_err(adapter, drv, adapter->netdev,
3020                                           "trigger refill for ring %d\n", i);
3021
3022                                 napi_schedule(rx_ring->napi);
3023                                 rx_ring->empty_rx_queue = 0;
3024                         }
3025                 } else {
3026                         rx_ring->empty_rx_queue = 0;
3027                 }
3028         }
3029 }
3030
3031 /* Check for keep alive expiration */
3032 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3033 {
3034         unsigned long keep_alive_expired;
3035
3036         if (!adapter->wd_state)
3037                 return;
3038
3039         if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3040                 return;
3041
3042         keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
3043                                            adapter->keep_alive_timeout);
3044         if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3045                 netif_err(adapter, drv, adapter->netdev,
3046                           "Keep alive watchdog timeout.\n");
3047                 u64_stats_update_begin(&adapter->syncp);
3048                 adapter->dev_stats.wd_expired++;
3049                 u64_stats_update_end(&adapter->syncp);
3050                 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3051                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3052         }
3053 }
3054
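/* Flag the adapter for reset if the admin queue is no longer in running state */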
3055 static void check_for_admin_com_state(struct ena_adapter *adapter)
3056 {
3057         if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3058                 netif_err(adapter, drv, adapter->netdev,
3059                           "ENA admin queue is not in running state!\n");
3060                 u64_stats_update_begin(&adapter->syncp);
3061                 adapter->dev_stats.admin_q_pause++;
3062                 u64_stats_update_end(&adapter->syncp);
3063                 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3064                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3065         }
3066 }
3067
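/* Apply hardware hints reported by the device (at init time or via an AENQ
 * notification) to the driver's admin, watchdog and Tx-completion timeouts.
 */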
3068 static void ena_update_hints(struct ena_adapter *adapter,
3069                              struct ena_admin_ena_hw_hints *hints)
3070 {
3071         struct net_device *netdev = adapter->netdev;
3072
3073         if (hints->admin_completion_tx_timeout)
3074                 adapter->ena_dev->admin_queue.completion_timeout =
3075                         hints->admin_completion_tx_timeout * 1000;
3076
3077         if (hints->mmio_read_timeout)
3078                 /* convert to usec */
3079                 adapter->ena_dev->mmio_read.reg_read_to =
3080                         hints->mmio_read_timeout * 1000;
3081
3082         if (hints->missed_tx_completion_count_threshold_to_reset)
3083                 adapter->missing_tx_completion_threshold =
3084                         hints->missed_tx_completion_count_threshold_to_reset;
3085
3086         if (hints->missing_tx_completion_timeout) {
3087                 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3088                         adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3089                 else
3090                         adapter->missing_tx_completion_to =
3091                                 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3092         }
3093
3094         if (hints->netdev_wd_timeout)
3095                 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3096
3097         if (hints->driver_watchdog_timeout) {
3098                 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3099                         adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3100                 else
3101                         adapter->keep_alive_timeout =
3102                                 msecs_to_jiffies(hints->driver_watchdog_timeout);
3103         }
3104 }
3105
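/* Refresh the netdev feature bits that are exposed to the device through the
 * host-info page.
 */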
3106 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3107                                  struct net_device *netdev)
3108 {
3109         host_info->supported_network_features[0] =
3110                 netdev->features & GENMASK_ULL(31, 0);
3111         host_info->supported_network_features[1] =
3112                 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3113 }
3114
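/* Periodic (1 second) service timer: run the keep-alive, admin-queue,
 * missing-completion and empty-Rx-ring checks, refresh the host info and
 * debug area, and queue the reset task instead of rearming when a reset
 * was triggered.
 */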
3115 static void ena_timer_service(struct timer_list *t)
3116 {
3117         struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3118         u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3119         struct ena_admin_host_info *host_info =
3120                 adapter->ena_dev->host_attr.host_info;
3121
3122         check_for_missing_keep_alive(adapter);
3123
3124         check_for_admin_com_state(adapter);
3125
3126         check_for_missing_completions(adapter);
3127
3128         check_for_empty_rx_ring(adapter);
3129
3130         if (debug_area)
3131                 ena_dump_stats_to_buf(adapter, debug_area);
3132
3133         if (host_info)
3134                 ena_update_host_info(host_info, adapter->netdev);
3135
3136         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3137                 netif_err(adapter, drv, adapter->netdev,
3138                           "Trigger reset is on\n");
3139                 ena_dump_stats_to_dmesg(adapter);
3140                 queue_work(ena_wq, &adapter->reset_task);
3141                 return;
3142         }
3143
3144         /* Reset the timer */
3145         mod_timer(&adapter->timer_service, jiffies + HZ);
3146 }
3147
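/* Number of IO queues: the minimum of the online CPUs, the driver maximum and
 * the device Tx/Rx queue limits, further capped by the available MSI-X vectors
 * (one vector is reserved for management).
 */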
3148 static int ena_calc_io_queue_num(struct pci_dev *pdev,
3149                                  struct ena_com_dev *ena_dev,
3150                                  struct ena_com_dev_get_features_ctx *get_feat_ctx)
3151 {
3152         int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num;
3153
3154         if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3155                 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3156                         &get_feat_ctx->max_queue_ext.max_queue_ext;
3157                 io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
3158                                   max_queue_ext->max_rx_cq_num);
3159
3160                 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3161                 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3162         } else {
3163                 struct ena_admin_queue_feature_desc *max_queues =
3164                         &get_feat_ctx->max_queues;
3165                 io_tx_sq_num = max_queues->max_sq_num;
3166                 io_tx_cq_num = max_queues->max_cq_num;
3167                 io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
3168         }
3169
3170         /* In case of LLQ use the llq fields for the tx SQ/CQ */
3171         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3172                 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
3173
3174         io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3175         io_queue_num = min_t(int, io_queue_num, io_rx_num);
3176         io_queue_num = min_t(int, io_queue_num, io_tx_sq_num);
3177         io_queue_num = min_t(int, io_queue_num, io_tx_cq_num);
3178         /* 1 IRQ for mgmnt and 1 IRQ for each IO queue */
3179         io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
3180         if (unlikely(!io_queue_num)) {
3181                 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3182                 return -EFAULT;
3183         }
3184
3185         return io_queue_num;
3186 }
3187
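/* Use the LLQ (device memory) Tx placement policy only when the device
 * advertises the LLQ feature, the mode configuration succeeds and the LLQ BAR
 * is exposed; otherwise fall back to host memory placement. On success the
 * LLQ BAR is mapped write-combined.
 */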
3188 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3189                                            struct ena_com_dev *ena_dev,
3190                                            struct ena_admin_feature_llq_desc *llq,
3191                                            struct ena_llq_configurations *llq_default_configurations)
3192 {
3193         bool has_mem_bar;
3194         int rc;
3195         u32 llq_feature_mask;
3196
3197         llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3198         if (!(ena_dev->supported_features & llq_feature_mask)) {
3199                 dev_err(&pdev->dev,
3200                         "LLQ is not supported. Fallback to host mode policy.\n");
3201                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3202                 return 0;
3203         }
3204
3205         has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3206
3207         rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3208         if (unlikely(rc)) {
3209                 dev_err(&pdev->dev,
3210                         "Failed to configure the device mode. Fallback to host mode policy.\n");
3211                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3212                 return 0;
3213         }
3214
3215         /* Nothing to config, exit */
3216         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3217                 return 0;
3218
3219         if (!has_mem_bar) {
3220                 dev_err(&pdev->dev,
3221                         "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3222                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3223                 return 0;
3224         }
3225
3226         ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3227                                            pci_resource_start(pdev, ENA_MEM_BAR),
3228                                            pci_resource_len(pdev, ENA_MEM_BAR));
3229
3230         if (!ena_dev->mem_bar)
3231                 return -EFAULT;
3232
3233         return 0;
3234 }
3235
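/* Translate the device-reported offload capabilities into netdev feature flags */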
3236 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3237                                  struct net_device *netdev)
3238 {
3239         netdev_features_t dev_features = 0;
3240
3241         /* Set offload features */
3242         if (feat->offload.tx &
3243                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3244                 dev_features |= NETIF_F_IP_CSUM;
3245
3246         if (feat->offload.tx &
3247                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3248                 dev_features |= NETIF_F_IPV6_CSUM;
3249
3250         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3251                 dev_features |= NETIF_F_TSO;
3252
3253         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3254                 dev_features |= NETIF_F_TSO6;
3255
3256         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3257                 dev_features |= NETIF_F_TSO_ECN;
3258
3259         if (feat->offload.rx_supported &
3260                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3261                 dev_features |= NETIF_F_RXCSUM;
3262
3263         if (feat->offload.rx_supported &
3264                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3265                 dev_features |= NETIF_F_RXCSUM;
3266
3267         netdev->features =
3268                 dev_features |
3269                 NETIF_F_SG |
3270                 NETIF_F_RXHASH |
3271                 NETIF_F_HIGHDMA;
3272
3273         netdev->hw_features |= netdev->features;
3274         netdev->vlan_features |= netdev->features;
3275 }
3276
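/* Set the MAC address (random if the device did not provide a valid one),
 * offload features and MTU limits from the device feature context.
 */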
3277 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3278                                      struct ena_com_dev_get_features_ctx *feat)
3279 {
3280         struct net_device *netdev = adapter->netdev;
3281
3282         /* Copy mac address */
3283         if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3284                 eth_hw_addr_random(netdev);
3285                 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3286         } else {
3287                 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3288                 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3289         }
3290
3291         /* Set offload features */
3292         ena_set_dev_offloads(feat, netdev);
3293
3294         adapter->max_mtu = feat->dev_attr.max_mtu;
3295         netdev->max_mtu = adapter->max_mtu;
3296         netdev->min_mtu = ENA_MIN_MTU;
3297 }
3298
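/* Default RSS configuration: an indirection table spread across all IO
 * queues, CRC32 hash function and default hash control. -EOPNOTSUPP from the
 * device is tolerated.
 */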
3299 static int ena_rss_init_default(struct ena_adapter *adapter)
3300 {
3301         struct ena_com_dev *ena_dev = adapter->ena_dev;
3302         struct device *dev = &adapter->pdev->dev;
3303         int rc, i;
3304         u32 val;
3305
3306         rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3307         if (unlikely(rc)) {
3308                 dev_err(dev, "Cannot init indirect table\n");
3309                 goto err_rss_init;
3310         }
3311
3312         for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3313                 val = ethtool_rxfh_indir_default(i, adapter->num_queues);
3314                 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3315                                                        ENA_IO_RXQ_IDX(val));
3316                 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3317                         dev_err(dev, "Cannot fill indirect table\n");
3318                         goto err_fill_indir;
3319                 }
3320         }
3321
3322         rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3323                                         ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3324         if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3325                 dev_err(dev, "Cannot fill hash function\n");
3326                 goto err_fill_indir;
3327         }
3328
3329         rc = ena_com_set_default_hash_ctrl(ena_dev);
3330         if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3331                 dev_err(dev, "Cannot fill hash control\n");
3332                 goto err_fill_indir;
3333         }
3334
3335         return 0;
3336
3337 err_fill_indir:
3338         ena_com_rss_destroy(ena_dev);
3339 err_rss_init:
3340
3341         return rc;
3342 }
3343
3344 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3345 {
3346         int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3347
3348         pci_release_selected_regions(pdev, release_bars);
3349 }
3350
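/* Default LLQ configuration: 128B ring entries, inline header, multiple
 * descriptors per entry and two descriptors before the header.
 */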
3351 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3352 {
3353         llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3354         llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3355         llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3356         llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3357         llq_config->llq_ring_entry_size_value = 128;
3358 }
3359
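/* Derive the maximum and requested Tx/Rx ring sizes and SGL sizes from the
 * device queue limits (bounded by the LLQ depth when device placement is
 * used), rounded down to a power of two.
 */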
3360 static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
3361 {
3362         struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
3363         struct ena_com_dev *ena_dev = ctx->ena_dev;
3364         u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
3365         u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
3366         u32 max_tx_queue_size;
3367         u32 max_rx_queue_size;
3368
3369         if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3370                 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3371                         &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
3372                 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
3373                                           max_queue_ext->max_rx_sq_depth);
3374                 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
3375
3376                 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3377                         max_tx_queue_size = min_t(u32, max_tx_queue_size,
3378                                                   llq->max_llq_depth);
3379                 else
3380                         max_tx_queue_size = min_t(u32, max_tx_queue_size,
3381                                                   max_queue_ext->max_tx_sq_depth);
3382
3383                 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3384                                              max_queue_ext->max_per_packet_tx_descs);
3385                 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3386                                              max_queue_ext->max_per_packet_rx_descs);
3387         } else {
3388                 struct ena_admin_queue_feature_desc *max_queues =
3389                         &ctx->get_feat_ctx->max_queues;
3390                 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
3391                                           max_queues->max_sq_depth);
3392                 max_tx_queue_size = max_queues->max_cq_depth;
3393
3394                 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3395                         max_tx_queue_size = min_t(u32, max_tx_queue_size,
3396                                                   llq->max_llq_depth);
3397                 else
3398                         max_tx_queue_size = min_t(u32, max_tx_queue_size,
3399                                                   max_queues->max_sq_depth);
3400
3401                 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3402                                              max_queues->max_packet_tx_descs);
3403                 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3404                                              max_queues->max_packet_rx_descs);
3405         }
3406
3407         max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
3408         max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
3409
3410         tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
3411                                   max_tx_queue_size);
3412         rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
3413                                   max_rx_queue_size);
3414
3415         tx_queue_size = rounddown_pow_of_two(tx_queue_size);
3416         rx_queue_size = rounddown_pow_of_two(rx_queue_size);
3417
3418         ctx->max_tx_queue_size = max_tx_queue_size;
3419         ctx->max_rx_queue_size = max_rx_queue_size;
3420         ctx->tx_queue_size = tx_queue_size;
3421         ctx->rx_queue_size = rx_queue_size;
3422
3423         return 0;
3424 }
3425
3426 /* ena_probe - Device Initialization Routine
3427  * @pdev: PCI device information struct
3428  * @ent: entry in ena_pci_tbl
3429  *
3430  * Returns 0 on success, negative on failure
3431  *
3432  * ena_probe initializes an adapter identified by a pci_dev structure.
3433  * The OS initialization, configuring of the adapter private structure,
3434  * and a hardware reset occur.
3435  */
3436 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3437 {
3438         struct ena_com_dev_get_features_ctx get_feat_ctx;
3439         struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
3440         struct ena_llq_configurations llq_config;
3441         struct ena_com_dev *ena_dev = NULL;
3442         struct ena_adapter *adapter;
3443         int io_queue_num, bars, rc;
3444         struct net_device *netdev;
3445         static int adapters_found;
3446         char *queue_type_str;
3447         bool wd_state;
3448
3449         dev_dbg(&pdev->dev, "%s\n", __func__);
3450
3451         dev_info_once(&pdev->dev, "%s", version);
3452
3453         rc = pci_enable_device_mem(pdev);
3454         if (rc) {
3455                 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
3456                 return rc;
3457         }
3458
3459         pci_set_master(pdev);
3460
3461         ena_dev = vzalloc(sizeof(*ena_dev));
3462         if (!ena_dev) {
3463                 rc = -ENOMEM;
3464                 goto err_disable_device;
3465         }
3466
3467         bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3468         rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
3469         if (rc) {
3470                 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
3471                         rc);
3472                 goto err_free_ena_dev;
3473         }
3474
3475         ena_dev->reg_bar = devm_ioremap(&pdev->dev,
3476                                         pci_resource_start(pdev, ENA_REG_BAR),
3477                                         pci_resource_len(pdev, ENA_REG_BAR));
3478         if (!ena_dev->reg_bar) {
3479                 dev_err(&pdev->dev, "failed to remap regs bar\n");
3480                 rc = -EFAULT;
3481                 goto err_free_region;
3482         }
3483
3484         ena_dev->dmadev = &pdev->dev;
3485
3486         rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
3487         if (rc) {
3488                 dev_err(&pdev->dev, "ena device init failed\n");
3489                 if (rc == -ETIME)
3490                         rc = -EPROBE_DEFER;
3491                 goto err_free_region;
3492         }
3493
3494         set_default_llq_configurations(&llq_config);
3495
3496         rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3497                                              &llq_config);
3498         if (rc) {
3499                 dev_err(&pdev->dev, "Failed to set queues placement policy\n");
3500                 goto err_device_destroy;
3501         }
3502
3503         calc_queue_ctx.ena_dev = ena_dev;
3504         calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
3505         calc_queue_ctx.pdev = pdev;
3506
3507         /* Initial Tx and Rx interrupt delay. Assumes 1 usec granularity.
3508          * Updated during device initialization with the real granularity.
3509          */
3510         ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
3511         ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
3512         io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
3513         rc = ena_calc_queue_size(&calc_queue_ctx);
3514         if (rc || io_queue_num <= 0) {
3515                 rc = -EFAULT;
3516                 goto err_device_destroy;
3517         }
3518
3519         dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d, tx queue size: %d, LLQ is %s\n",
3520                  io_queue_num,
3521                  calc_queue_ctx.rx_queue_size,
3522                  calc_queue_ctx.tx_queue_size,
3523                  (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
3524                  "ENABLED" : "DISABLED");
3525
3526         /* dev zeroed in alloc_etherdev_mq */
3527         netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
3528         if (!netdev) {
3529                 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
3530                 rc = -ENOMEM;
3531                 goto err_device_destroy;
3532         }
3533
3534         SET_NETDEV_DEV(netdev, &pdev->dev);
3535
3536         adapter = netdev_priv(netdev);
3537         pci_set_drvdata(pdev, adapter);
3538
3539         adapter->ena_dev = ena_dev;
3540         adapter->netdev = netdev;
3541         adapter->pdev = pdev;
3542
3543         ena_set_conf_feat_params(adapter, &get_feat_ctx);
3544
3545         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3546         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3547
3548         adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3549         adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3550         adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3551         adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3552         adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3553         adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3554
3555         adapter->num_queues = io_queue_num;
3556         adapter->last_monitored_tx_qid = 0;
3557
3558         adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
3559         adapter->wd_state = wd_state;
3560
3561         snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
3562
3563         rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
3564         if (rc) {
3565                 dev_err(&pdev->dev,
3566                         "Failed to query interrupt moderation feature\n");
3567                 goto err_netdev_destroy;
3568         }
3569         ena_init_io_rings(adapter);
3570
3571         netdev->netdev_ops = &ena_netdev_ops;
3572         netdev->watchdog_timeo = TX_TIMEOUT;
3573         ena_set_ethtool_ops(netdev);
3574
3575         netdev->priv_flags |= IFF_UNICAST_FLT;
3576
3577         u64_stats_init(&adapter->syncp);
3578
3579         rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3580         if (rc) {
3581                 dev_err(&pdev->dev,
3582                         "Failed to enable and set the admin interrupts\n");
3583                 goto err_worker_destroy;
3584         }
3585         rc = ena_rss_init_default(adapter);
3586         if (rc && (rc != -EOPNOTSUPP)) {
3587                 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
3588                 goto err_free_msix;
3589         }
3590
3591         ena_config_debug_area(adapter);
3592
3593         memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
3594
3595         netif_carrier_off(netdev);
3596
3597         rc = register_netdev(netdev);
3598         if (rc) {
3599                 dev_err(&pdev->dev, "Cannot register net device\n");
3600                 goto err_rss;
3601         }
3602
3603         INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
3604
3605         adapter->last_keep_alive_jiffies = jiffies;
3606         adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
3607         adapter->missing_tx_completion_to = TX_TIMEOUT;
3608         adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
3609
3610         ena_update_hints(adapter, &get_feat_ctx.hw_hints);
3611
3612         timer_setup(&adapter->timer_service, ena_timer_service, 0);
3613         mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3614
3615         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3616                 queue_type_str = "Regular";
3617         else
3618                 queue_type_str = "Low Latency";
3619
3620         dev_info(&pdev->dev,
3621                  "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
3622                  DEVICE_NAME, (long)pci_resource_start(pdev, 0),
3623                  netdev->dev_addr, io_queue_num, queue_type_str);
3624
3625         set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3626
3627         adapters_found++;
3628
3629         return 0;
3630
3631 err_rss:
3632         ena_com_delete_debug_area(ena_dev);
3633         ena_com_rss_destroy(ena_dev);
3634 err_free_msix:
3635         ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3636         /* stop submitting admin commands on a device that was reset */
3637         ena_com_set_admin_running_state(ena_dev, false);
3638         ena_free_mgmnt_irq(adapter);
3639         ena_disable_msix(adapter);
3640 err_worker_destroy:
3641         ena_com_destroy_interrupt_moderation(ena_dev);
3642         del_timer(&adapter->timer_service);
3643 err_netdev_destroy:
3644         free_netdev(netdev);
3645 err_device_destroy:
3646         ena_com_delete_host_info(ena_dev);
3647         ena_com_admin_destroy(ena_dev);
3648 err_free_region:
3649         ena_release_bars(ena_dev, pdev);
3650 err_free_ena_dev:
3651         vfree(ena_dev);
3652 err_disable_device:
3653         pci_disable_device(pdev);
3654         return rc;
3655 }
3656
3657 /*****************************************************************************/
3658
3659 /* ena_remove - Device Removal Routine
3660  * @pdev: PCI device information struct
3661  *
3662  * ena_remove is called by the PCI subsystem to alert the driver
3663  * that it should release a PCI device.
3664  */
3665 static void ena_remove(struct pci_dev *pdev)
3666 {
3667         struct ena_adapter *adapter = pci_get_drvdata(pdev);
3668         struct ena_com_dev *ena_dev;
3669         struct net_device *netdev;
3670
3671         ena_dev = adapter->ena_dev;
3672         netdev = adapter->netdev;
3673
3674 #ifdef CONFIG_RFS_ACCEL
3675         if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
3676                 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
3677                 netdev->rx_cpu_rmap = NULL;
3678         }
3679 #endif /* CONFIG_RFS_ACCEL */
3680         del_timer_sync(&adapter->timer_service);
3681
3682         cancel_work_sync(&adapter->reset_task);
3683
3684         rtnl_lock();
3685         ena_destroy_device(adapter, true);
3686         rtnl_unlock();
3687
3688         unregister_netdev(netdev);
3689
3690         free_netdev(netdev);
3691
3692         ena_com_rss_destroy(ena_dev);
3693
3694         ena_com_delete_debug_area(ena_dev);
3695
3696         ena_com_delete_host_info(ena_dev);
3697
3698         ena_release_bars(ena_dev, pdev);
3699
3700         pci_disable_device(pdev);
3701
3702         ena_com_destroy_interrupt_moderation(ena_dev);
3703
3704         vfree(ena_dev);
3705 }
3706
3707 #ifdef CONFIG_PM
3708 /* ena_suspend - PM suspend callback
3709  * @pdev: PCI device information struct
3710  * @state: power state
3711  */
3712 static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
3713 {
3714         struct ena_adapter *adapter = pci_get_drvdata(pdev);
3715
3716         u64_stats_update_begin(&adapter->syncp);
3717         adapter->dev_stats.suspend++;
3718         u64_stats_update_end(&adapter->syncp);
3719
3720         rtnl_lock();
3721         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3722                 dev_err(&pdev->dev,
3723                         "ignoring device reset request as the device is being suspended\n");
3724                 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3725         }
3726         ena_destroy_device(adapter, true);
3727         rtnl_unlock();
3728         return 0;
3729 }
3730
3731 /* ena_resume - PM resume callback
3732  * @pdev: PCI device information struct
3733  *
3734  */
3735 static int ena_resume(struct pci_dev *pdev)
3736 {
3737         struct ena_adapter *adapter = pci_get_drvdata(pdev);
3738         int rc;
3739
3740         u64_stats_update_begin(&adapter->syncp);
3741         adapter->dev_stats.resume++;
3742         u64_stats_update_end(&adapter->syncp);
3743
3744         rtnl_lock();
3745         rc = ena_restore_device(adapter);
3746         rtnl_unlock();
3747         return rc;
3748 }
3749 #endif
3750
3751 static struct pci_driver ena_pci_driver = {
3752         .name           = DRV_MODULE_NAME,
3753         .id_table       = ena_pci_tbl,
3754         .probe          = ena_probe,
3755         .remove         = ena_remove,
3756 #ifdef CONFIG_PM
3757         .suspend        = ena_suspend,
3758         .resume         = ena_resume,
3759 #endif
3760         .sriov_configure = pci_sriov_configure_simple,
3761 };
3762
3763 static int __init ena_init(void)
3764 {
3765         pr_info("%s", version);
3766
3767         ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
3768         if (!ena_wq) {
3769                 pr_err("Failed to create workqueue\n");
3770                 return -ENOMEM;
3771         }
3772
3773         return pci_register_driver(&ena_pci_driver);
3774 }
3775
3776 static void __exit ena_cleanup(void)
3777 {
3778         pci_unregister_driver(&ena_pci_driver);
3779
3780         if (ena_wq) {
3781                 destroy_workqueue(ena_wq);
3782                 ena_wq = NULL;
3783         }
3784 }
3785
3786 /******************************************************************************
3787  ******************************** AENQ Handlers *******************************
3788  *****************************************************************************/
3789 /* ena_update_on_link_change:
3790  * Notify the network interface about the change in link status
3791  */
3792 static void ena_update_on_link_change(void *adapter_data,
3793                                       struct ena_admin_aenq_entry *aenq_e)
3794 {
3795         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3796         struct ena_admin_aenq_link_change_desc *aenq_desc =
3797                 (struct ena_admin_aenq_link_change_desc *)aenq_e;
3798         int status = aenq_desc->flags &
3799                 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3800
3801         if (status) {
3802                 netdev_dbg(adapter->netdev, "%s\n", __func__);
3803                 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3804                 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
3805                         netif_carrier_on(adapter->netdev);
3806         } else {
3807                 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3808                 netif_carrier_off(adapter->netdev);
3809         }
3810 }
3811
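/* Keep-alive AENQ handler: refresh the watchdog timestamp and record the
 * device-reported Rx drop count.
 */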
3812 static void ena_keep_alive_wd(void *adapter_data,
3813                               struct ena_admin_aenq_entry *aenq_e)
3814 {
3815         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3816         struct ena_admin_aenq_keep_alive_desc *desc;
3817         u64 rx_drops;
3818
3819         desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3820         adapter->last_keep_alive_jiffies = jiffies;
3821
3822         rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
3823
3824         u64_stats_update_begin(&adapter->syncp);
3825         adapter->dev_stats.rx_drops = rx_drops;
3826         u64_stats_update_end(&adapter->syncp);
3827 }
3828
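/* AENQ notification handler; currently only hardware hint updates are handled */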
3829 static void ena_notification(void *adapter_data,
3830                              struct ena_admin_aenq_entry *aenq_e)
3831 {
3832         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3833         struct ena_admin_ena_hw_hints *hints;
3834
3835         WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3836              "Invalid group(%x) expected %x\n",
3837              aenq_e->aenq_common_desc.group,
3838              ENA_ADMIN_NOTIFICATION);
3839
3840         switch (aenq_e->aenq_common_desc.syndrom) {
3841         case ENA_ADMIN_UPDATE_HINTS:
3842                 hints = (struct ena_admin_ena_hw_hints *)
3843                         (&aenq_e->inline_data_w4);
3844                 ena_update_hints(adapter, hints);
3845                 break;
3846         default:
3847                 netif_err(adapter, drv, adapter->netdev,
3848                           "Invalid aenq notification syndrome %d\n",
3849                           aenq_e->aenq_common_desc.syndrom);
3850         }
3851 }
3852
3853 /* This handler will be called for an unknown event group or unimplemented handlers */
3854 static void unimplemented_aenq_handler(void *data,
3855                                        struct ena_admin_aenq_entry *aenq_e)
3856 {
3857         struct ena_adapter *adapter = (struct ena_adapter *)data;
3858
3859         netif_err(adapter, drv, adapter->netdev,
3860                   "Unknown event was received or event with unimplemented handler\n");
3861 }
3862
3863 static struct ena_aenq_handlers aenq_handlers = {
3864         .handlers = {
3865                 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3866                 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3867                 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3868         },
3869         .unimplemented_handler = unimplemented_aenq_handler
3870 };
3871
3872 module_init(ena_init);
3873 module_exit(ena_cleanup);