/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"

static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

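/* ena_pci_tbl comes from ena_pci_id_tbl.h above; exporting it lets the
 * module loader match this driver against supported ENA PCI devices.
 */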
MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter);
static int ena_restore_device(struct ena_adapter *adapter);

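/* ena_tx_timeout - netdev watchdog callback, invoked when the stack decides
 * a TX queue is hung. It only marks the adapter for reset; the actual
 * recovery is driven later by the driver's reset logic
 * (see ena_destroy_device()/ena_restore_device() declared above).
 */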
static void ena_tx_timeout(struct net_device *dev)
{
        struct ena_adapter *adapter = netdev_priv(dev);

        /* Change the state of the device to trigger reset.
         * Bail out if a reset is already in progress or was triggered.
         */

        if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
                return;

        adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
        u64_stats_update_begin(&adapter->syncp);
        adapter->dev_stats.tx_timeout++;
        u64_stats_update_end(&adapter->syncp);

        netif_err(adapter, tx_err, dev, "Transmit timed out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                adapter->rx_ring[i].mtu = mtu;
}

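/* ena_change_mtu - .ndo_change_mtu handler. The new MTU is applied on the
 * device first; only if that succeeds is it propagated to the RX rings and
 * the net_device.
 */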
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ena_adapter *adapter = netdev_priv(dev);
        int ret;

        ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
        if (!ret) {
                netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
                update_rx_ring_mtu(adapter, new_mtu);
                dev->mtu = new_mtu;
        } else {
                netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
                          new_mtu);
        }

        return ret;
}

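/* Build the aRFS (accelerated RFS) CPU reverse map by registering the IRQ
 * vector of each IO queue. A no-op returning 0 when CONFIG_RFS_ACCEL is
 * disabled.
 */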
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
        u32 i;
        int rc;

        adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
        if (!adapter->netdev->rx_cpu_rmap)
                return -ENOMEM;
        for (i = 0; i < adapter->num_queues; i++) {
                int irq_idx = ENA_IO_IRQ_IDX(i);

                rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
                                      pci_irq_vector(adapter->pdev, irq_idx));
                if (rc) {
                        free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
                        adapter->netdev->rx_cpu_rmap = NULL;
                        return rc;
                }
        }
#endif /* CONFIG_RFS_ACCEL */
        return 0;
}

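/* Fill the fields shared by TX and RX rings: queue id, backing pci/net
 * devices, NAPI context and the per-ring statistics seqcount.
 */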
static void ena_init_io_rings_common(struct ena_adapter *adapter,
                                     struct ena_ring *ring, u16 qid)
{
        ring->qid = qid;
        ring->pdev = adapter->pdev;
        ring->dev = &adapter->pdev->dev;
        ring->netdev = adapter->netdev;
        ring->napi = &adapter->ena_napi[qid].napi;
        ring->adapter = adapter;
        ring->ena_dev = adapter->ena_dev;
        ring->per_napi_packets = 0;
        ring->per_napi_bytes = 0;
        ring->cpu = 0;
        ring->first_interrupt = false;
        ring->no_interrupt_event_cnt = 0;
        u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter)
{
        struct ena_com_dev *ena_dev;
        struct ena_ring *txr, *rxr;
        int i;

        ena_dev = adapter->ena_dev;

        for (i = 0; i < adapter->num_queues; i++) {
                txr = &adapter->tx_ring[i];
                rxr = &adapter->rx_ring[i];

                /* TX/RX common ring state */
                ena_init_io_rings_common(adapter, txr, i);
                ena_init_io_rings_common(adapter, rxr, i);

                /* TX specific ring state */
                txr->ring_size = adapter->tx_ring_size;
                txr->tx_max_header_size = ena_dev->tx_max_header_size;
                txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
                txr->sgl_size = adapter->max_tx_sgl_size;
                txr->smoothed_interval =
                        ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

                /* RX specific ring state */
                rxr->ring_size = adapter->rx_ring_size;
                rxr->rx_copybreak = adapter->rx_copybreak;
                rxr->sgl_size = adapter->max_rx_sgl_size;
                rxr->smoothed_interval =
                        ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
                rxr->empty_rx_queue = 0;
        }
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
        struct ena_ring *tx_ring = &adapter->tx_ring[qid];
        struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
        int size, i, node;

        if (tx_ring->tx_buffer_info) {
                netif_err(adapter, ifup,
                          adapter->netdev, "tx_buffer_info is not NULL");
                return -EEXIST;
        }

        size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
        node = cpu_to_node(ena_irq->cpu);

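        /* Prefer memory local to the queue's IRQ CPU; fall back to an
         * allocation on any NUMA node if the local one fails.
         */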
        tx_ring->tx_buffer_info = vzalloc_node(size, node);
        if (!tx_ring->tx_buffer_info) {
                tx_ring->tx_buffer_info = vzalloc(size);
                if (!tx_ring->tx_buffer_info)
                        return -ENOMEM;
        }

        size = sizeof(u16) * tx_ring->ring_size;
        tx_ring->free_tx_ids = vzalloc_node(size, node);
        if (!tx_ring->free_tx_ids) {
                tx_ring->free_tx_ids = vzalloc(size);
                if (!tx_ring->free_tx_ids) {
                        vfree(tx_ring->tx_buffer_info);
                        return -ENOMEM;
                }
        }

        /* Req id ring for TX out of order completions */
        for (i = 0; i < tx_ring->ring_size; i++)
                tx_ring->free_tx_ids[i] = i;

        /* Reset tx statistics */
        memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        tx_ring->cpu = ena_irq->cpu;
        return 0;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
        struct ena_ring *tx_ring = &adapter->tx_ring[qid];

        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;

        vfree(tx_ring->free_tx_ids);
        tx_ring->free_tx_ids = NULL;
}

/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
        int i, rc = 0;

        for (i = 0; i < adapter->num_queues; i++) {
                rc = ena_setup_tx_resources(adapter, i);
                if (rc)
                        goto err_setup_tx;
        }

        return 0;

err_setup_tx:

        netif_err(adapter, ifup, adapter->netdev,
                  "Tx queue %d: allocation failed\n", i);

        /* rewind the index freeing the rings as we go */
        while (i--)
                ena_free_tx_resources(adapter, i);
        return rc;
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                ena_free_tx_resources(adapter, i);
}

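/* Sanity-check a req_id for an RX buffer. An out-of-range id means the
 * device and driver are out of sync, so a device reset is scheduled.
 */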
static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
        if (likely(req_id < rx_ring->ring_size))
                return 0;

        netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
                  "Invalid rx req_id: %hu\n", req_id);

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->rx_stats.bad_req_id++;
        u64_stats_update_end(&rx_ring->syncp);

        /* Trigger device reset */
        rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
        set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
        return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
                                  u32 qid)
{
        struct ena_ring *rx_ring = &adapter->rx_ring[qid];
        struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
        int size, node, i;

        if (rx_ring->rx_buffer_info) {
                netif_err(adapter, ifup, adapter->netdev,
                          "rx_buffer_info is not NULL");
                return -EEXIST;
        }

        /* allocate an extra element so that the rx path can always
         * prefetch rx_info + 1
         */
        size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
        node = cpu_to_node(ena_irq->cpu);

        rx_ring->rx_buffer_info = vzalloc_node(size, node);
        if (!rx_ring->rx_buffer_info) {
                rx_ring->rx_buffer_info = vzalloc(size);
                if (!rx_ring->rx_buffer_info)
                        return -ENOMEM;
        }

        size = sizeof(u16) * rx_ring->ring_size;
        rx_ring->free_rx_ids = vzalloc_node(size, node);
        if (!rx_ring->free_rx_ids) {
                rx_ring->free_rx_ids = vzalloc(size);
                if (!rx_ring->free_rx_ids) {
                        vfree(rx_ring->rx_buffer_info);
                        return -ENOMEM;
                }
        }

        /* Req id ring for receiving RX pkts out of order */
        for (i = 0; i < rx_ring->ring_size; i++)
                rx_ring->free_rx_ids[i] = i;

        /* Reset rx statistics */
        memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        rx_ring->cpu = ena_irq->cpu;

        return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
                                  u32 qid)
{
        struct ena_ring *rx_ring = &adapter->rx_ring[qid];

        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;

        vfree(rx_ring->free_rx_ids);
        rx_ring->free_rx_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
        int i, rc = 0;

        for (i = 0; i < adapter->num_queues; i++) {
                rc = ena_setup_rx_resources(adapter, i);
                if (rc)
                        goto err_setup_rx;
        }

        return 0;

err_setup_rx:

        netif_err(adapter, ifup, adapter->netdev,
                  "Rx queue %d: allocation failed\n", i);

        /* rewind the index freeing the rings as we go */
        while (i--)
                ena_free_rx_resources(adapter, i);
        return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                ena_free_rx_resources(adapter, i);
}

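/* ena_alloc_rx_page - allocate and DMA-map a full page for an RX buffer and
 * record it in @rx_info. Allocation and mapping failures are counted in the
 * ring statistics.
 */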
static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
                                    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
        struct ena_com_buf *ena_buf;
        struct page *page;
        dma_addr_t dma;

        /* if the previously allocated page has not been consumed yet, keep it */
        if (unlikely(rx_info->page))
                return 0;

        page = alloc_page(gfp);
        if (unlikely(!page)) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.page_alloc_fail++;
                u64_stats_update_end(&rx_ring->syncp);
                return -ENOMEM;
        }

        dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
                           DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.dma_mapping_err++;
                u64_stats_update_end(&rx_ring->syncp);

                __free_page(page);
                return -EIO;
        }
        netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                  "alloc page %p, rx_info %p\n", page, rx_info);

        rx_info->page = page;
        rx_info->page_offset = 0;
        ena_buf = &rx_info->ena_buf;
        ena_buf->paddr = dma;
        ena_buf->len = PAGE_SIZE;

        return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
                             struct ena_rx_buffer *rx_info)
{
        struct page *page = rx_info->page;
        struct ena_com_buf *ena_buf = &rx_info->ena_buf;

        if (unlikely(!page)) {
                netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
                           "Trying to free unallocated buffer\n");
                return;
        }

        dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
                       DMA_FROM_DEVICE);

        __free_page(page);
        rx_info->page = NULL;
}

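/* ena_refill_rx_bufs - post up to @num empty RX buffers to the device.
 * Returns the number of buffers actually queued; callers treat a short
 * count as a partial refill.
 */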
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
        u16 next_to_use, req_id;
        u32 i;
        int rc;

        next_to_use = rx_ring->next_to_use;

        for (i = 0; i < num; i++) {
                struct ena_rx_buffer *rx_info;

                req_id = rx_ring->free_rx_ids[next_to_use];
                rc = validate_rx_req_id(rx_ring, req_id);
                if (unlikely(rc < 0))
                        break;

                rx_info = &rx_ring->rx_buffer_info[req_id];

                rc = ena_alloc_rx_page(rx_ring, rx_info,
                                       GFP_ATOMIC | __GFP_COMP);
                if (unlikely(rc < 0)) {
                        netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
                                   "failed to alloc buffer for rx queue %d\n",
                                   rx_ring->qid);
                        break;
                }
                rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
                                                &rx_info->ena_buf,
                                                req_id);
                if (unlikely(rc)) {
                        netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
                                   "failed to add buffer for rx queue %d\n",
                                   rx_ring->qid);
                        break;
                }
                next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
                                                   rx_ring->ring_size);
        }

        if (unlikely(i < num)) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.refil_partial++;
                u64_stats_update_end(&rx_ring->syncp);
                netdev_warn(rx_ring->netdev,
                            "refilled rx qid %d with only %d buffers (from %d)\n",
                            rx_ring->qid, i, num);
        }

        if (likely(i)) {
                /* Add a memory barrier to make sure the descriptors were
                 * written before issuing the doorbell
                 */
                wmb();
                ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
                mmiowb();
        }

        rx_ring->next_to_use = next_to_use;

        return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
                             u32 qid)
{
        struct ena_ring *rx_ring = &adapter->rx_ring[qid];
        u32 i;

        for (i = 0; i < rx_ring->ring_size; i++) {
                struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

                if (rx_info->page)
                        ena_free_rx_page(rx_ring, rx_info);
        }
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 *
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
        struct ena_ring *rx_ring;
        int i, rc, bufs_num;

        for (i = 0; i < adapter->num_queues; i++) {
                rx_ring = &adapter->rx_ring[i];
                bufs_num = rx_ring->ring_size - 1;
                rc = ena_refill_rx_bufs(rx_ring, bufs_num);

                if (unlikely(rc != bufs_num))
                        netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
                                   "refilling Queue %d failed. allocated %d buffers from: %d\n",
                                   i, rc, bufs_num);
        }
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_queues; i++)
                ena_free_rx_bufs(adapter, i);
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring whose buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
        bool print_once = true;
        u32 i;

        for (i = 0; i < tx_ring->ring_size; i++) {
                struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
                struct ena_com_buf *ena_buf;
                int nr_frags;
                int j;

                if (!tx_info->skb)
                        continue;

                if (print_once) {
                        netdev_notice(tx_ring->netdev,
                                      "free uncompleted tx skb qid %d idx 0x%x\n",
                                      tx_ring->qid, i);
                        print_once = false;
                } else {
                        netdev_dbg(tx_ring->netdev,
                                   "free uncompleted tx skb qid %d idx 0x%x\n",
                                   tx_ring->qid, i);
                }

                ena_buf = tx_info->bufs;
                dma_unmap_single(tx_ring->dev,
                                 ena_buf->paddr,
                                 ena_buf->len,
                                 DMA_TO_DEVICE);

                /* unmap remaining mapped pages */
                nr_frags = tx_info->num_of_bufs - 1;
                for (j = 0; j < nr_frags; j++) {
                        ena_buf++;
                        dma_unmap_page(tx_ring->dev,
                                       ena_buf->paddr,
                                       ena_buf->len,
                                       DMA_TO_DEVICE);
                }

                dev_kfree_skb_any(tx_info->skb);
        }
        netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
        struct ena_ring *tx_ring;
        int i;

        for (i = 0; i < adapter->num_queues; i++) {
                tx_ring = &adapter->tx_ring[i];
                ena_free_tx_bufs(tx_ring);
        }
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
        u16 ena_qid;
        int i;

        for (i = 0; i < adapter->num_queues; i++) {
                ena_qid = ENA_IO_TXQ_IDX(i);
                ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
        }
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
        u16 ena_qid;
        int i;

        for (i = 0; i < adapter->num_queues; i++) {
                ena_qid = ENA_IO_RXQ_IDX(i);
                ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
        }
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
        ena_destroy_all_tx_queues(adapter);
        ena_destroy_all_rx_queues(adapter);
}

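/* Validate a TX completion req_id and verify it maps to an in-flight skb.
 * On failure the bad_req_id stat is bumped and a device reset is scheduled.
 */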
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
        struct ena_tx_buffer *tx_info = NULL;

        if (likely(req_id < tx_ring->ring_size)) {
                tx_info = &tx_ring->tx_buffer_info[req_id];
                if (likely(tx_info->skb))
                        return 0;
        }

        if (tx_info)
                netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
                          "tx_info doesn't have valid skb\n");
        else
                netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
                          "Invalid req_id: %hu\n", req_id);

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.bad_req_id++;
        u64_stats_update_end(&tx_ring->syncp);

        /* Trigger device reset */
        tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
        set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
        return -EFAULT;
}

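/* ena_clean_tx_irq - reclaim up to @budget completed TX packets: unmap
 * their DMA buffers, free the skbs and recycle the req_ids. Wakes the TX
 * queue once enough descriptors are free again.
 */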
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
        struct netdev_queue *txq;
        bool above_thresh;
        u32 tx_bytes = 0;
        u32 total_done = 0;
        u16 next_to_clean;
        u16 req_id;
        int tx_pkts = 0;
        int rc;

        next_to_clean = tx_ring->next_to_clean;
        txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

        while (tx_pkts < budget) {
                struct ena_tx_buffer *tx_info;
                struct sk_buff *skb;
                struct ena_com_buf *ena_buf;
                int i, nr_frags;

                rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
                                                &req_id);
                if (rc)
                        break;

                rc = validate_tx_req_id(tx_ring, req_id);
                if (rc)
                        break;

                tx_info = &tx_ring->tx_buffer_info[req_id];
                skb = tx_info->skb;

                /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
                prefetch(&skb->end);

                tx_info->skb = NULL;
                tx_info->last_jiffies = 0;

                if (likely(tx_info->num_of_bufs != 0)) {
                        ena_buf = tx_info->bufs;

                        dma_unmap_single(tx_ring->dev,
                                         dma_unmap_addr(ena_buf, paddr),
                                         dma_unmap_len(ena_buf, len),
                                         DMA_TO_DEVICE);

                        /* unmap remaining mapped pages */
                        nr_frags = tx_info->num_of_bufs - 1;
                        for (i = 0; i < nr_frags; i++) {
                                ena_buf++;
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(ena_buf, paddr),
                                               dma_unmap_len(ena_buf, len),
                                               DMA_TO_DEVICE);
                        }
                }

                netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
                          "tx_poll: q %d skb %p completed\n", tx_ring->qid,
                          skb);

                tx_bytes += skb->len;
                dev_kfree_skb(skb);
                tx_pkts++;
                total_done += tx_info->tx_descs;

                tx_ring->free_tx_ids[next_to_clean] = req_id;
                next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
                                                     tx_ring->ring_size);
        }

        tx_ring->next_to_clean = next_to_clean;
        ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
        ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

        netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

        netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
                  "tx_poll: q %d done. total pkts: %d\n",
                  tx_ring->qid, tx_pkts);

        /* need to make the ring's circular update visible to
         * ena_start_xmit() before checking for netif_queue_stopped().
         */
        smp_mb();

        above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
                ENA_TX_WAKEUP_THRESH;
        if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
                        ENA_TX_WAKEUP_THRESH;
                if (netif_tx_queue_stopped(txq) && above_thresh) {
                        netif_tx_wake_queue(txq);
                        u64_stats_update_begin(&tx_ring->syncp);
                        tx_ring->tx_stats.queue_wakeup++;
                        u64_stats_update_end(&tx_ring->syncp);
                }
                __netif_tx_unlock(txq);
        }

        tx_ring->per_napi_bytes += tx_bytes;
        tx_ring->per_napi_packets += tx_pkts;

        return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
        struct sk_buff *skb;

        if (frags)
                skb = napi_get_frags(rx_ring->napi);
        else
                skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                                rx_ring->rx_copybreak);

        if (unlikely(!skb)) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.skb_alloc_fail++;
                u64_stats_update_end(&rx_ring->syncp);
                netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                          "Failed to allocate skb. frags: %d\n", frags);
                return NULL;
        }

        return skb;
}

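/* ena_rx_skb - build an skb for a received packet. Small packets (up to
 * rx_copybreak bytes) are copied into a freshly allocated linear skb so the
 * page stays mapped and can be reposted; larger packets attach their pages
 * to the skb as frags instead.
 */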
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
                                  struct ena_com_rx_buf_info *ena_bufs,
                                  u32 descs,
                                  u16 *next_to_clean)
{
        struct sk_buff *skb;
        struct ena_rx_buffer *rx_info;
        u16 len, req_id, buf = 0;
        void *va;

        len = ena_bufs[buf].len;
        req_id = ena_bufs[buf].req_id;
        rx_info = &rx_ring->rx_buffer_info[req_id];

        if (unlikely(!rx_info->page)) {
                netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
                          "Page is NULL\n");
                return NULL;
        }

        netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                  "rx_info %p page %p\n",
                  rx_info, rx_info->page);

        /* save virt address of first buffer */
        va = page_address(rx_info->page) + rx_info->page_offset;
        prefetch(va + NET_IP_ALIGN);

        if (len <= rx_ring->rx_copybreak) {
                skb = ena_alloc_skb(rx_ring, false);
                if (unlikely(!skb))
                        return NULL;

                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx allocated small packet. len %d. data_len %d\n",
                          skb->len, skb->data_len);

                /* sync this buffer for CPU use */
                dma_sync_single_for_cpu(rx_ring->dev,
                                        dma_unmap_addr(&rx_info->ena_buf, paddr),
                                        len,
                                        DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, va, len);
                dma_sync_single_for_device(rx_ring->dev,
                                           dma_unmap_addr(&rx_info->ena_buf, paddr),
                                           len,
                                           DMA_FROM_DEVICE);

                skb_put(skb, len);
                skb->protocol = eth_type_trans(skb, rx_ring->netdev);
                rx_ring->free_rx_ids[*next_to_clean] = req_id;
                *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
                                                     rx_ring->ring_size);
                return skb;
        }

        skb = ena_alloc_skb(rx_ring, true);
        if (unlikely(!skb))
                return NULL;

        do {
                dma_unmap_page(rx_ring->dev,
                               dma_unmap_addr(&rx_info->ena_buf, paddr),
                               PAGE_SIZE, DMA_FROM_DEVICE);

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
                                rx_info->page_offset, len, PAGE_SIZE);

                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx skb updated. len %d. data_len %d\n",
                          skb->len, skb->data_len);

                rx_info->page = NULL;

                rx_ring->free_rx_ids[*next_to_clean] = req_id;
                *next_to_clean =
                        ENA_RX_RING_IDX_NEXT(*next_to_clean,
                                             rx_ring->ring_size);
                if (likely(--descs == 0))
                        break;

                buf++;
                len = ena_bufs[buf].len;
                req_id = ena_bufs[buf].req_id;
                rx_info = &rx_ring->rx_buffer_info[req_id];
        } while (1);

        return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: the RX ring that received the packet
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
                                   struct ena_com_rx_ctx *ena_rx_ctx,
                                   struct sk_buff *skb)
{
        /* Rx csum disabled */
        if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }

        /* For fragmented packets the checksum isn't valid */
        if (ena_rx_ctx->frag) {
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }

        /* if IP and error */
        if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
                     (ena_rx_ctx->l3_csum_err))) {
                /* ipv4 checksum error */
                skb->ip_summed = CHECKSUM_NONE;
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.bad_csum++;
                u64_stats_update_end(&rx_ring->syncp);
                netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                          "RX IPv4 header checksum error\n");
                return;
        }

        /* if TCP/UDP */
        if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
                   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
                if (unlikely(ena_rx_ctx->l4_csum_err)) {
                        /* TCP/UDP checksum error */
                        u64_stats_update_begin(&rx_ring->syncp);
                        rx_ring->rx_stats.bad_csum++;
                        u64_stats_update_end(&rx_ring->syncp);
                        netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
                                  "RX L4 checksum error\n");
                        skb->ip_summed = CHECKSUM_NONE;
                        return;
                }

                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
                            struct ena_com_rx_ctx *ena_rx_ctx,
                            struct sk_buff *skb)
{
        enum pkt_hash_types hash_type;

        if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
                if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
                           (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
                        hash_type = PKT_HASH_TYPE_L4;
                else
                        hash_type = PKT_HASH_TYPE_NONE;

                /* Override hash type if the packet is fragmented */
                if (ena_rx_ctx->frag)
                        hash_type = PKT_HASH_TYPE_NONE;

                skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
        }
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                            u32 budget)
{
        u16 next_to_clean = rx_ring->next_to_clean;
        u32 res_budget, work_done;

        struct ena_com_rx_ctx ena_rx_ctx;
        struct ena_adapter *adapter;
        struct sk_buff *skb;
        int refill_required;
        int refill_threshold;
        int rc = 0;
        int total_len = 0;
        int rx_copybreak_pkt = 0;
        int i;

        netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                  "%s qid %d\n", __func__, rx_ring->qid);
        res_budget = budget;

        do {
                ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
                ena_rx_ctx.max_bufs = rx_ring->sgl_size;
                ena_rx_ctx.descs = 0;
                rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
                                    rx_ring->ena_com_io_sq,
                                    &ena_rx_ctx);
                if (unlikely(rc))
                        goto error;

                if (unlikely(ena_rx_ctx.descs == 0))
                        break;

                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
                          rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
                          ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

                /* allocate skb and fill it */
                skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
                                 &next_to_clean);

                /* exit if we failed to retrieve a buffer */
                if (unlikely(!skb)) {
                        for (i = 0; i < ena_rx_ctx.descs; i++) {
                                rx_ring->free_rx_ids[next_to_clean] =
                                        rx_ring->ena_bufs[i].req_id;
                                next_to_clean =
                                        ENA_RX_RING_IDX_NEXT(next_to_clean,
                                                             rx_ring->ring_size);
                        }
                        break;
                }

                ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

                ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

                skb_record_rx_queue(skb, rx_ring->qid);

                if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
                        total_len += rx_ring->ena_bufs[0].len;
                        rx_copybreak_pkt++;
                        napi_gro_receive(napi, skb);
                } else {
                        total_len += skb->len;
                        napi_gro_frags(napi);
                }

                res_budget--;
        } while (likely(res_budget));

        work_done = budget - res_budget;
        rx_ring->per_napi_bytes += total_len;
        rx_ring->per_napi_packets += work_done;
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->rx_stats.bytes += total_len;
        rx_ring->rx_stats.cnt += work_done;
        rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
        u64_stats_update_end(&rx_ring->syncp);

        rx_ring->next_to_clean = next_to_clean;

        refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
        refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

        /* Optimization, try to batch new rx buffers */
        if (refill_required > refill_threshold) {
                ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
                ena_refill_rx_bufs(rx_ring, refill_required);
        }

        return work_done;

error:
        adapter = netdev_priv(rx_ring->netdev);

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->rx_stats.bad_desc_num++;
        u64_stats_update_end(&rx_ring->syncp);

        /* Too many desc from the device. Trigger reset */
        adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
        set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

        return 0;
}

inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
                                       struct ena_ring *tx_ring)
{
        /* We apply adaptive moderation on Rx path only.
         * Tx uses static interrupt moderation.
         */
        ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
                                          rx_ring->per_napi_packets,
                                          rx_ring->per_napi_bytes,
                                          &rx_ring->smoothed_interval,
                                          &rx_ring->moder_tbl_idx);

        /* Reset per napi packets/bytes */
        tx_ring->per_napi_packets = 0;
        tx_ring->per_napi_bytes = 0;
        rx_ring->per_napi_packets = 0;
        rx_ring->per_napi_bytes = 0;
}

static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
                                        struct ena_ring *rx_ring)
{
        struct ena_eth_io_intr_reg intr_reg;

        /* Update intr register: rx intr delay,
         * tx intr delay and interrupt unmask
         */
        ena_com_update_intr_reg(&intr_reg,
                                rx_ring->smoothed_interval,
                                tx_ring->smoothed_interval,
                                true);

        /* It is a shared MSI-X vector.
         * The Tx and Rx CQs both have a pointer to it,
         * so we use one of them to reach the intr reg.
         */
        ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}

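/* If NAPI has migrated to a different CPU, retarget both rings' completion
 * queues to that CPU's NUMA node and remember the new CPU.
 */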
static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
                                             struct ena_ring *rx_ring)
{
        int cpu = get_cpu();
        int numa_node;

        /* Check only one ring since the 2 rings are running on the same cpu */
        if (likely(tx_ring->cpu == cpu))
                goto out;

        numa_node = cpu_to_node(cpu);
        put_cpu();

        if (numa_node != NUMA_NO_NODE) {
                ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
                ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
        }

        tx_ring->cpu = cpu;
        rx_ring->cpu = cpu;

        return;
out:
        put_cpu();
}

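/* ena_io_poll - NAPI poll handler shared by the TX/RX pair of a queue.
 * TX gets a fraction of the budget (ring_size / ENA_TX_POLL_BUDGET_DIVIDER);
 * the interrupt is unmasked only when both rings consumed less than their
 * budget and NAPI actually completed.
 */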
static int ena_io_poll(struct napi_struct *napi, int budget)
{
        struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
        struct ena_ring *tx_ring, *rx_ring;

        u32 tx_work_done;
        u32 rx_work_done;
        int tx_budget;
        int napi_comp_call = 0;
        int ret;

        tx_ring = ena_napi->tx_ring;
        rx_ring = ena_napi->rx_ring;

        tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

        if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
            test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
                napi_complete_done(napi, 0);
                return 0;
        }

        tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
        rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

        /* If the device is about to reset or go down, avoid unmasking
         * the interrupt and return 0 so NAPI won't reschedule
         */
        if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
                     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
                napi_complete_done(napi, 0);
                ret = 0;

        } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
                napi_comp_call = 1;

                /* Update numa and unmask the interrupt only when scheduled
                 * from the interrupt context (vs from sk_busy_loop)
                 */
                if (napi_complete_done(napi, rx_work_done)) {
                        /* Tx and Rx share the same interrupt vector */
                        if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
                                ena_adjust_intr_moderation(rx_ring, tx_ring);

                        ena_unmask_interrupt(tx_ring, rx_ring);
                }

                ena_update_ring_numa_node(tx_ring, rx_ring);

                ret = rx_work_done;
        } else {
                ret = budget;
        }

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->tx_stats.napi_comp += napi_comp_call;
        tx_ring->tx_stats.tx_poll++;
        u64_stats_update_end(&tx_ring->syncp);

        return ret;
}

static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
        struct ena_adapter *adapter = (struct ena_adapter *)data;

        ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

        /* Don't call the aenq handler before probe is done */
        if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
                ena_com_aenq_intr_handler(adapter->ena_dev, data);

        return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
        struct ena_napi *ena_napi = data;

        ena_napi->tx_ring->first_interrupt = true;
        ena_napi->rx_ring->first_interrupt = true;

        napi_schedule_irqoff(&ena_napi->napi);

        return IRQ_HANDLED;
}

/* Reserve a single MSI-X vector for management (admin + aenq), plus one
 * vector for each potential IO queue. The number of potential IO queues
 * is the minimum of what the device supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
        int msix_vecs, irq_cnt;

        if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
                netif_err(adapter, probe, adapter->netdev,
                          "Error, MSI-X is already enabled\n");
                return -EPERM;
        }

        /* Reserve the max msix vectors we might need */
        msix_vecs = ENA_MAX_MSIX_VEC(num_queues);

        netif_dbg(adapter, probe, adapter->netdev,
                  "trying to enable MSI-X, vectors %d\n", msix_vecs);

        irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
                                        msix_vecs, PCI_IRQ_MSIX);

        if (irq_cnt < 0) {
                netif_err(adapter, probe, adapter->netdev,
                          "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
                return -ENOSPC;
        }

        if (irq_cnt != msix_vecs) {
                netif_notice(adapter, probe, adapter->netdev,
                             "enable only %d MSI-X (out of %d), reduce the number of queues\n",
                             irq_cnt, msix_vecs);
                adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
        }

        if (ena_init_rx_cpu_rmap(adapter))
                netif_warn(adapter, probe, adapter->netdev,
                           "Failed to map IRQs to CPUs\n");

        adapter->msix_vecs = irq_cnt;
        set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

        return 0;
}

static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
        u32 cpu;

        snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
                 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
                 pci_name(adapter->pdev));
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
                ena_intr_msix_mgmnt;
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
                pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
        cpu = cpumask_first(cpu_online_mask);
        adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
        cpumask_set_cpu(cpu,
                        &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

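/* Populate the IRQ table entry for each IO queue, spreading the queues
 * across online CPUs round-robin and recording the affinity hint.
 */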
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
        struct net_device *netdev;
        int irq_idx, i, cpu;

        netdev = adapter->netdev;

        for (i = 0; i < adapter->num_queues; i++) {
                irq_idx = ENA_IO_IRQ_IDX(i);
                cpu = i % num_online_cpus();

                snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
                         "%s-Tx-Rx-%d", netdev->name, i);
                adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
                adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
                adapter->irq_tbl[irq_idx].vector =
                        pci_irq_vector(adapter->pdev, irq_idx);
                adapter->irq_tbl[irq_idx].cpu = cpu;

                cpumask_set_cpu(cpu,
                                &adapter->irq_tbl[irq_idx].affinity_hint_mask);
        }
}

static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
        unsigned long flags = 0;
        struct ena_irq *irq;
        int rc;

        irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
        rc = request_irq(irq->vector, irq->handler, flags, irq->name,
                         irq->data);
        if (rc) {
                netif_err(adapter, probe, adapter->netdev,
                          "failed to request admin irq\n");
                return rc;
        }

        netif_dbg(adapter, probe, adapter->netdev,
                  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
1397                   irq->affinity_hint_mask.bits[0], irq->vector);
1398
1399         irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1400
1401         return rc;
1402 }
1403
1404 static int ena_request_io_irq(struct ena_adapter *adapter)
1405 {
1406         unsigned long flags = 0;
1407         struct ena_irq *irq;
1408         int rc = 0, i, k;
1409
1410         if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1411                 netif_err(adapter, ifup, adapter->netdev,
1412                           "Failed to request I/O IRQ: MSI-X is not enabled\n");
1413                 return -EINVAL;
1414         }
1415
1416         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1417                 irq = &adapter->irq_tbl[i];
1418                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1419                                  irq->data);
1420                 if (rc) {
1421                         netif_err(adapter, ifup, adapter->netdev,
1422                                   "Failed to request I/O IRQ. index %d rc %d\n",
1423                                    i, rc);
1424                         goto err;
1425                 }
1426
1427                 netif_dbg(adapter, ifup, adapter->netdev,
1428                           "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
1429                           i, irq->affinity_hint_mask.bits[0], irq->vector);
1430
1431                 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1432         }
1433
1434         return rc;
1435
1436 err:
1437         for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
1438                 irq = &adapter->irq_tbl[k];
1439                 free_irq(irq->vector, irq->data);
1440         }
1441
1442         return rc;
1443 }
1444
1445 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
1446 {
1447         struct ena_irq *irq;
1448
1449         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1450         synchronize_irq(irq->vector);
1451         irq_set_affinity_hint(irq->vector, NULL);
1452         free_irq(irq->vector, irq->data);
1453 }
1454
1455 static void ena_free_io_irq(struct ena_adapter *adapter)
1456 {
1457         struct ena_irq *irq;
1458         int i;
1459
1460 #ifdef CONFIG_RFS_ACCEL
1461         if (adapter->msix_vecs >= 1) {
1462                 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
1463                 adapter->netdev->rx_cpu_rmap = NULL;
1464         }
1465 #endif /* CONFIG_RFS_ACCEL */
1466
1467         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1468                 irq = &adapter->irq_tbl[i];
1469                 irq_set_affinity_hint(irq->vector, NULL);
1470                 free_irq(irq->vector, irq->data);
1471         }
1472 }
1473
1474 static void ena_disable_msix(struct ena_adapter *adapter)
1475 {
1476         if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
1477                 pci_free_irq_vectors(adapter->pdev);
1478 }
1479
1480 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
1481 {
1482         int i;
1483
1484         if (!netif_running(adapter->netdev))
1485                 return;
1486
1487         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
1488                 synchronize_irq(adapter->irq_tbl[i].vector);
1489 }
1490
1491 static void ena_del_napi(struct ena_adapter *adapter)
1492 {
1493         int i;
1494
1495         for (i = 0; i < adapter->num_queues; i++)
1496                 netif_napi_del(&adapter->ena_napi[i].napi);
1497 }
1498
1499 static void ena_init_napi(struct ena_adapter *adapter)
1500 {
1501         struct ena_napi *napi;
1502         int i;
1503
1504         for (i = 0; i < adapter->num_queues; i++) {
1505                 napi = &adapter->ena_napi[i];
1506
1507                 netif_napi_add(adapter->netdev,
1508                                &adapter->ena_napi[i].napi,
1509                                ena_io_poll,
1510                                ENA_NAPI_BUDGET);
1511                 napi->rx_ring = &adapter->rx_ring[i];
1512                 napi->tx_ring = &adapter->tx_ring[i];
1513                 napi->qid = i;
1514         }
1515 }
1516
1517 static void ena_napi_disable_all(struct ena_adapter *adapter)
1518 {
1519         int i;
1520
1521         for (i = 0; i < adapter->num_queues; i++)
1522                 napi_disable(&adapter->ena_napi[i].napi);
1523 }
1524
1525 static void ena_napi_enable_all(struct ena_adapter *adapter)
1526 {
1527         int i;
1528
1529         for (i = 0; i < adapter->num_queues; i++)
1530                 napi_enable(&adapter->ena_napi[i].napi);
1531 }
1532
1533 static void ena_restore_ethtool_params(struct ena_adapter *adapter)
1534 {
1535         adapter->tx_usecs = 0;
1536         adapter->rx_usecs = 0;
1537         adapter->tx_frames = 1;
1538         adapter->rx_frames = 1;
1539 }
1540
1541 /* Configure Rx forwarding (RSS): indirection table, hash function and hash inputs */
1542 static int ena_rss_configure(struct ena_adapter *adapter)
1543 {
1544         struct ena_com_dev *ena_dev = adapter->ena_dev;
1545         int rc;
1546
1547         /* In case the RSS table wasn't initialized by probe */
1548         if (!ena_dev->rss.tbl_log_size) {
1549                 rc = ena_rss_init_default(adapter);
1550                 if (rc && (rc != -EOPNOTSUPP)) {
1551                         netif_err(adapter, ifup, adapter->netdev,
1552                                   "Failed to init RSS rc: %d\n", rc);
1553                         return rc;
1554                 }
1555         }
1556
1557         /* Set indirect table */
1558         rc = ena_com_indirect_table_set(ena_dev);
1559         if (unlikely(rc && rc != -EOPNOTSUPP))
1560                 return rc;
1561
1562         /* Configure hash function (if supported) */
1563         rc = ena_com_set_hash_function(ena_dev);
1564         if (unlikely(rc && (rc != -EOPNOTSUPP)))
1565                 return rc;
1566
1567         /* Configure hash inputs (if supported) */
1568         rc = ena_com_set_hash_ctrl(ena_dev);
1569         if (unlikely(rc && (rc != -EOPNOTSUPP)))
1570                 return rc;
1571
1572         return 0;
1573 }
1574
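/* Final stage of the bring-up sequence: configure RSS, register the NAPI
 * handlers, apply the current MTU, refill all Rx rings and enable Tx and
 * NAPI.
 */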
1575 static int ena_up_complete(struct ena_adapter *adapter)
1576 {
1577         int rc;
1578
1579         rc = ena_rss_configure(adapter);
1580         if (rc)
1581                 return rc;
1582
1583         ena_init_napi(adapter);
1584
1585         ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1586
1587         ena_refill_all_rx_bufs(adapter);
1588
1589         /* enable transmits */
1590         netif_tx_start_all_queues(adapter->netdev);
1591
1592         ena_restore_ethtool_params(adapter);
1593
1594         ena_napi_enable_all(adapter);
1595
1596         return 0;
1597 }
1598
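/* Create a single Tx queue on the device and retrieve the SQ/CQ handlers
 * used by the datapath. The completion queue is also bound to the NUMA
 * node of the CPU that serves this queue.
 */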
1599 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1600 {
1601         struct ena_com_create_io_ctx ctx = { 0 };
1602         struct ena_com_dev *ena_dev;
1603         struct ena_ring *tx_ring;
1604         u32 msix_vector;
1605         u16 ena_qid;
1606         int rc;
1607
1608         ena_dev = adapter->ena_dev;
1609
1610         tx_ring = &adapter->tx_ring[qid];
1611         msix_vector = ENA_IO_IRQ_IDX(qid);
1612         ena_qid = ENA_IO_TXQ_IDX(qid);
1613
1614         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1615         ctx.qid = ena_qid;
1616         ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1617         ctx.msix_vector = msix_vector;
1618         ctx.queue_size = adapter->tx_ring_size;
1619         ctx.numa_node = cpu_to_node(tx_ring->cpu);
1620
1621         rc = ena_com_create_io_queue(ena_dev, &ctx);
1622         if (rc) {
1623                 netif_err(adapter, ifup, adapter->netdev,
1624                           "Failed to create I/O TX queue num %d rc: %d\n",
1625                           qid, rc);
1626                 return rc;
1627         }
1628
1629         rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1630                                      &tx_ring->ena_com_io_sq,
1631                                      &tx_ring->ena_com_io_cq);
1632         if (rc) {
1633                 netif_err(adapter, ifup, adapter->netdev,
1634                           "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
1635                           qid, rc);
1636                 ena_com_destroy_io_queue(ena_dev, ena_qid);
1637                 return rc;
1638         }
1639
1640         ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
1641         return rc;
1642 }
1643
1644 static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
1645 {
1646         struct ena_com_dev *ena_dev = adapter->ena_dev;
1647         int rc, i;
1648
1649         for (i = 0; i < adapter->num_queues; i++) {
1650                 rc = ena_create_io_tx_queue(adapter, i);
1651                 if (rc)
1652                         goto create_err;
1653         }
1654
1655         return 0;
1656
1657 create_err:
1658         while (i--)
1659                 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1660
1661         return rc;
1662 }
1663
1664 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1665 {
1666         struct ena_com_dev *ena_dev;
1667         struct ena_com_create_io_ctx ctx = { 0 };
1668         struct ena_ring *rx_ring;
1669         u32 msix_vector;
1670         u16 ena_qid;
1671         int rc;
1672
1673         ena_dev = adapter->ena_dev;
1674
1675         rx_ring = &adapter->rx_ring[qid];
1676         msix_vector = ENA_IO_IRQ_IDX(qid);
1677         ena_qid = ENA_IO_RXQ_IDX(qid);
1678
1679         ctx.qid = ena_qid;
1680         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1681         ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1682         ctx.msix_vector = msix_vector;
1683         ctx.queue_size = adapter->rx_ring_size;
1684         ctx.numa_node = cpu_to_node(rx_ring->cpu);
1685
1686         rc = ena_com_create_io_queue(ena_dev, &ctx);
1687         if (rc) {
1688                 netif_err(adapter, ifup, adapter->netdev,
1689                           "Failed to create I/O RX queue num %d rc: %d\n",
1690                           qid, rc);
1691                 return rc;
1692         }
1693
1694         rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1695                                      &rx_ring->ena_com_io_sq,
1696                                      &rx_ring->ena_com_io_cq);
1697         if (rc) {
1698                 netif_err(adapter, ifup, adapter->netdev,
1699                           "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
1700                           qid, rc);
1701                 ena_com_destroy_io_queue(ena_dev, ena_qid);
1702                 return rc;
1703         }
1704
1705         ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
1706
1707         return rc;
1708 }
1709
1710 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
1711 {
1712         struct ena_com_dev *ena_dev = adapter->ena_dev;
1713         int rc, i;
1714
1715         for (i = 0; i < adapter->num_queues; i++) {
1716                 rc = ena_create_io_rx_queue(adapter, i);
1717                 if (rc)
1718                         goto create_err;
1719         }
1720
1721         return 0;
1722
1723 create_err:
1724         while (i--)
1725                 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1726
1727         return rc;
1728 }
1729
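/* Bring-up sequence: request the I/O IRQs, allocate the Tx/Rx descriptor
 * resources, create the device I/O queues and complete the setup.
 * On failure, the steps already taken are unwound in reverse order.
 */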
1730 static int ena_up(struct ena_adapter *adapter)
1731 {
1732         int rc, i;
1733
1734         netdev_dbg(adapter->netdev, "%s\n", __func__);
1735
1736         ena_setup_io_intr(adapter);
1737
1738         rc = ena_request_io_irq(adapter);
1739         if (rc)
1740                 goto err_req_irq;
1741
1742         /* allocate transmit descriptors */
1743         rc = ena_setup_all_tx_resources(adapter);
1744         if (rc)
1745                 goto err_setup_tx;
1746
1747         /* allocate receive descriptors */
1748         rc = ena_setup_all_rx_resources(adapter);
1749         if (rc)
1750                 goto err_setup_rx;
1751
1752         /* Create TX queues */
1753         rc = ena_create_all_io_tx_queues(adapter);
1754         if (rc)
1755                 goto err_create_tx_queues;
1756
1757         /* Create RX queues */
1758         rc = ena_create_all_io_rx_queues(adapter);
1759         if (rc)
1760                 goto err_create_rx_queues;
1761
1762         rc = ena_up_complete(adapter);
1763         if (rc)
1764                 goto err_up;
1765
1766         if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1767                 netif_carrier_on(adapter->netdev);
1768
1769         u64_stats_update_begin(&adapter->syncp);
1770         adapter->dev_stats.interface_up++;
1771         u64_stats_update_end(&adapter->syncp);
1772
1773         set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1774
1775         /* Enable completion queues interrupt */
1776         for (i = 0; i < adapter->num_queues; i++)
1777                 ena_unmask_interrupt(&adapter->tx_ring[i],
1778                                      &adapter->rx_ring[i]);
1779
1780         /* schedule napi in case we had pending packets
1781          * from the last time napi was disabled
1782          */
1783         for (i = 0; i < adapter->num_queues; i++)
1784                 napi_schedule(&adapter->ena_napi[i].napi);
1785
1786         return rc;
1787
1788 err_up:
1789         ena_destroy_all_rx_queues(adapter);
1790 err_create_rx_queues:
1791         ena_destroy_all_tx_queues(adapter);
1792 err_create_tx_queues:
1793         ena_free_all_io_rx_resources(adapter);
1794 err_setup_rx:
1795         ena_free_all_io_tx_resources(adapter);
1796 err_setup_tx:
1797         ena_free_io_irq(adapter);
1798 err_req_irq:
1799
1800         return rc;
1801 }
1802
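/* Teardown counterpart of ena_up(): stop Tx and NAPI, reset the device if
 * a reset was triggered, destroy the I/O queues and free the IRQs, buffers
 * and descriptor resources.
 */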
1803 static void ena_down(struct ena_adapter *adapter)
1804 {
1805         netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
1806
1807         clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1808
1809         u64_stats_update_begin(&adapter->syncp);
1810         adapter->dev_stats.interface_down++;
1811         u64_stats_update_end(&adapter->syncp);
1812
1813         netif_carrier_off(adapter->netdev);
1814         netif_tx_disable(adapter->netdev);
1815
1816         /* After this point the napi handler won't enable the tx queue */
1817         ena_napi_disable_all(adapter);
1818
1819         /* After the queues are destroyed there won't be any new interrupts */
1820
1821         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
1822                 int rc;
1823
1824                 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
1825                 if (rc)
1826                         dev_err(&adapter->pdev->dev, "Device reset failed\n");
1827         }
1828
1829         ena_destroy_all_io_queues(adapter);
1830
1831         ena_disable_io_intr_sync(adapter);
1832         ena_free_io_irq(adapter);
1833         ena_del_napi(adapter);
1834
1835         ena_free_all_tx_bufs(adapter);
1836         ena_free_all_rx_bufs(adapter);
1837         ena_free_all_io_tx_resources(adapter);
1838         ena_free_all_io_rx_resources(adapter);
1839 }
1840
1841 /* ena_open - Called when a network interface is made active
1842  * @netdev: network interface device structure
1843  *
1844  * Returns 0 on success, negative value on failure
1845  *
1846  * The open entry point is called when a network interface is made
1847  * active by the system (IFF_UP).  At this point all resources needed
1848  * for transmit and receive operations are allocated, the interrupt
1849  * handler is registered with the OS, the watchdog timer is started,
1850  * and the stack is notified that the interface is ready.
1851  */
1852 static int ena_open(struct net_device *netdev)
1853 {
1854         struct ena_adapter *adapter = netdev_priv(netdev);
1855         int rc;
1856
1857         /* Notify the stack of the actual queue counts. */
1858         rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
1859         if (rc) {
1860                 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
1861                 return rc;
1862         }
1863
1864         rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
1865         if (rc) {
1866                 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
1867                 return rc;
1868         }
1869
1870         rc = ena_up(adapter);
1871         if (rc)
1872                 return rc;
1873
1874         return rc;
1875 }
1876
1877 /* ena_close - Disables a network interface
1878  * @netdev: network interface device structure
1879  *
1880  * Returns 0, this is not allowed to fail
1881  *
1882  * The close entry point is called when an interface is de-activated
1883  * by the OS.  The hardware is still under the drivers control, but
1884  * needs to be disabled.  A global MAC reset is issued to stop the
1885  * hardware, and all transmit and receive resources are freed.
1886  */
1887 static int ena_close(struct net_device *netdev)
1888 {
1889         struct ena_adapter *adapter = netdev_priv(netdev);
1890
1891         netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
1892
1893         if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1894                 ena_down(adapter);
1895
1896         /* Check the device status and issue a reset if needed */
1897         check_for_admin_com_state(adapter);
1898         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1899                 netif_err(adapter, ifdown, adapter->netdev,
1900                           "Destroy failure, restarting device\n");
1901                 ena_dump_stats_to_dmesg(adapter);
1902                 /* rtnl lock already obtained in dev_ioctl() layer */
1903                 ena_destroy_device(adapter);
1904                 ena_restore_device(adapter);
1905         }
1906
1907         return 0;
1908 }
1909
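/* Fill the Tx context with checksum-offload and TSO metadata derived from
 * the skb (L3/L4 protocol, header lengths and offsets, MSS).
 */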
1910 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
1911 {
1912         u32 mss = skb_shinfo(skb)->gso_size;
1913         struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
1914         u8 l4_protocol = 0;
1915
1916         if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
1917                 ena_tx_ctx->l4_csum_enable = 1;
1918                 if (mss) {
1919                         ena_tx_ctx->tso_enable = 1;
1920                         ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
1921                         ena_tx_ctx->l4_csum_partial = 0;
1922                 } else {
1923                         ena_tx_ctx->tso_enable = 0;
1924                         ena_meta->l4_hdr_len = 0;
1925                         ena_tx_ctx->l4_csum_partial = 1;
1926                 }
1927
1928                 switch (ip_hdr(skb)->version) {
1929                 case IPVERSION:
1930                         ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
1931                         if (ip_hdr(skb)->frag_off & htons(IP_DF))
1932                                 ena_tx_ctx->df = 1;
1933                         if (mss)
1934                                 ena_tx_ctx->l3_csum_enable = 1;
1935                         l4_protocol = ip_hdr(skb)->protocol;
1936                         break;
1937                 case 6:
1938                         ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
1939                         l4_protocol = ipv6_hdr(skb)->nexthdr;
1940                         break;
1941                 default:
1942                         break;
1943                 }
1944
1945                 if (l4_protocol == IPPROTO_TCP)
1946                         ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
1947                 else
1948                         ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
1949
1950                 ena_meta->mss = mss;
1951                 ena_meta->l3_hdr_len = skb_network_header_len(skb);
1952                 ena_meta->l3_hdr_offset = skb_network_offset(skb);
1953                 ena_tx_ctx->meta_valid = 1;
1954
1955         } else {
1956                 ena_tx_ctx->meta_valid = 0;
1957         }
1958 }
1959
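/* The device can describe at most sgl_size buffers per packet; linearize
 * the skb when its fragment count exceeds what the Tx ring supports.
 */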
1960 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
1961                                        struct sk_buff *skb)
1962 {
1963         int num_frags, header_len, rc;
1964
1965         num_frags = skb_shinfo(skb)->nr_frags;
1966         header_len = skb_headlen(skb);
1967
1968         if (num_frags < tx_ring->sgl_size)
1969                 return 0;
1970
1971         if ((num_frags == tx_ring->sgl_size) &&
1972             (header_len < tx_ring->tx_max_header_size))
1973                 return 0;
1974
1975         u64_stats_update_begin(&tx_ring->syncp);
1976         tx_ring->tx_stats.linearize++;
1977         u64_stats_update_end(&tx_ring->syncp);
1978
1979         rc = skb_linearize(skb);
1980         if (unlikely(rc)) {
1981                 u64_stats_update_begin(&tx_ring->syncp);
1982                 tx_ring->tx_stats.linearize_failed++;
1983                 u64_stats_update_end(&tx_ring->syncp);
1984         }
1985
1986         return rc;
1987 }
1988
1989 /* Called with netif_tx_lock held. */
1990 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
1991 {
1992         struct ena_adapter *adapter = netdev_priv(dev);
1993         struct ena_tx_buffer *tx_info;
1994         struct ena_com_tx_ctx ena_tx_ctx;
1995         struct ena_ring *tx_ring;
1996         struct netdev_queue *txq;
1997         struct ena_com_buf *ena_buf;
1998         void *push_hdr;
1999         u32 len, last_frag;
2000         u16 next_to_use;
2001         u16 req_id;
2002         u16 push_len;
2003         u16 header_len;
2004         dma_addr_t dma;
2005         int qid, rc, nb_hw_desc;
2006         int i = -1;
2007
2008         netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2009         /* Determine which tx ring the skb will be placed on */
2010         qid = skb_get_queue_mapping(skb);
2011         tx_ring = &adapter->tx_ring[qid];
2012         txq = netdev_get_tx_queue(dev, qid);
2013
2014         rc = ena_check_and_linearize_skb(tx_ring, skb);
2015         if (unlikely(rc))
2016                 goto error_drop_packet;
2017
2018         skb_tx_timestamp(skb);
2019         len = skb_headlen(skb);
2020
2021         next_to_use = tx_ring->next_to_use;
2022         req_id = tx_ring->free_tx_ids[next_to_use];
2023         tx_info = &tx_ring->tx_buffer_info[req_id];
2024         tx_info->num_of_bufs = 0;
2025
2026         WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2027         ena_buf = tx_info->bufs;
2028         tx_info->skb = skb;
2029
2030         if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2031                 /* prepare the push buffer */
2032                 push_len = min_t(u32, len, tx_ring->tx_max_header_size);
2033                 header_len = push_len;
2034                 push_hdr = skb->data;
2035         } else {
2036                 push_len = 0;
2037                 header_len = min_t(u32, len, tx_ring->tx_max_header_size);
2038                 push_hdr = NULL;
2039         }
2040
2041         netif_dbg(adapter, tx_queued, dev,
2042                   "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2043                   push_hdr, push_len);
2044
2045         if (len > push_len) {
2046                 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2047                                      len - push_len, DMA_TO_DEVICE);
2048                 if (dma_mapping_error(tx_ring->dev, dma))
2049                         goto error_report_dma_error;
2050
2051                 ena_buf->paddr = dma;
2052                 ena_buf->len = len - push_len;
2053
2054                 ena_buf++;
2055                 tx_info->num_of_bufs++;
2056         }
2057
2058         last_frag = skb_shinfo(skb)->nr_frags;
2059
2060         for (i = 0; i < last_frag; i++) {
2061                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2062
2063                 len = skb_frag_size(frag);
2064                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
2065                                        DMA_TO_DEVICE);
2066                 if (dma_mapping_error(tx_ring->dev, dma))
2067                         goto error_report_dma_error;
2068
2069                 ena_buf->paddr = dma;
2070                 ena_buf->len = len;
2071                 ena_buf++;
2072         }
2073
2074         tx_info->num_of_bufs += last_frag;
2075
2076         memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2077         ena_tx_ctx.ena_bufs = tx_info->bufs;
2078         ena_tx_ctx.push_header = push_hdr;
2079         ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2080         ena_tx_ctx.req_id = req_id;
2081         ena_tx_ctx.header_len = header_len;
2082
2083         /* set flags and metadata */
2084         ena_tx_csum(&ena_tx_ctx, skb);
2085
2086         /* prepare the packet's descriptors for the dma engine */
2087         rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2088                                 &nb_hw_desc);
2089
2090         if (unlikely(rc)) {
2091                 netif_err(adapter, tx_queued, dev,
2092                           "failed to prepare tx bufs\n");
2093                 u64_stats_update_begin(&tx_ring->syncp);
2094                 tx_ring->tx_stats.queue_stop++;
2095                 tx_ring->tx_stats.prepare_ctx_err++;
2096                 u64_stats_update_end(&tx_ring->syncp);
2097                 netif_tx_stop_queue(txq);
2098                 goto error_unmap_dma;
2099         }
2100
2101         netdev_tx_sent_queue(txq, skb->len);
2102
2103         u64_stats_update_begin(&tx_ring->syncp);
2104         tx_ring->tx_stats.cnt++;
2105         tx_ring->tx_stats.bytes += skb->len;
2106         u64_stats_update_end(&tx_ring->syncp);
2107
2108         tx_info->tx_descs = nb_hw_desc;
2109         tx_info->last_jiffies = jiffies;
2110         tx_info->print_once = 0;
2111
2112         tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2113                 tx_ring->ring_size);
2114
2115         /* This WMB is aimed to:
2116          * 1 - perform an smp barrier before reading next_to_completion
2117          * 2 - make sure the descriptors are written before triggering the doorbell
2118          */
2119         wmb();
2120
2121         /* stop the queue when no more space is available; the packet can
2122          * need up to sgl_size + 2 descriptors: one for the meta descriptor and
2123          * one for the header (if the header is larger than tx_max_header_size).
2124          */
2125         if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
2126                      (tx_ring->sgl_size + 2))) {
2127                 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2128                           __func__, qid);
2129
2130                 netif_tx_stop_queue(txq);
2131                 u64_stats_update_begin(&tx_ring->syncp);
2132                 tx_ring->tx_stats.queue_stop++;
2133                 u64_stats_update_end(&tx_ring->syncp);
2134
2135                 /* There is a rare condition where this function decides to
2136                  * stop the queue but meanwhile clean_tx_irq updates
2137                  * next_to_completion and terminates.
2138                  * The queue would then remain stopped forever.
2139                  * To solve this issue this function performs an rmb, checks
2140                  * the wakeup condition and wakes up the queue if needed.
2141                  */
2142                 smp_rmb();
2143
2144                 if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
2145                                 > ENA_TX_WAKEUP_THRESH) {
2146                         netif_tx_wake_queue(txq);
2147                         u64_stats_update_begin(&tx_ring->syncp);
2148                         tx_ring->tx_stats.queue_wakeup++;
2149                         u64_stats_update_end(&tx_ring->syncp);
2150                 }
2151         }
2152
2153         if (netif_xmit_stopped(txq) || !skb->xmit_more) {
2154                 /* trigger the dma engine */
2155                 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
2156                 u64_stats_update_begin(&tx_ring->syncp);
2157                 tx_ring->tx_stats.doorbells++;
2158                 u64_stats_update_end(&tx_ring->syncp);
2159         }
2160
2161         return NETDEV_TX_OK;
2162
2163 error_report_dma_error:
2164         u64_stats_update_begin(&tx_ring->syncp);
2165         tx_ring->tx_stats.dma_mapping_err++;
2166         u64_stats_update_end(&tx_ring->syncp);
2167         netdev_warn(adapter->netdev, "failed to map skb\n");
2168
2169         tx_info->skb = NULL;
2170
2171 error_unmap_dma:
2172         if (i >= 0) {
2173                 /* save value of frag that failed */
2174                 last_frag = i;
2175
2176                 /* start back at beginning and unmap skb */
2177                 tx_info->skb = NULL;
2178                 ena_buf = tx_info->bufs;
2179                 dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2180                                  dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2181
2182                 /* unmap remaining mapped pages */
2183                 for (i = 0; i < last_frag; i++) {
2184                         ena_buf++;
2185                         dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
2186                                        dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
2187                 }
2188         }
2189
2190 error_drop_packet:
2191
2192         dev_kfree_skb(skb);
2193         return NETDEV_TX_OK;
2194 }
2195
2196 #ifdef CONFIG_NET_POLL_CONTROLLER
2197 static void ena_netpoll(struct net_device *netdev)
2198 {
2199         struct ena_adapter *adapter = netdev_priv(netdev);
2200         int i;
2201
2202         /* Don't schedule NAPI if the driver is in the middle of a reset
2203          * or the netdev is down.
2204          */
2205
2206         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
2207             test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2208                 return;
2209
2210         for (i = 0; i < adapter->num_queues; i++)
2211                 napi_schedule(&adapter->ena_napi[i].napi);
2212 }
2213 #endif /* CONFIG_NET_POLL_CONTROLLER */
2214
2215 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2216                             struct net_device *sb_dev,
2217                             select_queue_fallback_t fallback)
2218 {
2219         u16 qid;
2220         /* we suspect that this is good for in-kernel network services that
2221          * want to loop an incoming skb's rx queue back to tx; with normal
2222          * user-generated traffic we will most probably not get here
2223          */
2224         if (skb_rx_queue_recorded(skb))
2225                 qid = skb_get_rx_queue(skb);
2226         else
2227                 qid = fallback(dev, skb, NULL);
2228
2229         return qid;
2230 }
2231
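/* Report host and driver information (OS type, kernel version, driver
 * version) to the device through the host info admin attribute.
 */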
2232 static void ena_config_host_info(struct ena_com_dev *ena_dev)
2233 {
2234         struct ena_admin_host_info *host_info;
2235         int rc;
2236
2237         /* Allocate only the host info */
2238         rc = ena_com_allocate_host_info(ena_dev);
2239         if (rc) {
2240                 pr_err("Cannot allocate host info\n");
2241                 return;
2242         }
2243
2244         host_info = ena_dev->host_attr.host_info;
2245
2246         host_info->os_type = ENA_ADMIN_OS_LINUX;
2247         host_info->kernel_ver = LINUX_VERSION_CODE;
2248         strncpy(host_info->kernel_ver_str, utsname()->version,
2249                 sizeof(host_info->kernel_ver_str) - 1);
2250         host_info->os_dist = 0;
2251         strncpy(host_info->os_dist_str, utsname()->release,
2252                 sizeof(host_info->os_dist_str) - 1);
2253         host_info->driver_version =
2254                 (DRV_MODULE_VER_MAJOR) |
2255                 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2256                 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
2257
2258         rc = ena_com_set_host_attributes(ena_dev);
2259         if (rc) {
2260                 if (rc == -EOPNOTSUPP)
2261                         pr_warn("Cannot set host attributes\n");
2262                 else
2263                         pr_err("Cannot set host attributes\n");
2264
2265                 goto err;
2266         }
2267
2268         return;
2269
2270 err:
2271         ena_com_delete_host_info(ena_dev);
2272 }
2273
2274 static void ena_config_debug_area(struct ena_adapter *adapter)
2275 {
2276         u32 debug_area_size;
2277         int rc, ss_count;
2278
2279         ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2280         if (ss_count <= 0) {
2281                 netif_err(adapter, drv, adapter->netdev,
2282                           "SS count is not positive\n");
2283                 return;
2284         }
2285
2286         /* allocate 32 bytes for each string and 64 bits for each value */
2287         debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
2288
2289         rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2290         if (rc) {
2291                 pr_err("Cannot allocate debug area\n");
2292                 return;
2293         }
2294
2295         rc = ena_com_set_host_attributes(adapter->ena_dev);
2296         if (rc) {
2297                 if (rc == -EOPNOTSUPP)
2298                         netif_warn(adapter, drv, adapter->netdev,
2299                                    "Cannot set host attributes\n");
2300                 else
2301                         netif_err(adapter, drv, adapter->netdev,
2302                                   "Cannot set host attributes\n");
2303                 goto err;
2304         }
2305
2306         return;
2307 err:
2308         ena_com_delete_debug_area(adapter->ena_dev);
2309 }
2310
2311 static void ena_get_stats64(struct net_device *netdev,
2312                             struct rtnl_link_stats64 *stats)
2313 {
2314         struct ena_adapter *adapter = netdev_priv(netdev);
2315         struct ena_ring *rx_ring, *tx_ring;
2316         unsigned int start;
2317         u64 rx_drops;
2318         int i;
2319
2320         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2321                 return;
2322
2323         for (i = 0; i < adapter->num_queues; i++) {
2324                 u64 bytes, packets;
2325
2326                 tx_ring = &adapter->tx_ring[i];
2327
2328                 do {
2329                         start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
2330                         packets = tx_ring->tx_stats.cnt;
2331                         bytes = tx_ring->tx_stats.bytes;
2332                 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
2333
2334                 stats->tx_packets += packets;
2335                 stats->tx_bytes += bytes;
2336
2337                 rx_ring = &adapter->rx_ring[i];
2338
2339                 do {
2340                         start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
2341                         packets = rx_ring->rx_stats.cnt;
2342                         bytes = rx_ring->rx_stats.bytes;
2343                 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
2344
2345                 stats->rx_packets += packets;
2346                 stats->rx_bytes += bytes;
2347         }
2348
2349         do {
2350                 start = u64_stats_fetch_begin_irq(&adapter->syncp);
2351                 rx_drops = adapter->dev_stats.rx_drops;
2352         } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
2353
2354         stats->rx_dropped = rx_drops;
2355
2356         stats->multicast = 0;
2357         stats->collisions = 0;
2358
2359         stats->rx_length_errors = 0;
2360         stats->rx_crc_errors = 0;
2361         stats->rx_frame_errors = 0;
2362         stats->rx_fifo_errors = 0;
2363         stats->rx_missed_errors = 0;
2364         stats->tx_window_errors = 0;
2365
2366         stats->rx_errors = 0;
2367         stats->tx_errors = 0;
2368 }
2369
2370 static const struct net_device_ops ena_netdev_ops = {
2371         .ndo_open               = ena_open,
2372         .ndo_stop               = ena_close,
2373         .ndo_start_xmit         = ena_start_xmit,
2374         .ndo_select_queue       = ena_select_queue,
2375         .ndo_get_stats64        = ena_get_stats64,
2376         .ndo_tx_timeout         = ena_tx_timeout,
2377         .ndo_change_mtu         = ena_change_mtu,
2378         .ndo_set_mac_address    = NULL,
2379         .ndo_validate_addr      = eth_validate_addr,
2380 #ifdef CONFIG_NET_POLL_CONTROLLER
2381         .ndo_poll_controller    = ena_netpoll,
2382 #endif /* CONFIG_NET_POLL_CONTROLLER */
2383 };
2384
2385 static int ena_device_validate_params(struct ena_adapter *adapter,
2386                                       struct ena_com_dev_get_features_ctx *get_feat_ctx)
2387 {
2388         struct net_device *netdev = adapter->netdev;
2389         int rc;
2390
2391         rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2392                               adapter->mac_addr);
2393         if (!rc) {
2394                 netif_err(adapter, drv, netdev,
2395                           "Error, MAC addresses are different\n");
2396                 return -EINVAL;
2397         }
2398
2399         if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
2400             (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
2401                 netif_err(adapter, drv, netdev,
2402                           "Error, device doesn't support enough queues\n");
2403                 return -EINVAL;
2404         }
2405
2406         if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2407                 netif_err(adapter, drv, netdev,
2408                           "Error, device max mtu is smaller than netdev MTU\n");
2409                 return -EINVAL;
2410         }
2411
2412         return 0;
2413 }
2414
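/* Low-level device initialization: set up the mmio register read
 * mechanism, reset and validate the device, set the DMA masks, bring up
 * the admin queue and configure the AENQ groups. *wd_state reports
 * whether the keep-alive watchdog event is supported.
 */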
2415 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2416                            struct ena_com_dev_get_features_ctx *get_feat_ctx,
2417                            bool *wd_state)
2418 {
2419         struct device *dev = &pdev->dev;
2420         bool readless_supported;
2421         u32 aenq_groups;
2422         int dma_width;
2423         int rc;
2424
2425         rc = ena_com_mmio_reg_read_request_init(ena_dev);
2426         if (rc) {
2427                 dev_err(dev, "failed to init mmio readless mode\n");
2428                 return rc;
2429         }
2430
2431         /* The PCIe configuration space revision id indicates whether mmio
2432          * reg read is disabled
2433          */
2434         readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
2435         ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2436
2437         rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2438         if (rc) {
2439                 dev_err(dev, "Can not reset device\n");
2440                 goto err_mmio_read_less;
2441         }
2442
2443         rc = ena_com_validate_version(ena_dev);
2444         if (rc) {
2445                 dev_err(dev, "device version is too low\n");
2446                 goto err_mmio_read_less;
2447         }
2448
2449         dma_width = ena_com_get_dma_width(ena_dev);
2450         if (dma_width < 0) {
2451                 dev_err(dev, "Invalid dma width value %d\n", dma_width);
2452                 rc = dma_width;
2453                 goto err_mmio_read_less;
2454         }
2455
2456         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2457         if (rc) {
2458                 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
2459                 goto err_mmio_read_less;
2460         }
2461
2462         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2463         if (rc) {
2464                 dev_err(dev, "pci_set_consistent_dma_mask failed 0x%x\n",
2465                         rc);
2466                 goto err_mmio_read_less;
2467         }
2468
2469         /* ENA admin level init */
2470         rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
2471         if (rc) {
2472                 dev_err(dev,
2473                         "Can not initialize ena admin queue with device\n");
2474                 goto err_mmio_read_less;
2475         }
2476
2477         /* To enable the msix interrupts the driver needs to know the number
2478          * of queues. So the driver uses polling mode to retrieve this
2479          * information
2480          */
2481         ena_com_set_admin_polling_mode(ena_dev, true);
2482
2483         ena_config_host_info(ena_dev);
2484
2485         /* Get Device Attributes */
2486         rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2487         if (rc) {
2488                 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2489                 goto err_admin_init;
2490         }
2491
2492         /* Try to turn on all the available aenq groups */
2493         aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2494                 BIT(ENA_ADMIN_FATAL_ERROR) |
2495                 BIT(ENA_ADMIN_WARNING) |
2496                 BIT(ENA_ADMIN_NOTIFICATION) |
2497                 BIT(ENA_ADMIN_KEEP_ALIVE);
2498
2499         aenq_groups &= get_feat_ctx->aenq.supported_groups;
2500
2501         rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2502         if (rc) {
2503                 dev_err(dev, "Cannot configure aenq groups, rc=%d\n", rc);
2504                 goto err_admin_init;
2505         }
2506
2507         *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2508
2509         return 0;
2510
2511 err_admin_init:
2512         ena_com_delete_host_info(ena_dev);
2513         ena_com_admin_destroy(ena_dev);
2514 err_mmio_read_less:
2515         ena_com_mmio_reg_read_request_destroy(ena_dev);
2516
2517         return rc;
2518 }
2519
2520 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
2521                                                     int io_vectors)
2522 {
2523         struct ena_com_dev *ena_dev = adapter->ena_dev;
2524         struct device *dev = &adapter->pdev->dev;
2525         int rc;
2526
2527         rc = ena_enable_msix(adapter, io_vectors);
2528         if (rc) {
2529                 dev_err(dev, "Can not reserve msix vectors\n");
2530                 return rc;
2531         }
2532
2533         ena_setup_mgmnt_intr(adapter);
2534
2535         rc = ena_request_mgmnt_irq(adapter);
2536         if (rc) {
2537                 dev_err(dev, "Can not setup management interrupts\n");
2538                 goto err_disable_msix;
2539         }
2540
2541         ena_com_set_admin_polling_mode(ena_dev, false);
2542
2543         ena_com_admin_aenq_enable(ena_dev);
2544
2545         return 0;
2546
2547 err_disable_msix:
2548         ena_disable_msix(adapter);
2549
2550         return rc;
2551 }
2552
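/* Tear the device down in preparation for a reset. Must be called under
 * the rtnl lock (taken in ena_fw_reset_device(), already held in the
 * ena_close() path).
 */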
2553 static void ena_destroy_device(struct ena_adapter *adapter)
2554 {
2555         struct net_device *netdev = adapter->netdev;
2556         struct ena_com_dev *ena_dev = adapter->ena_dev;
2557         bool dev_up;
2558
2559         netif_carrier_off(netdev);
2560
2561         del_timer_sync(&adapter->timer_service);
2562
2563         dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2564         adapter->dev_up_before_reset = dev_up;
2565
2566         ena_com_set_admin_running_state(ena_dev, false);
2567
2568         if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2569                 ena_down(adapter);
2570
2571         /* Before releasing the ENA resources, a device reset is required
2572          * (to prevent the device from accessing them).
2573          * In case the reset flag is set and the device is up, ena_down()
2574          * already performs the reset, so it can be skipped.
2575          */
2576         if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2577                 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2578
2579         ena_free_mgmnt_irq(adapter);
2580
2581         ena_disable_msix(adapter);
2582
2583         ena_com_abort_admin_commands(ena_dev);
2584
2585         ena_com_wait_for_abort_completion(ena_dev);
2586
2587         ena_com_admin_destroy(ena_dev);
2588
2589         ena_com_mmio_reg_read_request_destroy(ena_dev);
2590
2591         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2592
2593         clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2594 }
2595
2596 static int ena_restore_device(struct ena_adapter *adapter)
2597 {
2598         struct ena_com_dev_get_features_ctx get_feat_ctx;
2599         struct ena_com_dev *ena_dev = adapter->ena_dev;
2600         struct pci_dev *pdev = adapter->pdev;
2601         bool wd_state;
2602         int rc;
2603
2604         set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2605         rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
2606         if (rc) {
2607                 dev_err(&pdev->dev, "Can not initialize device\n");
2608                 goto err;
2609         }
2610         adapter->wd_state = wd_state;
2611
2612         rc = ena_device_validate_params(adapter, &get_feat_ctx);
2613         if (rc) {
2614                 dev_err(&pdev->dev, "Validation of device parameters failed\n");
2615                 goto err_device_destroy;
2616         }
2617
2618         clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2619         /* Make sure we don't have a race with AENQ Links state handler */
2620         if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2621                 netif_carrier_on(adapter->netdev);
2622
2623         rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2624                                                       adapter->num_queues);
2625         if (rc) {
2626                 dev_err(&pdev->dev, "Enable MSI-X failed\n");
2627                 goto err_device_destroy;
2628         }
2629         /* If the interface was up before the reset bring it up */
2630         if (adapter->dev_up_before_reset) {
2631                 rc = ena_up(adapter);
2632                 if (rc) {
2633                         dev_err(&pdev->dev, "Failed to create I/O queues\n");
2634                         goto err_disable_msix;
2635                 }
2636         }
2637
2638         mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2639         dev_info(&pdev->dev, "Device reset completed successfully\n");
2640
2641         return rc;
2642 err_disable_msix:
2643         ena_free_mgmnt_irq(adapter);
2644         ena_disable_msix(adapter);
2645 err_device_destroy:
2646         ena_com_admin_destroy(ena_dev);
2647 err:
2648         clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2649         clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2650         dev_err(&pdev->dev,
2651                 "Reset attempt failed. Can not reset the device\n");
2652
2653         return rc;
2654 }
2655
2656 static void ena_fw_reset_device(struct work_struct *work)
2657 {
2658         struct ena_adapter *adapter =
2659                 container_of(work, struct ena_adapter, reset_task);
2660         struct pci_dev *pdev = adapter->pdev;
2661
2662         if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2663                 dev_err(&pdev->dev,
2664                 "device reset scheduled while reset bit is off\n");
2665                 return;
2666         }
2667         rtnl_lock();
2668         ena_destroy_device(adapter);
2669         ena_restore_device(adapter);
2670         rtnl_unlock();
2671 }
2672
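/* Detect a potentially missed MSI-X interrupt on an Rx queue: if the
 * completion queue is non-empty but no interrupt was ever received,
 * trigger a device reset after ENA_MAX_NO_INTERRUPT_ITERATIONS
 * consecutive detections.
 */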
2673 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2674                                         struct ena_ring *rx_ring)
2675 {
2676         if (likely(rx_ring->first_interrupt))
2677                 return 0;
2678
2679         if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2680                 return 0;
2681
2682         rx_ring->no_interrupt_event_cnt++;
2683
2684         if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2685                 netif_err(adapter, rx_err, adapter->netdev,
2686                           "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2687                           rx_ring->qid);
2688                 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2689                 smp_mb__before_atomic();
2690                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2691                 return -EIO;
2692         }
2693
2694         return 0;
2695 }
2696
2697 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2698                                           struct ena_ring *tx_ring)
2699 {
2700         struct ena_tx_buffer *tx_buf;
2701         unsigned long last_jiffies;
2702         u32 missed_tx = 0;
2703         int i, rc = 0;
2704
2705         for (i = 0; i < tx_ring->ring_size; i++) {
2706                 tx_buf = &tx_ring->tx_buffer_info[i];
2707                 last_jiffies = tx_buf->last_jiffies;
2708
2709                 if (last_jiffies == 0)
2710                         /* no pending Tx at this location */
2711                         continue;
2712
2713                 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
2714                              2 * adapter->missing_tx_completion_to))) {
2715                         /* If the interrupt still hasn't been received
2716                          * after the grace period, schedule a reset
2717                          */
2718                         netif_err(adapter, tx_err, adapter->netdev,
2719                                   "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
2720                                   tx_ring->qid);
2721                         adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2722                         smp_mb__before_atomic();
2723                         set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2724                         return -EIO;
2725                 }
2726
2727                 if (unlikely(time_is_before_jiffies(last_jiffies +
2728                                 adapter->missing_tx_completion_to))) {
2729                         if (!tx_buf->print_once)
2730                                 netif_notice(adapter, tx_err, adapter->netdev,
2731                                              "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2732                                              tx_ring->qid, i);
2733
2734                         tx_buf->print_once = 1;
2735                         missed_tx++;
2736                 }
2737         }
2738
2739         if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
2740                 netif_err(adapter, tx_err, adapter->netdev,
2741                           "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2742                           missed_tx,
2743                           adapter->missing_tx_completion_threshold);
2744                 adapter->reset_reason =
2745                         ENA_REGS_RESET_MISS_TX_CMPL;
2746                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2747                 rc = -EIO;
2748         }
2749
2750         u64_stats_update_begin(&tx_ring->syncp);
2751         tx_ring->tx_stats.missed_tx = missed_tx;
2752         u64_stats_update_end(&tx_ring->syncp);
2753
2754         return rc;
2755 }
2756
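/* Scan up to ENA_MONITORED_TX_QUEUES queue pairs per invocation for
 * missing Tx completions and missed Rx interrupts, resuming round-robin
 * from last_monitored_tx_qid.
 */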
2757 static void check_for_missing_completions(struct ena_adapter *adapter)
2758 {
2759         struct ena_ring *tx_ring;
2760         struct ena_ring *rx_ring;
2761         int i, budget, rc;
2762
2763         /* Make sure the driver doesn't turn the device off in another process */
2764         smp_rmb();
2765
2766         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2767                 return;
2768
2769         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2770                 return;
2771
2772         if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
2773                 return;
2774
2775         budget = ENA_MONITORED_TX_QUEUES;
2776
2777         for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2778                 tx_ring = &adapter->tx_ring[i];
2779                 rx_ring = &adapter->rx_ring[i];
2780
2781                 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
2782                 if (unlikely(rc))
2783                         return;
2784
2785                 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
2786                 if (unlikely(rc))
2787                         return;
2788
2789                 budget--;
2790                 if (!budget)
2791                         break;
2792         }
2793
2794         adapter->last_monitored_tx_qid = i % adapter->num_queues;
2795 }
2796
2797 /* trigger napi schedule after 2 consecutive detections */
2798 #define EMPTY_RX_REFILL 2
2799 /* For the rare case where the device runs out of Rx descriptors and the
2800  * napi handler failed to refill new Rx descriptors (due to a lack of memory
2801  * for example).
2802  * This case will lead to a deadlock:
2803  * The device won't send interrupts since all the new Rx packets will be dropped,
2804  * and the napi handler won't allocate new Rx descriptors, so the device won't
2805  * be able to deliver new Rx packets to the host.
2806  *
2807  * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2808  * It is recommended to have at least 512MB (with a minimum of 128MB for a
2809  * constrained environment).
2810  *
2811  * When such a situation is detected - reschedule napi
2812  */
2813 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2814 {
2815         struct ena_ring *rx_ring;
2816         int i, refill_required;
2817
2818         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2819                 return;
2820
2821         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2822                 return;
2823
2824         for (i = 0; i < adapter->num_queues; i++) {
2825                 rx_ring = &adapter->rx_ring[i];
2826
2827                 refill_required =
2828                         ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
2829                 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
2830                         rx_ring->empty_rx_queue++;
2831
2832                         if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
2833                                 u64_stats_update_begin(&rx_ring->syncp);
2834                                 rx_ring->rx_stats.empty_rx_ring++;
2835                                 u64_stats_update_end(&rx_ring->syncp);
2836
2837                                 netif_err(adapter, drv, adapter->netdev,
2838                                           "trigger refill for ring %d\n", i);
2839
2840                                 napi_schedule(rx_ring->napi);
2841                                 rx_ring->empty_rx_queue = 0;
2842                         }
2843                 } else {
2844                         rx_ring->empty_rx_queue = 0;
2845                 }
2846         }
2847 }
2848
2849 /* Check for keep alive expiration */
2850 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2851 {
2852         unsigned long keep_alive_expired;
2853
2854         if (!adapter->wd_state)
2855                 return;
2856
2857         if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2858                 return;
2859
2860         keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
2861                                            adapter->keep_alive_timeout);
2862         if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
2863                 netif_err(adapter, drv, adapter->netdev,
2864                           "Keep alive watchdog timeout.\n");
2865                 u64_stats_update_begin(&adapter->syncp);
2866                 adapter->dev_stats.wd_expired++;
2867                 u64_stats_update_end(&adapter->syncp);
2868                 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
2869                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2870         }
2871 }
2872
2873 static void check_for_admin_com_state(struct ena_adapter *adapter)
2874 {
2875         if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
2876                 netif_err(adapter, drv, adapter->netdev,
2877                           "ENA admin queue is not in running state!\n");
2878                 u64_stats_update_begin(&adapter->syncp);
2879                 adapter->dev_stats.admin_q_pause++;
2880                 u64_stats_update_end(&adapter->syncp);
2881                 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
2882                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2883         }
2884 }
2885
2886 static void ena_update_hints(struct ena_adapter *adapter,
2887                              struct ena_admin_ena_hw_hints *hints)
2888 {
2889         struct net_device *netdev = adapter->netdev;
2890
2891         if (hints->admin_completion_tx_timeout)
2892                 adapter->ena_dev->admin_queue.completion_timeout =
2893                         hints->admin_completion_tx_timeout * 1000;
2894
2895         if (hints->mmio_read_timeout)
2896                 /* convert to usec */
2897                 adapter->ena_dev->mmio_read.reg_read_to =
2898                         hints->mmio_read_timeout * 1000;
2899
2900         if (hints->missed_tx_completion_count_threshold_to_reset)
2901                 adapter->missing_tx_completion_threshold =
2902                         hints->missed_tx_completion_count_threshold_to_reset;
2903
2904         if (hints->missing_tx_completion_timeout) {
2905                 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2906                         adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
2907                 else
2908                         adapter->missing_tx_completion_to =
2909                                 msecs_to_jiffies(hints->missing_tx_completion_timeout);
2910         }
2911
2912         if (hints->netdev_wd_timeout)
2913                 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
2914
2915         if (hints->driver_watchdog_timeout) {
2916                 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2917                         adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2918                 else
2919                         adapter->keep_alive_timeout =
2920                                 msecs_to_jiffies(hints->driver_watchdog_timeout);
2921         }
2922 }
2923
2924 static void ena_update_host_info(struct ena_admin_host_info *host_info,
2925                                  struct net_device *netdev)
2926 {
2927         host_info->supported_network_features[0] =
2928                 netdev->features & GENMASK_ULL(31, 0);
2929         host_info->supported_network_features[1] =
2930                 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
2931 }
2932
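/* Watchdog timer, re-armed every second: runs the keep-alive, admin
 * queue, missing-completion and empty-Rx-ring checks, refreshes the host
 * info and debug area, and queues the reset task when a reset was
 * triggered.
 */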
static void ena_timer_service(struct timer_list *t)
{
        struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
        u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
        struct ena_admin_host_info *host_info =
                adapter->ena_dev->host_attr.host_info;

        check_for_missing_keep_alive(adapter);

        check_for_admin_com_state(adapter);

        check_for_missing_completions(adapter);

        check_for_empty_rx_ring(adapter);

        if (debug_area)
                ena_dump_stats_to_buf(adapter, debug_area);

        if (host_info)
                ena_update_host_info(host_info, adapter->netdev);

        if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
                netif_err(adapter, drv, adapter->netdev,
                          "Trigger reset is on\n");
                ena_dump_stats_to_dmesg(adapter);
                queue_work(ena_wq, &adapter->reset_task);
                return;
        }

        /* Reset the timer */
        mod_timer(&adapter->timer_service, jiffies + HZ);
}

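/* The usable number of IO queues is the minimum of the online CPU
 * count, the device SQ/CQ limits and the MSI-X vectors available after
 * reserving one vector for management.
 */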
static int ena_calc_io_queue_num(struct pci_dev *pdev,
                                 struct ena_com_dev *ena_dev,
                                 struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
        int io_sq_num, io_queue_num;

        /* In case of LLQ use the llq number in the get feature cmd */
        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                io_sq_num = get_feat_ctx->max_queues.max_llq_num;

                if (io_sq_num == 0) {
                        dev_err(&pdev->dev,
                                "Trying to use LLQ but llq_num is 0. Falling back to regular queues\n");

                        ena_dev->tx_mem_queue_type =
                                ENA_ADMIN_PLACEMENT_POLICY_HOST;
                        io_sq_num = get_feat_ctx->max_queues.max_sq_num;
                }
        } else {
                io_sq_num = get_feat_ctx->max_queues.max_sq_num;
        }

        io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
        io_queue_num = min_t(int, io_queue_num, io_sq_num);
        io_queue_num = min_t(int, io_queue_num,
                             get_feat_ctx->max_queues.max_cq_num);
        /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
        io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
        if (unlikely(!io_queue_num)) {
                dev_err(&pdev->dev, "The device doesn't have io queues\n");
                return -EFAULT;
        }

        return io_queue_num;
}

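/* Select the TX placement policy: use LLQ (push mode) only when the
 * device exposes a memory BAR and reports at least one LLQ; otherwise
 * stay with host-memory queues.
 */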
static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
                              struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
        bool has_mem_bar;

        has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

        /* Enable push mode if device supports LLQ */
        if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
                ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
        else
                ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
}

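/* Translate the device offload capability bits into netdev feature
 * flags; SG, RX hashing and HIGHDMA are always advertised.
 */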
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
                                 struct net_device *netdev)
{
        netdev_features_t dev_features = 0;

        /* Set offload features */
        if (feat->offload.tx &
                ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
                dev_features |= NETIF_F_IP_CSUM;

        if (feat->offload.tx &
                ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
                dev_features |= NETIF_F_IPV6_CSUM;

        if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
                dev_features |= NETIF_F_TSO;

        if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
                dev_features |= NETIF_F_TSO6;

        if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
                dev_features |= NETIF_F_TSO_ECN;

        if (feat->offload.rx_supported &
                ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
                dev_features |= NETIF_F_RXCSUM;

        if (feat->offload.rx_supported &
                ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
                dev_features |= NETIF_F_RXCSUM;

        netdev->features =
                dev_features |
                NETIF_F_SG |
                NETIF_F_RXHASH |
                NETIF_F_HIGHDMA;

        netdev->hw_features |= netdev->features;
        netdev->vlan_features |= netdev->features;
}

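/* Configure the netdev from the device feature context: the MAC
 * address (randomized when the device reports an invalid one), the
 * offload flags and the supported MTU range.
 */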
static void ena_set_conf_feat_params(struct ena_adapter *adapter,
                                     struct ena_com_dev_get_features_ctx *feat)
{
        struct net_device *netdev = adapter->netdev;

        /* Copy mac address */
        if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
                eth_hw_addr_random(netdev);
                ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
        } else {
                ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
                ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
        }

        /* Set offload features */
        ena_set_dev_offloads(feat, netdev);

        adapter->max_mtu = feat->dev_attr.max_mtu;
        netdev->max_mtu = adapter->max_mtu;
        netdev->min_mtu = ENA_MIN_MTU;
}

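/* Build the default RSS configuration: distribute the indirection
 * table across the RX queues, select CRC32 as the hash function and
 * program the default hash control. -EOPNOTSUPP is tolerated so that
 * devices without RSS support can still come up.
 */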
static int ena_rss_init_default(struct ena_adapter *adapter)
{
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        struct device *dev = &adapter->pdev->dev;
        int rc, i;
        u32 val;

        rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
        if (unlikely(rc)) {
                dev_err(dev, "Cannot init indirect table\n");
                goto err_rss_init;
        }

        for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
                val = ethtool_rxfh_indir_default(i, adapter->num_queues);
                rc = ena_com_indirect_table_fill_entry(ena_dev, i,
                                                       ENA_IO_RXQ_IDX(val));
                if (unlikely(rc && (rc != -EOPNOTSUPP))) {
                        dev_err(dev, "Cannot fill indirect table\n");
                        goto err_fill_indir;
                }
        }

        rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
                                        ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
        if (unlikely(rc && (rc != -EOPNOTSUPP))) {
                dev_err(dev, "Cannot fill hash function\n");
                goto err_fill_indir;
        }

        rc = ena_com_set_default_hash_ctrl(ena_dev);
        if (unlikely(rc && (rc != -EOPNOTSUPP))) {
                dev_err(dev, "Cannot fill hash control\n");
                goto err_fill_indir;
        }

        return 0;

err_fill_indir:
        ena_com_rss_destroy(ena_dev);
err_rss_init:
        return rc;
}

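/* Unmap whichever BARs were mapped and release the PCI regions that
 * were claimed during probe.
 */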
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
        int release_bars;

        if (ena_dev->mem_bar)
                devm_iounmap(&pdev->dev, ena_dev->mem_bar);

        if (ena_dev->reg_bar)
                devm_iounmap(&pdev->dev, ena_dev->reg_bar);

        release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
        pci_release_selected_regions(pdev, release_bars);
}

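/* Clamp the default ring size to the device CQ/SQ (and, in push mode,
 * LLQ) depth limits, round down to a power of two, and report the
 * maximum TX/RX scatter-gather list sizes via the out parameters.
 */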
static int ena_calc_queue_size(struct pci_dev *pdev,
                               struct ena_com_dev *ena_dev,
                               u16 *max_tx_sgl_size,
                               u16 *max_rx_sgl_size,
                               struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
        u32 queue_size = ENA_DEFAULT_RING_SIZE;

        queue_size = min_t(u32, queue_size,
                           get_feat_ctx->max_queues.max_cq_depth);
        queue_size = min_t(u32, queue_size,
                           get_feat_ctx->max_queues.max_sq_depth);

        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
                queue_size = min_t(u32, queue_size,
                                   get_feat_ctx->max_queues.max_llq_depth);

        queue_size = rounddown_pow_of_two(queue_size);

        if (unlikely(!queue_size)) {
                dev_err(&pdev->dev, "Invalid queue size\n");
                return -EFAULT;
        }

        *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
                                 get_feat_ctx->max_queues.max_packet_tx_descs);
        *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
                                 get_feat_ctx->max_queues.max_packet_rx_descs);

        return queue_size;
}

/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct ena_com_dev_get_features_ctx get_feat_ctx;
        static int version_printed;
        struct net_device *netdev;
        struct ena_adapter *adapter;
        struct ena_com_dev *ena_dev = NULL;
        static int adapters_found;
        int io_queue_num, bars, rc;
        int queue_size;
        u16 tx_sgl_size = 0;
        u16 rx_sgl_size = 0;
        bool wd_state;

        dev_dbg(&pdev->dev, "%s\n", __func__);

        if (version_printed++ == 0)
                dev_info(&pdev->dev, "%s", version);

        rc = pci_enable_device_mem(pdev);
        if (rc) {
                dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
                return rc;
        }

        pci_set_master(pdev);

        ena_dev = vzalloc(sizeof(*ena_dev));
        if (!ena_dev) {
                rc = -ENOMEM;
                goto err_disable_device;
        }

        bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
        rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
        if (rc) {
                dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
                        rc);
                goto err_free_ena_dev;
        }

        ena_dev->reg_bar = devm_ioremap(&pdev->dev,
                                        pci_resource_start(pdev, ENA_REG_BAR),
                                        pci_resource_len(pdev, ENA_REG_BAR));
        if (!ena_dev->reg_bar) {
                dev_err(&pdev->dev, "failed to remap regs bar\n");
                rc = -EFAULT;
                goto err_free_region;
        }

        ena_dev->dmadev = &pdev->dev;

        rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
        if (rc) {
                dev_err(&pdev->dev, "ena device init failed\n");
                if (rc == -ETIME)
                        rc = -EPROBE_DEFER;
                goto err_free_region;
        }

        ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);

        if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
                                                   pci_resource_start(pdev, ENA_MEM_BAR),
                                                   pci_resource_len(pdev, ENA_MEM_BAR));
                if (!ena_dev->mem_bar) {
                        rc = -EFAULT;
                        goto err_device_destroy;
                }
        }

        /* Initial TX interrupt delay. Assumes 1 usec granularity.
         * Updated during device initialization with the real granularity.
         */
        ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
        io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
        queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
                                         &rx_sgl_size, &get_feat_ctx);
        if ((queue_size <= 0) || (io_queue_num <= 0)) {
                rc = -EFAULT;
                goto err_device_destroy;
        }

        dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
                 io_queue_num, queue_size);

        /* dev zeroed in alloc_etherdev_mq */
        netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
        if (!netdev) {
                dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
                rc = -ENOMEM;
                goto err_device_destroy;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        adapter = netdev_priv(netdev);
        pci_set_drvdata(pdev, adapter);

        adapter->ena_dev = ena_dev;
        adapter->netdev = netdev;
        adapter->pdev = pdev;

        ena_set_conf_feat_params(adapter, &get_feat_ctx);

        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
        adapter->reset_reason = ENA_REGS_RESET_NORMAL;

        adapter->tx_ring_size = queue_size;
        adapter->rx_ring_size = queue_size;

        adapter->max_tx_sgl_size = tx_sgl_size;
        adapter->max_rx_sgl_size = rx_sgl_size;

        adapter->num_queues = io_queue_num;
        adapter->last_monitored_tx_qid = 0;

        adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
        adapter->wd_state = wd_state;

        snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

        rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
        if (rc) {
                dev_err(&pdev->dev,
                        "Failed to query interrupt moderation feature\n");
                goto err_netdev_destroy;
        }
        ena_init_io_rings(adapter);

        netdev->netdev_ops = &ena_netdev_ops;
        netdev->watchdog_timeo = TX_TIMEOUT;
        ena_set_ethtool_ops(netdev);

        netdev->priv_flags |= IFF_UNICAST_FLT;

        u64_stats_init(&adapter->syncp);

        rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
        if (rc) {
                dev_err(&pdev->dev,
                        "Failed to enable and set the admin interrupts\n");
                goto err_worker_destroy;
        }
        rc = ena_rss_init_default(adapter);
        if (rc && (rc != -EOPNOTSUPP)) {
                dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
                goto err_free_msix;
        }

        ena_config_debug_area(adapter);

        memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

        netif_carrier_off(netdev);

        rc = register_netdev(netdev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto err_rss;
        }

        INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

        adapter->last_keep_alive_jiffies = jiffies;
        adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
        adapter->missing_tx_completion_to = TX_TIMEOUT;
        adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;

        ena_update_hints(adapter, &get_feat_ctx.hw_hints);

        timer_setup(&adapter->timer_service, ena_timer_service, 0);
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

        dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
                 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
                 netdev->dev_addr, io_queue_num);

        set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

        adapters_found++;

        return 0;

err_rss:
        ena_com_delete_debug_area(ena_dev);
        ena_com_rss_destroy(ena_dev);
err_free_msix:
        ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
        ena_free_mgmnt_irq(adapter);
        ena_disable_msix(adapter);
err_worker_destroy:
        ena_com_destroy_interrupt_moderation(ena_dev);
        del_timer(&adapter->timer_service);
err_netdev_destroy:
        free_netdev(netdev);
err_device_destroy:
        ena_com_delete_host_info(ena_dev);
        ena_com_admin_destroy(ena_dev);
err_free_region:
        ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
        vfree(ena_dev);
err_disable_device:
        pci_disable_device(pdev);
        return rc;
}

/*****************************************************************************/

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
        struct ena_adapter *adapter = pci_get_drvdata(pdev);
        struct ena_com_dev *ena_dev;
        struct net_device *netdev;

        ena_dev = adapter->ena_dev;
        netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
        if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
                free_irq_cpu_rmap(netdev->rx_cpu_rmap);
                netdev->rx_cpu_rmap = NULL;
        }
#endif /* CONFIG_RFS_ACCEL */

        unregister_netdev(netdev);
        del_timer_sync(&adapter->timer_service);

        cancel_work_sync(&adapter->reset_task);

        /* Reset the device only if the device is running. */
        if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
                ena_com_dev_reset(ena_dev, adapter->reset_reason);

        ena_free_mgmnt_irq(adapter);

        ena_disable_msix(adapter);

        free_netdev(netdev);

        ena_com_mmio_reg_read_request_destroy(ena_dev);

        ena_com_abort_admin_commands(ena_dev);

        ena_com_wait_for_abort_completion(ena_dev);

        ena_com_admin_destroy(ena_dev);

        ena_com_rss_destroy(ena_dev);

        ena_com_delete_debug_area(ena_dev);

        ena_com_delete_host_info(ena_dev);

        ena_release_bars(ena_dev, pdev);

        pci_disable_device(pdev);

        ena_com_destroy_interrupt_moderation(ena_dev);

        vfree(ena_dev);
}

#ifdef CONFIG_PM
/* ena_suspend - PM suspend callback
 * @pdev: PCI device information struct
 * @state: power state
 */
static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct ena_adapter *adapter = pci_get_drvdata(pdev);

        u64_stats_update_begin(&adapter->syncp);
        adapter->dev_stats.suspend++;
        u64_stats_update_end(&adapter->syncp);

        rtnl_lock();
        if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
                dev_err(&pdev->dev,
                        "ignoring device reset request as the device is being suspended\n");
                clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
        }
        ena_destroy_device(adapter);
        rtnl_unlock();
        return 0;
}

/* ena_resume - PM resume callback
 * @pdev: PCI device information struct
 */
static int ena_resume(struct pci_dev *pdev)
{
        struct ena_adapter *adapter = pci_get_drvdata(pdev);
        int rc;

        u64_stats_update_begin(&adapter->syncp);
        adapter->dev_stats.resume++;
        u64_stats_update_end(&adapter->syncp);

        rtnl_lock();
        rc = ena_restore_device(adapter);
        rtnl_unlock();
        return rc;
}
#endif

static struct pci_driver ena_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = ena_pci_tbl,
        .probe          = ena_probe,
        .remove         = ena_remove,
#ifdef CONFIG_PM
        .suspend        = ena_suspend,
        .resume         = ena_resume,
#endif
        .sriov_configure = pci_sriov_configure_simple,
};

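/* Module init: create the single-threaded workqueue used by the reset
 * worker before registering the PCI driver.
 */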
static int __init ena_init(void)
{
        pr_info("%s", version);

        ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
        if (!ena_wq) {
                pr_err("Failed to create workqueue\n");
                return -ENOMEM;
        }

        return pci_register_driver(&ena_pci_driver);
}

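/* Module exit: unregister the PCI driver, then destroy the workqueue. */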
static void __exit ena_cleanup(void)
{
        pci_unregister_driver(&ena_pci_driver);

        if (ena_wq) {
                destroy_workqueue(ena_wq);
                ena_wq = NULL;
        }
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
                                      struct ena_admin_aenq_entry *aenq_e)
{
        struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
        struct ena_admin_aenq_link_change_desc *aenq_desc =
                (struct ena_admin_aenq_link_change_desc *)aenq_e;
        int status = aenq_desc->flags &
                ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

        if (status) {
                netdev_dbg(adapter->netdev, "%s\n", __func__);
                set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
                if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
                        netif_carrier_on(adapter->netdev);
        } else {
                clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
                netif_carrier_off(adapter->netdev);
        }
}

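/* Keep-alive AENQ handler: note the arrival time for the keep-alive
 * watchdog and update the 64-bit RX drop counter carried in the event.
 */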
static void ena_keep_alive_wd(void *adapter_data,
                              struct ena_admin_aenq_entry *aenq_e)
{
        struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
        struct ena_admin_aenq_keep_alive_desc *desc;
        u64 rx_drops;

        desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
        adapter->last_keep_alive_jiffies = jiffies;

        rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;

        u64_stats_update_begin(&adapter->syncp);
        adapter->dev_stats.rx_drops = rx_drops;
        u64_stats_update_end(&adapter->syncp);
}

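/* Notification AENQ handler; ENA_ADMIN_UPDATE_HINTS is the only
 * syndrome handled here, by forwarding the inline hints to
 * ena_update_hints().
 */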
static void ena_notification(void *adapter_data,
                             struct ena_admin_aenq_entry *aenq_e)
{
        struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
        struct ena_admin_ena_hw_hints *hints;

        WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
             "Invalid group(%x) expected %x\n",
             aenq_e->aenq_common_desc.group,
             ENA_ADMIN_NOTIFICATION);

        switch (aenq_e->aenq_common_desc.syndrom) {
        case ENA_ADMIN_UPDATE_HINTS:
                hints = (struct ena_admin_ena_hw_hints *)
                        (&aenq_e->inline_data_w4);
                ena_update_hints(adapter, hints);
                break;
        default:
                netif_err(adapter, drv, adapter->netdev,
                          "Invalid aenq notification syndrome %d\n",
                          aenq_e->aenq_common_desc.syndrom);
        }
}

/* This handler will be called for unknown event groups or unimplemented
 * handlers.
 */
static void unimplemented_aenq_handler(void *data,
                                       struct ena_admin_aenq_entry *aenq_e)
{
        struct ena_adapter *adapter = (struct ena_adapter *)data;

        netif_err(adapter, drv, adapter->netdev,
                  "Unknown event was received or event with unimplemented handler\n");
}

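/* Dispatch table mapping AENQ event groups to handlers; groups without
 * an entry fall back to unimplemented_aenq_handler().
 */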
static struct ena_aenq_handlers aenq_handlers = {
        .handlers = {
                [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
                [ENA_ADMIN_NOTIFICATION] = ena_notification,
                [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
        },
        .unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);