/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\", which uses firmware in flash if present, else loads \"nic\")");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
                 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns  1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
        return (console_bitmask >> (console)) & 0x1;
}

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
/* Update localtime to octeon firmware every 60 seconds. This makes the
 * firmware use the same time reference as the host, so that firmware-logged
 * events/errors can easily be correlated with host events when debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST   msecs_to_jiffies(1000)

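/* Completion context for synchronous "trusted VF" requests */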
struct lio_trusted_vf_ctx {
        struct completion complete;
        int status;
};

struct oct_link_status_resp {
        u64 rh;
        struct oct_link_info link_info;
        u64 status;
};

struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

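/* GSO size and segment count for a TX packet, packed into one 64-bit word */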
union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

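/* Per-device handshake used to sequence probe: @init and @started are
 * completed as the initialization stages finish; @init_ok and @started_ok
 * record whether each stage succeeded.
 */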
struct handshake {
        struct completion init;
        struct completion started;
        struct pci_dev *pci_dev;
        int init_ok;
        int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
                                    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
                                      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

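/**
 * \brief Tasklet that processes received packets on all active output queues
 * @param pdev Pointer to the Octeon device, cast to unsigned long
 *
 * Reschedules itself if any queue still had packets left after the
 * per-queue packet budget was consumed.
 */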
static void octeon_droq_bh(unsigned long pdev)
{
        int q_no;
        int reschedule = 0;
        struct octeon_device *oct = (struct octeon_device *)pdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
                if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
                        continue;
                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                                                          MAX_PACKET_BUDGET);
                lio_enable_irq(oct->droq[q_no], NULL);

                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                        /* set time and cnt interrupt thresholds for this DROQ
                         * for NAPI
                         */
                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
                            0x5700000040ULL);
                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
                }
        }

        if (reschedule)
                tasklet_schedule(&oct_priv->droq_tasklet);
}

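/**
 * \brief Wait (with retries) for the output queues to drain
 * @param oct Pointer to Octeon device
 * @returns 0 once the queues have drained or the retry limit is reached
 */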
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
        if ((oct->chip_id == OCTEON_CN66XX) ||
            (oct->chip_id == OCTEON_CN68XX)) {
                /* Reset the Enable bits for Input Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

                /* Reset the Enable bits for Output Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
        }
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */
        force_io_queues_off(oct);

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                                atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos = 0x100;
        u32 status, mask;

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask;        /* Clear corresponding nonfatal bits */
        else
                status &= mask;         /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        pci_disable_device(oct->pci_dev);

        /* Disable interrupts */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);

        /* Release the interrupt line */
        free_irq(oct->pci_dev->irq, oct);

        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                pci_disable_msi(oct->pci_dev);

        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        /* Always return a DISCONNECT. There is no support for recovery,
         * only for a clean shutdown.
         */
        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
        /* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
                            pm_message_t state __attribute__((unused)))
{
        return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
        return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
        .mmio_enabled   = liquidio_pcie_mmio_enabled,
        .slot_reset     = liquidio_pcie_slot_reset,
        .resume         = liquidio_pcie_resume,
};

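/* PCI device IDs (PF functions) claimed by this driver */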
static const struct pci_device_id liquidio_pci_tbl[] = {
        {       /* 68xx */
                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 66xx */
                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 23xx pf */
                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
        .name           = "LiquidIO",
        .id_table       = liquidio_pci_tbl,
        .probe          = liquidio_probe,
        .remove         = liquidio_remove,
        .err_handler    = &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
        .suspend        = liquidio_suspend,
        .resume         = liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
        .sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
        return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
        pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if no queues were woken up, otherwise the number of queues
 *          woken up
 */
static inline int check_txq_status(struct lio *lio)
{
        int numqs = lio->netdev->real_num_tx_queues;
        int ret_val = 0;
        int q, iq;

        /* check each sub-queue state */
        for (q = 0; q < numqs; q++) {
                iq = lio->linfo.txpciq[q %
                        lio->oct_dev->num_iqs].s.q_no;
                if (octnet_iq_is_full(lio->oct_dev, iq))
                        continue;
                if (__netif_subqueue_stopped(lio->netdev, q)) {
                        netif_wake_subqueue(lio->netdev, q);
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
                                                  tx_restart, 1);
                        ret_val++;
                }
        }

        return ret_val;
}

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        /* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
         * This work is invoked only when the new max MTU of the interface is
         * less than the current MTU.
         */
        rtnl_lock();
        dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
        rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

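/**
 * \brief Tears down the mtu status change work
 * @param netdev network device
 */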
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
                                      union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int changed = (lio->linfo.link.u64 != ls->u64);
        int current_max_mtu = lio->linfo.link.s.mtu;
        struct octeon_device *oct = lio->oct_dev;

        dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
                __func__, lio->linfo.link.u64, ls->u64);
        lio->linfo.link.u64 = ls->u64;

        if ((lio->intf_open) && (changed)) {
                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
                        netif_carrier_on(netdev);
                        wake_txqs(netdev);
                } else {
                        dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
                        netif_carrier_off(netdev);
                        stop_txqs(netdev);
                }
                if (lio->linfo.link.s.mtu != current_max_mtu) {
                        netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
                                   current_max_mtu, lio->linfo.link.s.mtu);
                        netdev->max_mtu = lio->linfo.link.s.mtu;
                }
                if (lio->linfo.link.s.mtu < netdev->mtu) {
                        dev_warn(&oct->pci_dev->dev,
                                 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
                                 netdev->mtu, lio->linfo.link.s.mtu);
                        queue_delayed_work(lio->link_status_wq.wq,
                                           &lio->link_status_wq.wk.work, 0);
                }
        }
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_soft_command *sc;
        struct timespec64 ts;
        struct lio_time *lt;
        int ret;

        sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
        if (!sc) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: soft command allocation failed\n");
                return;
        }

        lt = (struct lio_time *)sc->virtdptr;

        /* Get time of the day */
        ktime_get_real_ts64(&ts);
        lt->sec = ts.tv_sec;
        lt->nsec = ts.tv_nsec;
        octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        ret = octeon_send_soft_command(oct, sc);
        if (ret == IQ_SEND_FAILED) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: failed to send soft command\n");
                octeon_free_soft_command(oct, sc);
        } else {
                WRITE_ONCE(sc->caller_is_done, true);
        }

        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->sync_octeon_time_wq.wq =
                alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
        if (!lio->sync_octeon_time_wq.wq) {
                dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
                          lio_sync_octeon_time);
        lio->sync_octeon_time_wq.wk.ctxptr = lio;
        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

        return 0;
}

/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

        if (time_wq->wq) {
                cancel_delayed_work_sync(&time_wq->wk.work);
                destroy_workqueue(time_wq->wq);
        }
}

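/**
 * \brief Find the other Octeon device on the same physical NIC
 * @param oct Pointer to Octeon device
 * @returns the device sharing @oct's PCI bus and slot, or NULL if none
 */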
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
        struct octeon_device *other_oct;

        other_oct = lio_get_device(oct->octeon_id + 1);

        if (other_oct && other_oct->pci_dev) {
                int oct_busnum, other_oct_busnum;

                oct_busnum = oct->pci_dev->bus->number;
                other_oct_busnum = other_oct->pci_dev->bus->number;

                if (oct_busnum == other_oct_busnum) {
                        int oct_slot, other_oct_slot;

                        oct_slot = PCI_SLOT(oct->pci_dev->devfn);
                        other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

                        if (oct_slot == other_oct_slot)
                                return other_oct;
                }
        }

        return NULL;
}

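/**
 * \brief Disable the link state of every VF on each interface of a device
 * @param oct Pointer to Octeon device (may be NULL; a no-op in that case)
 */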
static void disable_all_vf_links(struct octeon_device *oct)
{
        struct net_device *netdev;
        int max_vfs, vf, i;

        if (!oct)
                return;

        max_vfs = oct->sriov_info.max_vfs;

        for (i = 0; i < oct->ifcount; i++) {
                netdev = oct->props[i].netdev;
                if (!netdev)
                        continue;

                for (vf = 0; vf < max_vfs; vf++)
                        liquidio_set_vf_link_state(netdev, vf,
                                                   IFLA_VF_LINK_STATE_DISABLE);
        }
}

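/**
 * \brief Kernel thread that watches for crashed or stuck Octeon cores
 * @param param Pointer to Octeon device
 *
 * Polls CN23XX_SLI_SCRATCH2 every two seconds; when a core is reported
 * crashed or stuck, it logs the core, disables all VF links on this NIC,
 * and (if module unloading is enabled) drops the module references held
 * on behalf of the affected VF drivers.
 */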
static int liquidio_watchdog(void *param)
{
        bool err_msg_was_printed[LIO_MAX_CORES];
        u16 mask_of_crashed_or_stuck_cores = 0;
        bool all_vf_links_are_disabled = false;
        struct octeon_device *oct = param;
        struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
        long refcount, vfs_referencing_pf;
        u64 vfs_mask1, vfs_mask2;
#endif
        int core;

        memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

        while (!kthread_should_stop()) {
                /* sleep for a couple of seconds so that we don't hog the CPU */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(2000));

                mask_of_crashed_or_stuck_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

                if (!mask_of_crashed_or_stuck_cores)
                        continue;

                WRITE_ONCE(oct->cores_crashed, true);
                other_oct = get_other_octeon_device(oct);
                if (other_oct)
                        WRITE_ONCE(other_oct->cores_crashed, true);

                for (core = 0; core < LIO_MAX_CORES; core++) {
                        bool core_crashed_or_got_stuck;

                        core_crashed_or_got_stuck =
                                                (mask_of_crashed_or_stuck_cores
                                                 >> core) & 1;

                        if (core_crashed_or_got_stuck &&
                            !err_msg_was_printed[core]) {
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
                                err_msg_was_printed[core] = true;
                        }
                }

                if (all_vf_links_are_disabled)
                        continue;

                disable_all_vf_links(oct);
                disable_all_vf_links(other_oct);
                all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
                vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
                /* other_oct may be NULL on a single-PF adapter */
                vfs_mask2 = other_oct ?
                        READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask) : 0;

                vfs_referencing_pf  = hweight64(vfs_mask1);
                vfs_referencing_pf += hweight64(vfs_mask2);

                refcount = module_refcount(THIS_MODULE);
                if (refcount >= vfs_referencing_pf) {
                        while (vfs_referencing_pf) {
                                module_put(THIS_MODULE);
                                vfs_referencing_pf--;
                        }
                }
#endif
        }

        return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
               const struct pci_device_id *ent __attribute__((unused)))
{
        struct octeon_device *oct_dev = NULL;
        struct handshake *hs;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));
        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }

        if (pdev->device == OCTEON_CN23XX_PF_VID)
                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        /* Enable PTP for 6XXX Device */
        if (((pdev->device == OCTEON_CN66XX) ||
             (pdev->device == OCTEON_CN68XX)))
                oct_dev->ptp_enable = true;
        else
                oct_dev->ptp_enable = false;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = (void *)pdev;

        oct_dev->subsystem_id = pdev->subsystem_vendor |
                (pdev->subsystem_device << 16);

        hs = &handshake[oct_dev->octeon_id];
        init_completion(&hs->init);
        init_completion(&hs->started);
        hs->pci_dev = pdev;

        if (oct_dev->octeon_id == 0)
                /* first LiquidIO NIC is detected */
                complete(&first_stage);

        if (octeon_device_init(oct_dev)) {
                complete(&hs->init);
                liquidio_remove(pdev);
                return -ENOMEM;
        }

        if (OCTEON_CN23XX_PF(oct_dev)) {
                u8 bus, device, function;

                if (atomic_read(oct_dev->adapter_refcount) == 1) {
                        /* Each NIC gets one watchdog kernel thread.  The first
                         * PF (of each NIC) that gets pci_driver->probe()'d
                         * creates that thread.
                         */
                        bus = pdev->bus->number;
                        device = PCI_SLOT(pdev->devfn);
                        function = PCI_FUNC(pdev->devfn);
                        oct_dev->watchdog_task = kthread_create(
                            liquidio_watchdog, oct_dev,
                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
                        if (!IS_ERR(oct_dev->watchdog_task)) {
                                wake_up_process(oct_dev->watchdog_task);
                        } else {
                                oct_dev->watchdog_task = NULL;
                                dev_err(&oct_dev->pci_dev->dev,
                                        "failed to create kernel_thread\n");
                                liquidio_remove(pdev);
                                return -1;
                        }
                }
        }

        oct_dev->rx_pause = 1;
        oct_dev->tx_pause = 1;

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

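/**
 * \brief Check whether the fw_type module parameter is set to "auto"
 */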
static bool fw_type_is_auto(void)
{
        return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
                       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
        int rc;

        pci_save_state(oct->pci_dev);

        pci_cfg_access_lock(oct->pci_dev);

        /* Quiesce the device completely */
        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
                              PCI_COMMAND_INTX_DISABLE);

        rc = __pci_reset_function_locked(oct->pci_dev);

        if (rc != 0)
                dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
                        rc, oct->pf_num);

        pci_cfg_access_unlock(oct->pci_dev);

        pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        int i, refcount;
        struct msix_entry *msix_entries;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        struct handshake *hs;

        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:

                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                /* fallthrough */
        case OCT_DEV_HOST_OK:

                /* fallthrough */
        case OCT_DEV_CONSOLE_INIT_DONE:
                /* Remove any consoles */
                octeon_remove_consoles(oct);

                /* fallthrough */
        case OCT_DEV_IO_QUEUES_DONE:
                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

                /* Force all requests waiting to be fetched by OCTEON to
                 * complete.
                 */
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        struct octeon_instr_queue *iq;

                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        iq = oct->instr_queue[i];

                        if (atomic_read(&iq->instr_pending)) {
                                spin_lock_bh(&iq->lock);
                                iq->fill_cnt = 0;
                                iq->octeon_read_index = iq->host_write_index;
                                iq->stats.instr_processed +=
                                        atomic_read(&iq->instr_pending);
                                lio_process_iq_request_list(oct, iq, 0);
                                spin_unlock_bh(&iq->lock);
                        }
                }

                lio_process_ordered_list(oct, 1);
                octeon_free_sc_done_list(oct);
                octeon_free_sc_zombie_list(oct);

        /* fallthrough */
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
                                if (oct->ioq_vector[i].vector) {
                                        /* clear the affinity_cpumask */
                                        irq_set_affinity_hint(
                                                        msix_entries[i].vector,
                                                        NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                        oct->ioq_vector[i].vector = 0;
                                }
                        }
                        /* non-iov vector's argument is oct struct */
                        free_irq(msix_entries[i].vector, oct);

                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                } else {
                        /* Release the interrupt line */
                        free_irq(oct->pci_dev->irq, oct);

                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                }

                kfree(oct->irq_name_storage);
                oct->irq_name_storage = NULL;

        /* fallthrough */
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        octeon_free_ioq_vector(oct);

        /* fallthrough */
        case OCT_DEV_MBOX_SETUP_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        oct->fn_list.free_mbox(oct);

        /* fallthrough */
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                /* Wait for any pending operations */
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* Force any pending handshakes to complete */
                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
                        hs = &handshake[i];

                        if (hs->pci_dev) {
                                handshake[oct->octeon_id].init_ok = 0;
                                complete(&handshake[oct->octeon_id].init);
                                handshake[oct->octeon_id].started_ok = 0;
                                complete(&handshake[oct->octeon_id].started);
                        }
                }

                /* fallthrough */
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                /* fallthrough */
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }
#ifdef CONFIG_PCI_IOV
                if (oct->sriov_info.sriov_enabled)
                        pci_disable_sriov(oct->pci_dev);
#endif
                /* fallthrough */
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                /* fallthrough */
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                /* fallthrough */
        case OCT_DEV_PCI_MAP_DONE:
                refcount = octeon_deregister_device(oct);

                /* Soft reset the octeon device before exiting.
                 * However, if fw was loaded from card (i.e. autoboot),
                 * perform an FLR instead.
                 * Implementation note: only soft-reset the device
                 * if it is a CN6XXX OR the LAST CN23XX device.
                 */
                if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
                        octeon_pci_flr(oct);
                else if (OCTEON_CN6XXX(oct) || !refcount)
                        oct->fn_list.soft_reset(oct);

                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                /* fallthrough */
        case OCT_DEV_PCI_ENABLE_DONE:
                pci_clear_master(oct->pci_dev);
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                /* fallthrough */
        case OCT_DEV_BEGIN_STATE:
                /* Nothing to be done here either */
                break;
        }                       /* end switch (oct->status) */

        tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, 0);
        /* allocation can fail; bail out instead of dereferencing NULL */
        if (!sc) {
                netif_info(lio, rx_err, lio->netdev,
                           "Failed to allocate octeon_soft_command\n");
                return;
        }

        ncmd = (union octnet_cmd *)sc->virtdptr;

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
                octeon_free_soft_command(oct, sc);
                return;
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                retval = wait_for_sc_completion_timeout(oct, sc, 0);
                if (retval)
                        return;

                oct->props[lio->ifidx].rx_on = start_stop;
                WRITE_ONCE(sc->caller_is_done, true);
        }
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
        struct net_device *netdev = oct->props[ifidx].netdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
        struct lio *lio;

        if (!netdev) {
                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
                        __func__, ifidx);
                return;
        }

        lio = GET_LIO(netdev);

        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                if (OCTEON_CN23XX_PF(oct))
                        oct->droq[0]->ops.poll_mode = 0;
        }

        /* Delete NAPI */
        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                netif_napi_del(napi);

        tasklet_enable(&oct_priv->droq_tasklet);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);

        cleanup_sync_octeon_time_wq(netdev);
        cleanup_link_status_change_wq(netdev);

        cleanup_rx_oom_poll_fn(netdev);

        lio_delete_glists(lio);

        free_netdev(netdev);

        oct->props[ifidx].gmxport = -1;

        oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
        int i, j;
        struct lio *lio;

        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
        if (!oct->ifcount) {
                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
                return 1;
        }

        spin_lock_bh(&oct->cmd_resp_wqlock);
        oct->cmd_resp_state = OCT_DRV_OFFLINE;
        spin_unlock_bh(&oct->cmd_resp_wqlock);

        lio_vf_rep_destroy(oct);

        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < oct->num_oqs; j++)
                        octeon_unregister_droq_ops(oct,
                                                   lio->linfo.rxpciq[j].s.q_no);
        }

        for (i = 0; i < oct->ifcount; i++)
                liquidio_destroy_nic_device(oct, i);

        if (oct->devlink) {
                devlink_unregister(oct->devlink);
                devlink_free(oct->devlink);
                oct->devlink = NULL;
        }

        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
        return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
        struct octeon_device *oct_dev = pci_get_drvdata(pdev);

        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

        if (oct_dev->watchdog_task)
                kthread_stop(oct_dev->watchdog_task);

        if (!oct_dev->octeon_id &&
            oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
                lio_vf_rep_modexit();

        if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
                liquidio_stop_nic_module(oct_dev);

        /* Reset the octeon device and cleanup all memory allocated for
         * the octeon device by driver.
         */
        octeon_destroy_resources(oct_dev);

        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

        /* This octeon device has been removed. Update the global
         * data structure to reflect this. Free the device structure.
         */
        octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
        u32 dev_id, rev_id;
        int ret = 1;
        char *s;

        pci_read_config_dword(oct->pci_dev, 0, &dev_id);
        pci_read_config_dword(oct->pci_dev, 8, &rev_id);
        oct->rev_id = rev_id & 0xff;

        switch (dev_id) {
        case OCTEON_CN68XX_PCIID:
                oct->chip_id = OCTEON_CN68XX;
                ret = lio_setup_cn68xx_octeon_device(oct);
                s = "CN68XX";
                break;

        case OCTEON_CN66XX_PCIID:
                oct->chip_id = OCTEON_CN66XX;
                ret = lio_setup_cn66xx_octeon_device(oct);
                s = "CN66XX";
                break;

        case OCTEON_CN23XX_PCIID_PF:
                oct->chip_id = OCTEON_CN23XX_PF_VID;
                ret = setup_cn23xx_octeon_pf_device(oct);
                if (ret)
                        break;
#ifdef CONFIG_PCI_IOV
                if (!ret)
                        pci_sriov_set_totalvfs(oct->pci_dev,
                                               oct->sriov_info.max_vfs);
#endif
                s = "CN23XX";
                break;

        default:
                s = "?";
                dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
                        dev_id);
        }

        if (!ret)
                dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
                         OCTEON_MAJOR_REV(oct),
                         OCTEON_MINOR_REV(oct),
                         octeon_get_conf(oct)->card_name,
                         LIQUIDIO_VERSION);

        return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
        /* setup PCI stuff first */
        if (pci_enable_device(oct->pci_dev)) {
                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
                return 1;
        }

        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
                pci_disable_device(oct->pci_dev);
                return 1;
        }

        /* Enable PCI DMA Master. */
        pci_set_master(oct->pci_dev);

        return 0;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
        struct sk_buff *skb;
        struct octnet_buf_free_info *finfo;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
                         DMA_TO_DEVICE);

        tx_buffer_free(skb);
}

1466 /**
1467  * \brief Unmap and free gather buffer
1468  * @param buf buffer
1469  */
1470 static void free_netsgbuf(void *buf)
1471 {
1472         struct octnet_buf_free_info *finfo;
1473         struct sk_buff *skb;
1474         struct lio *lio;
1475         struct octnic_gather *g;
1476         int i, frags, iq;
1477
1478         finfo = (struct octnet_buf_free_info *)buf;
1479         skb = finfo->skb;
1480         lio = finfo->lio;
1481         g = finfo->g;
1482         frags = skb_shinfo(skb)->nr_frags;
1483
1484         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1485                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1486                          DMA_TO_DEVICE);
1487
1488         i = 1;
1489         while (frags--) {
1490                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1491
1492                 pci_unmap_page((lio->oct_dev)->pci_dev,
1493                                g->sg[(i >> 2)].ptr[(i & 3)],
1494                                frag->size, DMA_TO_DEVICE);
1495                 i++;
1496         }
1497
1498         iq = skb_iq(lio->oct_dev, skb);
1499         spin_lock(&lio->glist_lock[iq]);
1500         list_add_tail(&g->list, &lio->glist[iq]);
1501         spin_unlock(&lio->glist_lock[iq]);
1502
1503         tx_buffer_free(skb);
1504 }
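
/* Editor's example (not part of the driver): the g->sg[(i >> 2)].ptr[(i & 3)]
 * addressing above follows the gather-list layout used throughout this file,
 * where each scatter-gather entry carries four pointers. Slot 0 of entry 0
 * holds the linear skb data, so fragment n lives at index i = n + 1. The
 * struct below is a simplified stand-in for the driver's sg entry type.
 */
#include <stdio.h>

struct sg_entry {
	unsigned long long ptr[4];   /* four DMA addresses per entry */
};

int main(void)
{
	int i;

	for (i = 1; i <= 8; i++)
		printf("frag %d -> sg[%d].ptr[%d]\n", i - 1, i >> 2, i & 3);
	return 0;
}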
1505
1506 /**
1507  * \brief Unmap and free gather buffer with response
1508  * @param buf buffer
1509  */
1510 static void free_netsgbuf_with_resp(void *buf)
1511 {
1512         struct octeon_soft_command *sc;
1513         struct octnet_buf_free_info *finfo;
1514         struct sk_buff *skb;
1515         struct lio *lio;
1516         struct octnic_gather *g;
1517         int i, frags, iq;
1518
1519         sc = (struct octeon_soft_command *)buf;
1520         skb = (struct sk_buff *)sc->callback_arg;
1521         finfo = (struct octnet_buf_free_info *)&skb->cb;
1522
1523         lio = finfo->lio;
1524         g = finfo->g;
1525         frags = skb_shinfo(skb)->nr_frags;
1526
1527         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1528                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1529                          DMA_TO_DEVICE);
1530
1531         i = 1;
1532         while (frags--) {
1533                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1534
1535                 pci_unmap_page((lio->oct_dev)->pci_dev,
1536                                g->sg[(i >> 2)].ptr[(i & 3)],
1537                                frag->size, DMA_TO_DEVICE);
1538                 i++;
1539         }
1540
1541         iq = skb_iq(lio->oct_dev, skb);
1542
1543         spin_lock(&lio->glist_lock[iq]);
1544         list_add_tail(&g->list, &lio->glist[iq]);
1545         spin_unlock(&lio->glist_lock[iq]);
1546
1547         /* Don't free the skb yet */
1548 }
1549
1550 /**
1551  * \brief Adjust ptp frequency
1552  * @param ptp PTP clock info
1553  * @param ppb how much to adjust by, in parts-per-billion
1554  */
1555 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1556 {
1557         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1558         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1559         u64 comp, delta;
1560         unsigned long flags;
1561         bool neg_adj = false;
1562
1563         if (ppb < 0) {
1564                 neg_adj = true;
1565                 ppb = -ppb;
1566         }
1567
1568         /* The hardware adds the clock compensation value to the
1569          * PTP clock on every coprocessor clock cycle, so we
1570          * compute the delta in terms of coprocessor clocks.
1571          */
1572         delta = (u64)ppb << 32;
1573         do_div(delta, oct->coproc_clock_rate);
1574
1575         spin_lock_irqsave(&lio->ptp_lock, flags);
1576         comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1577         if (neg_adj)
1578                 comp -= delta;
1579         else
1580                 comp += delta;
1581         lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1582         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1583
1584         return 0;
1585 }
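
/* Editor's example (not part of the driver): CN6XXX_MIO_PTP_CLOCK_COMP holds
 * a 32.32 fixed-point nanosecond increment applied every coprocessor cycle
 * (see liquidio_ptp_init() below), so (ppb << 32) / coproc_clock_rate is the
 * per-cycle correction for a given drift. The 1 GHz rate below is an
 * assumption made purely for this illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t coproc_clock_rate = 1000000000ull;  /* assumed 1 GHz */
	int64_t ppb = 1000;                          /* run 1000 ppb fast */
	uint64_t delta = ((uint64_t)ppb << 32) / coproc_clock_rate;

	/* 1000 ppb of the nominal 1 ns/cycle increment: 2^32 * 1e-6 ~= 4294 */
	printf("delta = %llu\n", (unsigned long long)delta);
	return 0;
}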
1586
1587 /**
1588  * \brief Adjust ptp time
1589  * @param ptp PTP clock info
1590  * @param delta how much to adjust by, in nanosecs
1591  */
1592 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1593 {
1594         unsigned long flags;
1595         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1596
1597         spin_lock_irqsave(&lio->ptp_lock, flags);
1598         lio->ptp_adjust += delta;
1599         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1600
1601         return 0;
1602 }
1603
1604 /**
1605  * \brief Get hardware clock time, including any adjustment
1606  * @param ptp PTP clock info
1607  * @param ts timespec
1608  */
1609 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1610                                 struct timespec64 *ts)
1611 {
1612         u64 ns;
1613         unsigned long flags;
1614         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1615         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1616
1617         spin_lock_irqsave(&lio->ptp_lock, flags);
1618         ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1619         ns += lio->ptp_adjust;
1620         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1621
1622         *ts = ns_to_timespec64(ns);
1623
1624         return 0;
1625 }
1626
1627 /**
1628  * \brief Set hardware clock time. Reset adjustment
1629  * @param ptp PTP clock info
1630  * @param ts timespec
1631  */
1632 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1633                                 const struct timespec64 *ts)
1634 {
1635         u64 ns;
1636         unsigned long flags;
1637         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1638         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1639
1640         ns = timespec64_to_ns(ts);
1641
1642         spin_lock_irqsave(&lio->ptp_lock, flags);
1643         lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1644         lio->ptp_adjust = 0;
1645         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1646
1647         return 0;
1648 }
1649
1650 /**
1651  * \brief PTP ancillary feature enable callback (none are supported)
1652  * @param ptp PTP clock info
1653  * @param rq request
1654  * @param on whether the requested feature should be enabled
1655  */
1656 static int
1657 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1658                     struct ptp_clock_request *rq __attribute__((unused)),
1659                     int on __attribute__((unused)))
1660 {
1661         return -EOPNOTSUPP;
1662 }
1663
1664 /**
1665  * \brief Open PTP clock source
1666  * @param netdev network device
1667  */
1668 static void oct_ptp_open(struct net_device *netdev)
1669 {
1670         struct lio *lio = GET_LIO(netdev);
1671         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1672
1673         spin_lock_init(&lio->ptp_lock);
1674
1675         snprintf(lio->ptp_info.name, sizeof(lio->ptp_info.name), "%s", netdev->name);
1676         lio->ptp_info.owner = THIS_MODULE;
1677         lio->ptp_info.max_adj = 250000000;
1678         lio->ptp_info.n_alarm = 0;
1679         lio->ptp_info.n_ext_ts = 0;
1680         lio->ptp_info.n_per_out = 0;
1681         lio->ptp_info.pps = 0;
1682         lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1683         lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1684         lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1685         lio->ptp_info.settime64 = liquidio_ptp_settime;
1686         lio->ptp_info.enable = liquidio_ptp_enable;
1687
1688         lio->ptp_adjust = 0;
1689
1690         lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1691                                              &oct->pci_dev->dev);
1692
1693         if (IS_ERR(lio->ptp_clock))
1694                 lio->ptp_clock = NULL;
1695 }
1696
1697 /**
1698  * \brief Init PTP clock
1699  * @param oct octeon device
1700  */
1701 static void liquidio_ptp_init(struct octeon_device *oct)
1702 {
1703         u64 clock_comp, cfg;
1704
1705         clock_comp = (u64)NSEC_PER_SEC << 32;
1706         do_div(clock_comp, oct->coproc_clock_rate);
1707         lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1708
1709         /* Enable */
1710         cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1711         lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1712 }
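
/* Editor's note: with an assumed 1.2 GHz coprocessor clock, the nominal
 * compensation programmed above works out to
 *   clock_comp = (10^9 << 32) / 1.2x10^9 = 3579139413 = 0xd5555555,
 * i.e. 5/6 ns per coprocessor cycle in 32.32 fixed point.
 */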
1713
1714 /**
1715  * \brief Load firmware to device
1716  * @param oct octeon device
1717  *
1718  * Maps device to firmware filename, requests firmware, and downloads it
1719  */
1720 static int load_firmware(struct octeon_device *oct)
1721 {
1722         int ret = 0;
1723         const struct firmware *fw;
1724         char fw_name[LIO_MAX_FW_FILENAME_LEN];
1725         char *tmp_fw_type;
1726
1727         if (fw_type_is_auto()) {
1728                 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1729                 strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1730         } else {
1731                 tmp_fw_type = fw_type;
1732         }
1733
1734         sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1735                 octeon_get_conf(oct)->card_name, tmp_fw_type,
1736                 LIO_FW_NAME_SUFFIX);
1737
1738         ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1739         if (ret) {
1740                 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1741                         fw_name);
1742                 release_firmware(fw);
1743                 return ret;
1744         }
1745
1746         ret = octeon_download_firmware(oct, fw->data, fw->size);
1747
1748         release_firmware(fw);
1749
1750         return ret;
1751 }
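
/* Editor's example (not part of the driver): assuming the conventional macro
 * values LIO_FW_DIR "liquidio/", LIO_FW_BASE_NAME "lio_" and
 * LIO_FW_NAME_SUFFIX ".bin" (check liquidio_image.h in your tree), a 23xx
 * card with fw_type "nic" resolves to the same name the MODULE_FIRMWARE()
 * lines at the top of this file advertise. A sketch of the assembly:
 */
#include <stdio.h>

int main(void)
{
	char fw_name[128];

	snprintf(fw_name, sizeof(fw_name), "%s%s%s_%s%s",
		 "liquidio/", "lio_", "23xx", "nic", ".bin");
	printf("%s\n", fw_name);   /* liquidio/lio_23xx_nic.bin */
	return 0;
}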
1752
1753 /**
1754  * \brief Poll routine for checking transmit queue status
1755  * @param work work_struct data structure
1756  */
1757 static void octnet_poll_check_txq_status(struct work_struct *work)
1758 {
1759         struct cavium_wk *wk = (struct cavium_wk *)work;
1760         struct lio *lio = (struct lio *)wk->ctxptr;
1761
1762         if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1763                 return;
1764
1765         check_txq_status(lio);
1766         queue_delayed_work(lio->txq_status_wq.wq,
1767                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1768 }
1769
1770 /**
1771  * \brief Sets up the txq poll check
1772  * @param netdev network device
1773  */
1774 static inline int setup_tx_poll_fn(struct net_device *netdev)
1775 {
1776         struct lio *lio = GET_LIO(netdev);
1777         struct octeon_device *oct = lio->oct_dev;
1778
1779         lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1780                                                 WQ_MEM_RECLAIM, 0);
1781         if (!lio->txq_status_wq.wq) {
1782                 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1783                 return -1;
1784         }
1785         INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1786                           octnet_poll_check_txq_status);
1787         lio->txq_status_wq.wk.ctxptr = lio;
1788         queue_delayed_work(lio->txq_status_wq.wq,
1789                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1790         return 0;
1791 }
1792
1793 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1794 {
1795         struct lio *lio = GET_LIO(netdev);
1796
1797         if (lio->txq_status_wq.wq) {
1798                 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1799                 destroy_workqueue(lio->txq_status_wq.wq);
1800         }
1801 }
1802
1803 /**
1804  * \brief Net device open for LiquidIO
1805  * @param netdev network device
1806  */
1807 static int liquidio_open(struct net_device *netdev)
1808 {
1809         struct lio *lio = GET_LIO(netdev);
1810         struct octeon_device *oct = lio->oct_dev;
1811         struct octeon_device_priv *oct_priv =
1812                 (struct octeon_device_priv *)oct->priv;
1813         struct napi_struct *napi, *n;
1814
1815         if (oct->props[lio->ifidx].napi_enabled == 0) {
1816                 tasklet_disable(&oct_priv->droq_tasklet);
1817
1818                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1819                         napi_enable(napi);
1820
1821                 oct->props[lio->ifidx].napi_enabled = 1;
1822
1823                 if (OCTEON_CN23XX_PF(oct))
1824                         oct->droq[0]->ops.poll_mode = 1;
1825         }
1826
1827         if (oct->ptp_enable)
1828                 oct_ptp_open(netdev);
1829
1830         ifstate_set(lio, LIO_IFSTATE_RUNNING);
1831
1832         if (OCTEON_CN23XX_PF(oct)) {
1833                 if (!oct->msix_on)
1834                         if (setup_tx_poll_fn(netdev))
1835                                 return -1;
1836         } else {
1837                 if (setup_tx_poll_fn(netdev))
1838                         return -1;
1839         }
1840
1841         netif_tx_start_all_queues(netdev);
1842
1843         /* Ready for link status updates */
1844         lio->intf_open = 1;
1845
1846         netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1847
1848         /* tell Octeon to start forwarding packets to host */
1849         send_rx_ctrl_cmd(lio, 1);
1850
1851         /* start periodical statistics fetch */
1852         INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1853         lio->stats_wk.ctxptr = lio;
1854         schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1855                                         (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1856
1857         dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1858                  netdev->name);
1859
1860         return 0;
1861 }
1862
1863 /**
1864  * \brief Net device stop for LiquidIO
1865  * @param netdev network device
1866  */
1867 static int liquidio_stop(struct net_device *netdev)
1868 {
1869         struct lio *lio = GET_LIO(netdev);
1870         struct octeon_device *oct = lio->oct_dev;
1871         struct octeon_device_priv *oct_priv =
1872                 (struct octeon_device_priv *)oct->priv;
1873         struct napi_struct *napi, *n;
1874
1875         ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1876
1877         /* Stop any link updates */
1878         lio->intf_open = 0;
1879
1880         stop_txqs(netdev);
1881
1882         /* Inform that netif carrier is down */
1883         netif_carrier_off(netdev);
1884         netif_tx_disable(netdev);
1885
1886         lio->linfo.link.s.link_up = 0;
1887         lio->link_changes++;
1888
1889         /* Tell Octeon that nic interface is down. */
1890         send_rx_ctrl_cmd(lio, 0);
1891
1892         if (OCTEON_CN23XX_PF(oct)) {
1893                 if (!oct->msix_on)
1894                         cleanup_tx_poll_fn(netdev);
1895         } else {
1896                 cleanup_tx_poll_fn(netdev);
1897         }
1898
1899         cancel_delayed_work_sync(&lio->stats_wk.work);
1900
1901         if (lio->ptp_clock) {
1902                 ptp_clock_unregister(lio->ptp_clock);
1903                 lio->ptp_clock = NULL;
1904         }
1905
1906         /* Wait for any pending Rx descriptors */
1907         if (lio_wait_for_clean_oq(oct))
1908                 netif_info(lio, rx_err, lio->netdev,
1909                            "Proceeding with stop interface after partial RX desc processing\n");
1910
1911         if (oct->props[lio->ifidx].napi_enabled == 1) {
1912                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1913                         napi_disable(napi);
1914
1915                 oct->props[lio->ifidx].napi_enabled = 0;
1916
1917                 if (OCTEON_CN23XX_PF(oct))
1918                         oct->droq[0]->ops.poll_mode = 0;
1919
1920                 tasklet_enable(&oct_priv->droq_tasklet);
1921         }
1922
1923         dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1924
1925         return 0;
1926 }
1927
1928 /**
1929  * \brief Convert net device flags to an octnet_ifflags mask
1930  * @param netdev network device
1931  *
1932  * This routine generates an octnet_ifflags mask from the net device flags
1933  * received from the OS.
1934  */
1935 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1936 {
1937         enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1938
1939         if (netdev->flags & IFF_PROMISC)
1940                 f |= OCTNET_IFFLAG_PROMISC;
1941
1942         if (netdev->flags & IFF_ALLMULTI)
1943                 f |= OCTNET_IFFLAG_ALLMULTI;
1944
1945         if (netdev->flags & IFF_MULTICAST) {
1946                 f |= OCTNET_IFFLAG_MULTICAST;
1947
1948                 /* Accept all multicast addresses if there are more than we
1949                  * can handle
1950                  */
1951                 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1952                         f |= OCTNET_IFFLAG_ALLMULTI;
1953         }
1954
1955         if (netdev->flags & IFF_BROADCAST)
1956                 f |= OCTNET_IFFLAG_BROADCAST;
1957
1958         return f;
1959 }
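
/* Editor's example: netdev->flags = IFF_BROADCAST | IFF_MULTICAST with a
 * handful of multicast entries (below MAX_OCTEON_MULTICAST_ADDR) maps to
 * OCTNET_IFFLAG_UNICAST | OCTNET_IFFLAG_MULTICAST | OCTNET_IFFLAG_BROADCAST;
 * only overflowing the multicast table would add OCTNET_IFFLAG_ALLMULTI.
 */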
1960
1961 /**
1962  * \brief Net device set_multicast_list
1963  * @param netdev network device
1964  */
1965 static void liquidio_set_mcast_list(struct net_device *netdev)
1966 {
1967         struct lio *lio = GET_LIO(netdev);
1968         struct octeon_device *oct = lio->oct_dev;
1969         struct octnic_ctrl_pkt nctrl;
1970         struct netdev_hw_addr *ha;
1971         u64 *mc;
1972         int ret;
1973         int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1974
1975         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1976
1977         /* Create a ctrl pkt command to be sent to core app. */
1978         nctrl.ncmd.u64 = 0;
1979         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1980         nctrl.ncmd.s.param1 = get_new_flags(netdev);
1981         nctrl.ncmd.s.param2 = mc_count;
1982         nctrl.ncmd.s.more = mc_count;
1983         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1984         nctrl.netpndev = (u64)netdev;
1985         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1986
1987         /* copy all the addresses into the udd */
1988         mc = &nctrl.udd[0];
1989         netdev_for_each_mc_addr(ha, netdev) {
1990                 *mc = 0;
1991                 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1992                 /* no need to swap bytes */
1993
1994                 if (++mc > &nctrl.udd[mc_count])
1995                         break;
1996         }
1997
1998         /* This can be called from atomic context, so we do not wait
1999          * for the response.
2000          */
2001
2002         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2003         if (ret) {
2004                 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2005                         ret);
2006         }
2007 }
2008
2009 /**
2010  * \brief Net device set_mac_address
2011  * @param netdev network device
2012  */
2013 static int liquidio_set_mac(struct net_device *netdev, void *p)
2014 {
2015         int ret = 0;
2016         struct lio *lio = GET_LIO(netdev);
2017         struct octeon_device *oct = lio->oct_dev;
2018         struct sockaddr *addr = (struct sockaddr *)p;
2019         struct octnic_ctrl_pkt nctrl;
2020
2021         if (!is_valid_ether_addr(addr->sa_data))
2022                 return -EADDRNOTAVAIL;
2023
2024         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2025
2026         nctrl.ncmd.u64 = 0;
2027         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2028         nctrl.ncmd.s.param1 = 0;
2029         nctrl.ncmd.s.more = 1;
2030         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2031         nctrl.netpndev = (u64)netdev;
2032
2033         nctrl.udd[0] = 0;
2034         /* The MAC Address is presented in network byte order. */
2035         memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2036
2037         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2038         if (ret < 0) {
2039                 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2040                 return -ENOMEM;
2041         }
2042
2043         if (nctrl.sc_status) {
2044                 dev_err(&oct->pci_dev->dev,
2045                         "%s: MAC Address change failed. sc return=%x\n",
2046                          __func__, nctrl.sc_status);
2047                 return -EIO;
2048         }
2049
2050         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2051         memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2052
2053         return 0;
2054 }
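
/* Editor's example (not part of the driver): both liquidio_set_mcast_list()
 * and liquidio_set_mac() place the 6-byte address at byte offset 2 of the
 * 8-byte udd word, leaving the first two bytes zero and keeping network
 * byte order. A userspace sketch of the same packing:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t udd = 0;
	int i;

	memcpy((uint8_t *)&udd + 2, mac, sizeof(mac));

	for (i = 0; i < 8; i++)
		printf("%02x ", ((uint8_t *)&udd)[i]);
	printf("\n");   /* 00 00 00 11 22 33 44 55 */
	return 0;
}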
2055
2056 static void
2057 liquidio_get_stats64(struct net_device *netdev,
2058                      struct rtnl_link_stats64 *lstats)
2059 {
2060         struct lio *lio = GET_LIO(netdev);
2061         struct octeon_device *oct;
2062         u64 pkts = 0, drop = 0, bytes = 0;
2063         struct oct_droq_stats *oq_stats;
2064         struct oct_iq_stats *iq_stats;
2065         int i, iq_no, oq_no;
2066
2067         oct = lio->oct_dev;
2068
2069         if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2070                 return;
2071
2072         for (i = 0; i < oct->num_iqs; i++) {
2073                 iq_no = lio->linfo.txpciq[i].s.q_no;
2074                 iq_stats = &oct->instr_queue[iq_no]->stats;
2075                 pkts += iq_stats->tx_done;
2076                 drop += iq_stats->tx_dropped;
2077                 bytes += iq_stats->tx_tot_bytes;
2078         }
2079
2080         lstats->tx_packets = pkts;
2081         lstats->tx_bytes = bytes;
2082         lstats->tx_dropped = drop;
2083
2084         pkts = 0;
2085         drop = 0;
2086         bytes = 0;
2087
2088         for (i = 0; i < oct->num_oqs; i++) {
2089                 oq_no = lio->linfo.rxpciq[i].s.q_no;
2090                 oq_stats = &oct->droq[oq_no]->stats;
2091                 pkts += oq_stats->rx_pkts_received;
2092                 drop += (oq_stats->rx_dropped +
2093                          oq_stats->dropped_nodispatch +
2094                          oq_stats->dropped_toomany +
2095                          oq_stats->dropped_nomem);
2096                 bytes += oq_stats->rx_bytes_received;
2097         }
2098
2099         lstats->rx_bytes = bytes;
2100         lstats->rx_packets = pkts;
2101         lstats->rx_dropped = drop;
2102
2103         lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2104         lstats->collisions = oct->link_stats.fromhost.total_collisions;
2105
2106         /* detailed rx_errors: */
2107         lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2108         /* recved pkt with crc error    */
2109         lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2110         /* recv'd frame alignment error */
2111         lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2112         /* recv'r fifo overrun */
2113         lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2114
2115         lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2116                 lstats->rx_frame_errors + lstats->rx_fifo_errors;
2117
2118         /* detailed tx_errors */
2119         lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2120         lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2121         lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2122
2123         lstats->tx_errors = lstats->tx_aborted_errors +
2124                 lstats->tx_carrier_errors +
2125                 lstats->tx_fifo_errors;
2126 }
2127
2128 /**
2129  * \brief Handler for SIOCSHWTSTAMP ioctl
2130  * @param netdev network device
2131  * @param ifr interface request carrying a struct hwtstamp_config
2132  *            (dispatch on the ioctl cmd happens in liquidio_ioctl)
2133  */
2134 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2135 {
2136         struct hwtstamp_config conf;
2137         struct lio *lio = GET_LIO(netdev);
2138
2139         if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2140                 return -EFAULT;
2141
2142         if (conf.flags)
2143                 return -EINVAL;
2144
2145         switch (conf.tx_type) {
2146         case HWTSTAMP_TX_ON:
2147         case HWTSTAMP_TX_OFF:
2148                 break;
2149         default:
2150                 return -ERANGE;
2151         }
2152
2153         switch (conf.rx_filter) {
2154         case HWTSTAMP_FILTER_NONE:
2155                 break;
2156         case HWTSTAMP_FILTER_ALL:
2157         case HWTSTAMP_FILTER_SOME:
2158         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2159         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2160         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2161         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2162         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2163         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2164         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2165         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2166         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2167         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2168         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2169         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2170         case HWTSTAMP_FILTER_NTP_ALL:
2171                 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2172                 break;
2173         default:
2174                 return -ERANGE;
2175         }
2176
2177         if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2178                 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2179         else
2180                 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2182
2183         return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2184 }
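
/* Editor's example (not part of the driver): because every supported PTP
 * filter is coerced to HWTSTAMP_FILTER_ALL above, a userspace caller sees
 * the upgraded filter echoed back. "eth0" is a placeholder interface name
 * and error handling is trimmed for brevity.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
		printf("granted rx_filter: %d (HWTSTAMP_FILTER_ALL == %d)\n",
		       cfg.rx_filter, HWTSTAMP_FILTER_ALL);
	close(fd);
	return 0;
}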
2185
2186 /**
2187  * \brief ioctl handler
2188  * @param netdev network device
2189  * @param ifr interface request
2190  * @param cmd command
2191  */
2192 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2193 {
2194         struct lio *lio = GET_LIO(netdev);
2195
2196         switch (cmd) {
2197         case SIOCSHWTSTAMP:
2198                 if (lio->oct_dev->ptp_enable)
2199                         return hwtstamp_ioctl(netdev, ifr);
2200                 /* fall through */
2201         default:
2202                 return -EOPNOTSUPP;
2203         }
2204 }
2205
2206 /**
2207  * \brief handle a Tx timestamp response
2208  * @param oct octeon device
2209  * @param status response status
2210  * @param buf pointer to skb
2211  */
2211 static void handle_timestamp(struct octeon_device *oct,
2212                              u32 status,
2213                              void *buf)
2214 {
2215         struct octnet_buf_free_info *finfo;
2216         struct octeon_soft_command *sc;
2217         struct oct_timestamp_resp *resp;
2218         struct lio *lio;
2219         struct sk_buff *skb = (struct sk_buff *)buf;
2220
2221         finfo = (struct octnet_buf_free_info *)skb->cb;
2222         lio = finfo->lio;
2223         sc = finfo->sc;
2224         oct = lio->oct_dev;
2225         resp = (struct oct_timestamp_resp *)sc->virtrptr;
2226
2227         if (status != OCTEON_REQUEST_DONE) {
2228                 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2229                         CVM_CAST64(status));
2230                 resp->timestamp = 0;
2231         }
2232
2233         octeon_swap_8B_data(&resp->timestamp, 1);
2234
2235         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2236                 struct skb_shared_hwtstamps ts;
2237                 u64 ns = resp->timestamp;
2238
2239                 netif_info(lio, tx_done, lio->netdev,
2240                            "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2241                            skb, (unsigned long long)ns);
2242                 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2243                 skb_tstamp_tx(skb, &ts);
2244         }
2245
2246         octeon_free_soft_command(oct, sc);
2247         tx_buffer_free(skb);
2248 }
2249
2250 /** \brief Send a data packet that will be timestamped
2251  * @param oct octeon device
2252  * @param ndata pointer to network data
2253  * @param finfo pointer to private network data
2254  */
2255 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2256                                          struct octnic_data_pkt *ndata,
2257                                          struct octnet_buf_free_info *finfo,
2258                                          int xmit_more)
2259 {
2260         int retval;
2261         struct octeon_soft_command *sc;
2262         struct lio *lio;
2263         int ring_doorbell;
2264         u32 len;
2265
2266         lio = finfo->lio;
2267
2268         sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2269                                             sizeof(struct oct_timestamp_resp));
2270         finfo->sc = sc;
2271
2272         if (!sc) {
2273                 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2274                 return IQ_SEND_FAILED;
2275         }
2276
2277         if (ndata->reqtype == REQTYPE_NORESP_NET)
2278                 ndata->reqtype = REQTYPE_RESP_NET;
2279         else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2280                 ndata->reqtype = REQTYPE_RESP_NET_SG;
2281
2282         sc->callback = handle_timestamp;
2283         sc->callback_arg = finfo->skb;
2284         sc->iq_no = ndata->q_no;
2285
2286         if (OCTEON_CN23XX_PF(oct))
2287                 len = (u32)((struct octeon_instr_ih3 *)
2288                             (&sc->cmd.cmd3.ih3))->dlengsz;
2289         else
2290                 len = (u32)((struct octeon_instr_ih2 *)
2291                             (&sc->cmd.cmd2.ih2))->dlengsz;
2292
2293         ring_doorbell = !xmit_more;
2294
2295         retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2296                                      sc, len, ndata->reqtype);
2297
2298         if (retval == IQ_SEND_FAILED) {
2299                 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2300                         retval);
2301                 octeon_free_soft_command(oct, sc);
2302         } else {
2303                 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2304         }
2305
2306         return retval;
2307 }
2308
2309 /** \brief Transmit network packets to the Octeon interface
2310  * @param skb      skbuff to transmit
2311  * @param netdev   pointer to network device
2312  * @returns whether the packet was queued to the device
2313  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2314  */
2315 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2316 {
2317         struct lio *lio;
2318         struct octnet_buf_free_info *finfo;
2319         union octnic_cmd_setup cmdsetup;
2320         struct octnic_data_pkt ndata;
2321         struct octeon_device *oct;
2322         struct oct_iq_stats *stats;
2323         struct octeon_instr_irh *irh;
2324         union tx_info *tx_info;
2325         int status = 0;
2326         int q_idx = 0, iq_no = 0;
2327         int j, xmit_more = 0;
2328         u64 dptr = 0;
2329         u32 tag = 0;
2330
2331         lio = GET_LIO(netdev);
2332         oct = lio->oct_dev;
2333
2334         q_idx = skb_iq(oct, skb);
2335         tag = q_idx;
2336         iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2337
2338         stats = &oct->instr_queue[iq_no]->stats;
2339
2340         /* Check for all conditions in which the current packet cannot be
2341          * transmitted.
2342          */
2343         if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2344             (!lio->linfo.link.s.link_up) ||
2345             (skb->len <= 0)) {
2346                 netif_info(lio, tx_err, lio->netdev,
2347                            "Transmit failed link_status : %d\n",
2348                            lio->linfo.link.s.link_up);
2349                 goto lio_xmit_failed;
2350         }
2351
2352         /* Use space in skb->cb to store info used to unmap and
2353          * free the buffers.
2354          */
2355         finfo = (struct octnet_buf_free_info *)skb->cb;
2356         finfo->lio = lio;
2357         finfo->skb = skb;
2358         finfo->sc = NULL;
2359
2360         /* Prepare the attributes for the data to be passed to OSI. */
2361         memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2362
2363         ndata.buf = (void *)finfo;
2364
2365         ndata.q_no = iq_no;
2366
2367         if (octnet_iq_is_full(oct, ndata.q_no)) {
2368                 /* defer sending if queue is full */
2369                 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2370                            ndata.q_no);
2371                 stats->tx_iq_busy++;
2372                 return NETDEV_TX_BUSY;
2373         }
2374
2375         /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2376          *      lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2377          */
2378
2379         ndata.datasize = skb->len;
2380
2381         cmdsetup.u64 = 0;
2382         cmdsetup.s.iq_no = iq_no;
2383
2384         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2385                 if (skb->encapsulation) {
2386                         cmdsetup.s.tnl_csum = 1;
2387                         stats->tx_vxlan++;
2388                 } else {
2389                         cmdsetup.s.transport_csum = 1;
2390                 }
2391         }
2392         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2393                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2394                 cmdsetup.s.timestamp = 1;
2395         }
2396
2397         if (skb_shinfo(skb)->nr_frags == 0) {
2398                 cmdsetup.s.u.datasize = skb->len;
2399                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2400
2401                 /* Map the linear skb data for DMA to the device. */
2402                 dptr = dma_map_single(&oct->pci_dev->dev,
2403                                       skb->data,
2404                                       skb->len,
2405                                       DMA_TO_DEVICE);
2406                 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2407                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2408                                 __func__);
2409                         stats->tx_dmamap_fail++;
2410                         return NETDEV_TX_BUSY;
2411                 }
2412
2413                 if (OCTEON_CN23XX_PF(oct))
2414                         ndata.cmd.cmd3.dptr = dptr;
2415                 else
2416                         ndata.cmd.cmd2.dptr = dptr;
2417                 finfo->dptr = dptr;
2418                 ndata.reqtype = REQTYPE_NORESP_NET;
2419
2420         } else {
2421                 int i, frags;
2422                 struct skb_frag_struct *frag;
2423                 struct octnic_gather *g;
2424
2425                 spin_lock(&lio->glist_lock[q_idx]);
2426                 g = (struct octnic_gather *)
2427                         lio_list_delete_head(&lio->glist[q_idx]);
2428                 spin_unlock(&lio->glist_lock[q_idx]);
2429
2430                 if (!g) {
2431                         netif_info(lio, tx_err, lio->netdev,
2432                                    "Transmit scatter gather: glist null!\n");
2433                         goto lio_xmit_failed;
2434                 }
2435
2436                 cmdsetup.s.gather = 1;
2437                 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2438                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2439
2440                 memset(g->sg, 0, g->sg_size);
2441
2442                 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2443                                                  skb->data,
2444                                                  (skb->len - skb->data_len),
2445                                                  DMA_TO_DEVICE);
2446                 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2447                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2448                                 __func__);
2449                         stats->tx_dmamap_fail++;
2450                         return NETDEV_TX_BUSY;
2451                 }
2452                 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2453
2454                 frags = skb_shinfo(skb)->nr_frags;
2455                 i = 1;
2456                 while (frags--) {
2457                         frag = &skb_shinfo(skb)->frags[i - 1];
2458
2459                         g->sg[(i >> 2)].ptr[(i & 3)] =
2460                                 dma_map_page(&oct->pci_dev->dev,
2461                                              frag->page.p,
2462                                              frag->page_offset,
2463                                              frag->size,
2464                                              DMA_TO_DEVICE);
2465
2466                         if (dma_mapping_error(&oct->pci_dev->dev,
2467                                               g->sg[i >> 2].ptr[i & 3])) {
2468                                 dma_unmap_single(&oct->pci_dev->dev,
2469                                                  g->sg[0].ptr[0],
2470                                                  skb->len - skb->data_len,
2471                                                  DMA_TO_DEVICE);
2472                                 for (j = 1; j < i; j++) {
2473                                         frag = &skb_shinfo(skb)->frags[j - 1];
2474                                         dma_unmap_page(&oct->pci_dev->dev,
2475                                                        g->sg[j >> 2].ptr[j & 3],
2476                                                        frag->size,
2477                                                        DMA_TO_DEVICE);
2478                                 }
2479                                 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2480                                         __func__);
2481                                 return NETDEV_TX_BUSY;
2482                         }
2483
2484                         add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2485                         i++;
2486                 }
2487
2488                 dptr = g->sg_dma_ptr;
2489
2490                 if (OCTEON_CN23XX_PF(oct))
2491                         ndata.cmd.cmd3.dptr = dptr;
2492                 else
2493                         ndata.cmd.cmd2.dptr = dptr;
2494                 finfo->dptr = dptr;
2495                 finfo->g = g;
2496
2497                 ndata.reqtype = REQTYPE_NORESP_NET_SG;
2498         }
2499
2500         if (OCTEON_CN23XX_PF(oct)) {
2501                 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2502                 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2503         } else {
2504                 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2505                 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2506         }
2507
2508         if (skb_shinfo(skb)->gso_size) {
2509                 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2510                 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2511                 stats->tx_gso++;
2512         }
2513
2514         /* HW insert VLAN tag */
2515         if (skb_vlan_tag_present(skb)) {
2516                 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
2517                 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
2518         }
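        /* Editor's note: e.g. a VLAN tag of 0x6064 splits into priority 3
         * (0x6064 >> VLAN_PRIO_SHIFT) and VLAN ID 100 (0x6064 & VLAN_VID_MASK),
         * the standard 802.1Q PCP/VID split of the TCI.
         */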
2519
2520         xmit_more = skb->xmit_more;
2521
2522         if (unlikely(cmdsetup.s.timestamp))
2523                 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2524         else
2525                 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2526         if (status == IQ_SEND_FAILED)
2527                 goto lio_xmit_failed;
2528
2529         netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2530
2531         if (status == IQ_SEND_STOP)
2532                 netif_stop_subqueue(netdev, q_idx);
2533
2534         netif_trans_update(netdev);
2535
2536         if (tx_info->s.gso_segs)
2537                 stats->tx_done += tx_info->s.gso_segs;
2538         else
2539                 stats->tx_done++;
2540         stats->tx_tot_bytes += ndata.datasize;
2541
2542         return NETDEV_TX_OK;
2543
2544 lio_xmit_failed:
2545         stats->tx_dropped++;
2546         netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2547                    iq_no, stats->tx_dropped);
2548         if (dptr)
2549                 dma_unmap_single(&oct->pci_dev->dev, dptr,
2550                                  ndata.datasize, DMA_TO_DEVICE);
2551
2552         octeon_ring_doorbell_locked(oct, iq_no);
2553
2554         tx_buffer_free(skb);
2555         return NETDEV_TX_OK;
2556 }
2557
2558 /** \brief Network device Tx timeout
2559  * @param netdev    pointer to network device
2560  */
2561 static void liquidio_tx_timeout(struct net_device *netdev)
2562 {
2563         struct lio *lio;
2564
2565         lio = GET_LIO(netdev);
2566
2567         netif_info(lio, tx_err, lio->netdev,
2568                    "Transmit timeout tx_dropped:%lu, waking up queues now!\n",
2569                    netdev->stats.tx_dropped);
2570         netif_trans_update(netdev);
2571         wake_txqs(netdev);
2572 }
2573
2574 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2575                                     __be16 proto __attribute__((unused)),
2576                                     u16 vid)
2577 {
2578         struct lio *lio = GET_LIO(netdev);
2579         struct octeon_device *oct = lio->oct_dev;
2580         struct octnic_ctrl_pkt nctrl;
2581         int ret = 0;
2582
2583         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2584
2585         nctrl.ncmd.u64 = 0;
2586         nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2587         nctrl.ncmd.s.param1 = vid;
2588         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2589         nctrl.netpndev = (u64)netdev;
2590         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2591
2592         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2593         if (ret) {
2594                 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2595                         ret);
2596                 if (ret > 0)
2597                         ret = -EIO;
2598         }
2599
2600         return ret;
2601 }
2602
2603 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2604                                      __be16 proto __attribute__((unused)),
2605                                      u16 vid)
2606 {
2607         struct lio *lio = GET_LIO(netdev);
2608         struct octeon_device *oct = lio->oct_dev;
2609         struct octnic_ctrl_pkt nctrl;
2610         int ret = 0;
2611
2612         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2613
2614         nctrl.ncmd.u64 = 0;
2615         nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2616         nctrl.ncmd.s.param1 = vid;
2617         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2618         nctrl.netpndev = (u64)netdev;
2619         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2620
2621         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2622         if (ret) {
2623                 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2624                         ret);
2625                 if (ret > 0)
2626                         ret = -EIO;
2627         }
2628         return ret;
2629 }
2630
2631 /** Send a command to the firmware to enable or disable RX checksum offload
2632  * @param netdev                pointer to network device
2633  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2634  * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE or
2635  *                              OCTNET_CMD_RXCSUM_DISABLE
2636  * @returns                     0 on success, negative errno on failure
2637  */
2638 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2639                                        u8 rx_cmd)
2640 {
2641         struct lio *lio = GET_LIO(netdev);
2642         struct octeon_device *oct = lio->oct_dev;
2643         struct octnic_ctrl_pkt nctrl;
2644         int ret = 0;
2645
2646         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2647
2648         nctrl.ncmd.u64 = 0;
2649         nctrl.ncmd.s.cmd = command;
2650         nctrl.ncmd.s.param1 = rx_cmd;
2651         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2652         nctrl.netpndev = (u64)netdev;
2653         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2654
2655         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2656         if (ret) {
2657                 dev_err(&oct->pci_dev->dev,
2658                         "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2659                         ret);
2660                 if (ret > 0)
2661                         ret = -EIO;
2662         }
2663         return ret;
2664 }
2665
2666 /** Send a command to the firmware to add or delete a VxLAN UDP port
2667  * @param netdev                pointer to network device
2668  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2669  * @param vxlan_port            VxLAN port to be added or deleted
2670  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD or
2671  *                              OCTNET_CMD_VXLAN_PORT_DEL
2672  * @returns                     0 on success, negative errno on failure
2673  */
2674 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2675                                        u16 vxlan_port, u8 vxlan_cmd_bit)
2676 {
2677         struct lio *lio = GET_LIO(netdev);
2678         struct octeon_device *oct = lio->oct_dev;
2679         struct octnic_ctrl_pkt nctrl;
2680         int ret = 0;
2681
2682         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2683
2684         nctrl.ncmd.u64 = 0;
2685         nctrl.ncmd.s.cmd = command;
2686         nctrl.ncmd.s.more = vxlan_cmd_bit;
2687         nctrl.ncmd.s.param1 = vxlan_port;
2688         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2689         nctrl.netpndev = (u64)netdev;
2690         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2691
2692         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2693         if (ret) {
2694                 dev_err(&oct->pci_dev->dev,
2695                         "VxLAN port add/delete failed in core (ret:0x%x)\n",
2696                         ret);
2697                 if (ret > 0)
2698                         ret = -EIO;
2699         }
2700         return ret;
2701 }
2702
2703 /** \brief Net device fix features
2704  * @param netdev  pointer to network device
2705  * @param request features requested
2706  * @returns updated features list
2707  */
2708 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2709                                                netdev_features_t request)
2710 {
2711         struct lio *lio = netdev_priv(netdev);
2712
2713         if ((request & NETIF_F_RXCSUM) &&
2714             !(lio->dev_capability & NETIF_F_RXCSUM))
2715                 request &= ~NETIF_F_RXCSUM;
2716
2717         if ((request & NETIF_F_HW_CSUM) &&
2718             !(lio->dev_capability & NETIF_F_HW_CSUM))
2719                 request &= ~NETIF_F_HW_CSUM;
2720
2721         if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2722                 request &= ~NETIF_F_TSO;
2723
2724         if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2725                 request &= ~NETIF_F_TSO6;
2726
2727         if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2728                 request &= ~NETIF_F_LRO;
2729
2730         /* Disable LRO if RXCSUM is off */
2731         if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2732             (lio->dev_capability & NETIF_F_LRO))
2733                 request &= ~NETIF_F_LRO;
2734
2735         if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2736             !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2737                 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2738
2739         return request;
2740 }
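
/* Editor's example: with NETIF_F_LRO currently enabled on a device whose
 * capability mask lacks NETIF_F_RXCSUM, a request for RXCSUM | LRO first
 * loses the RXCSUM bit, and the "disable LRO if RXCSUM is off" rule then
 * clears LRO as well, so neither feature survives the fixup.
 */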
2741
2742 /** \brief Net device set features
2743  * @param netdev  pointer to network device
2744  * @param features features to enable/disable
2745  */
2746 static int liquidio_set_features(struct net_device *netdev,
2747                                  netdev_features_t features)
2748 {
2749         struct lio *lio = netdev_priv(netdev);
2750
2751         if ((features & NETIF_F_LRO) &&
2752             (lio->dev_capability & NETIF_F_LRO) &&
2753             !(netdev->features & NETIF_F_LRO))
2754                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2755                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2756         else if (!(features & NETIF_F_LRO) &&
2757                  (lio->dev_capability & NETIF_F_LRO) &&
2758                  (netdev->features & NETIF_F_LRO))
2759                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2760                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2761
2762         /* Tell the firmware to enable/disable RX checksum offload
2763          * when the setting is changed via ethtool
2764          */
2765         if (!(netdev->features & NETIF_F_RXCSUM) &&
2766             (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2767             (features & NETIF_F_RXCSUM))
2768                 liquidio_set_rxcsum_command(netdev,
2769                                             OCTNET_CMD_TNL_RX_CSUM_CTL,
2770                                             OCTNET_CMD_RXCSUM_ENABLE);
2771         else if ((netdev->features & NETIF_F_RXCSUM) &&
2772                  (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2773                  !(features & NETIF_F_RXCSUM))
2774                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2775                                             OCTNET_CMD_RXCSUM_DISABLE);
2776
2777         if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2778             (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2779             !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2780                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2781                                      OCTNET_CMD_VLAN_FILTER_ENABLE);
2782         else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2783                  (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2784                  (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2785                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2786                                      OCTNET_CMD_VLAN_FILTER_DISABLE);
2787
2788         return 0;
2789 }
2790
2791 static void liquidio_add_vxlan_port(struct net_device *netdev,
2792                                     struct udp_tunnel_info *ti)
2793 {
2794         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2795                 return;
2796
2797         liquidio_vxlan_port_command(netdev,
2798                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
2799                                     htons(ti->port),
2800                                     OCTNET_CMD_VXLAN_PORT_ADD);
2801 }
2802
2803 static void liquidio_del_vxlan_port(struct net_device *netdev,
2804                                     struct udp_tunnel_info *ti)
2805 {
2806         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2807                 return;
2808
2809         liquidio_vxlan_port_command(netdev,
2810                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
2811                                     htons(ti->port),
2812                                     OCTNET_CMD_VXLAN_PORT_DEL);
2813 }
2814
2815 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2816                                  u8 *mac, bool is_admin_assigned)
2817 {
2818         struct lio *lio = GET_LIO(netdev);
2819         struct octeon_device *oct = lio->oct_dev;
2820         struct octnic_ctrl_pkt nctrl;
2821         int ret = 0;
2822
2823         if (!is_valid_ether_addr(mac))
2824                 return -EINVAL;
2825
2826         if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2827                 return -EINVAL;
2828
2829         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2830
2831         nctrl.ncmd.u64 = 0;
2832         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2833         /* vfidx is 0 based, but vf_num (param1) is 1 based */
2834         nctrl.ncmd.s.param1 = vfidx + 1;
2835         nctrl.ncmd.s.more = 1;
2836         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2837         nctrl.netpndev = (u64)netdev;
2838         if (is_admin_assigned) {
2839                 nctrl.ncmd.s.param2 = true;
2840                 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2841         }
2842
2843         nctrl.udd[0] = 0;
2844         /* The MAC Address is presented in network byte order. */
2845         ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2846
2847         oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2848
2849         ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2850         if (ret > 0)
2851                 ret = -EIO;
2852
2853         return ret;
2854 }
2855
2856 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2857 {
2858         struct lio *lio = GET_LIO(netdev);
2859         struct octeon_device *oct = lio->oct_dev;
2860         int retval;
2861
2862         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2863                 return -EINVAL;
2864
2865         retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2866         if (!retval)
2867                 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2868
2869         return retval;
2870 }
2871
2872 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2873                                     bool enable)
2874 {
2875         struct lio *lio = GET_LIO(netdev);
2876         struct octeon_device *oct = lio->oct_dev;
2877         struct octnic_ctrl_pkt nctrl;
2878         int retval;
2879
2880         if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2881                 netif_info(lio, drv, lio->netdev,
2882                            "firmware does not support spoofchk\n");
2883                 return -EOPNOTSUPP;
2884         }
2885
2886         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2887                 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2888                 return -EINVAL;
2889         }
2890
2891         if (enable) {
2892                 if (oct->sriov_info.vf_spoofchk[vfidx])
2893                         return 0;
2894         } else {
2895                 /* Clear */
2896                 if (!oct->sriov_info.vf_spoofchk[vfidx])
2897                         return 0;
2898         }
2899
2900         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2901         nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2902         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2903         /* vfidx is 0 based, but vf_num (param1) is 1 based */
2904         nctrl.ncmd.s.param1 = vfidx + 1;
2907         nctrl.ncmd.s.param2 = enable;
2908         nctrl.ncmd.s.more = 0;
2909         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2910         nctrl.cb_fn = NULL;
2911
2912         retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2913
2914         if (retval) {
2915                 netif_info(lio, drv, lio->netdev,
2916                            "Failed to set VF %d spoofchk %s\n", vfidx,
2917                            enable ? "on" : "off");
2918                 return -1;
2919         }
2920
2921         oct->sriov_info.vf_spoofchk[vfidx] = enable;
2922         netif_info(lio, drv, lio->netdev, "VF %d spoofchk is %s\n", vfidx,
2923                    enable ? "on" : "off");
2924
2925         return 0;
2926 }
2927
2928 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2929                                 u16 vlan, u8 qos, __be16 vlan_proto)
2930 {
2931         struct lio *lio = GET_LIO(netdev);
2932         struct octeon_device *oct = lio->oct_dev;
2933         struct octnic_ctrl_pkt nctrl;
2934         u16 vlantci;
2935         int ret = 0;
2936
2937         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2938                 return -EINVAL;
2939
2940         if (vlan_proto != htons(ETH_P_8021Q))
2941                 return -EPROTONOSUPPORT;
2942
2943         if (vlan >= VLAN_N_VID || qos > 7)
2944                 return -EINVAL;
2945
2946         if (vlan)
2947                 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2948         else
2949                 vlantci = 0;
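        /* Editor's note: e.g. vlan 100 with qos 3 gives
         * vlantci = 100 | (3 << VLAN_PRIO_SHIFT) = 0x6064, the 802.1Q TCI
         * the firmware consumes.
         */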
2950
2951         if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2952                 return 0;
2953
2954         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2955
2956         if (vlan)
2957                 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2958         else
2959                 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2960
2961         nctrl.ncmd.s.param1 = vlantci;
2962         nctrl.ncmd.s.param2 =
2963             vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2964         nctrl.ncmd.s.more = 0;
2965         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2966         nctrl.cb_fn = NULL;
2967
2968         ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2969         if (ret) {
2970                 if (ret > 0)
2971                         ret = -EIO;
2972                 return ret;
2973         }
2974
2975         oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2976
2977         return ret;
2978 }
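     /* Worked example of the vlantci encoding above, following the 802.1Q
      * TCI layout (VLAN_PRIO_SHIFT is 13); names are placeholders:
      *
      *   ip link set dev eth0 vf 1 vlan 100 qos 3
      *
      * yields vlantci = 100 | (3 << 13) = 0x6064.
      */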
2979
2980 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2981                                   struct ifla_vf_info *ivi)
2982 {
2983         struct lio *lio = GET_LIO(netdev);
2984         struct octeon_device *oct = lio->oct_dev;
2985         u8 *macaddr;
2986
2987         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2988                 return -EINVAL;
2989
2990         memset(ivi, 0, sizeof(struct ifla_vf_info));
2991
2992         ivi->vf = vfidx;
2993         macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2994         ether_addr_copy(&ivi->mac[0], macaddr);
2995         ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2996         ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2997         if (oct->sriov_info.trusted_vf.active &&
2998             oct->sriov_info.trusted_vf.id == vfidx)
2999                 ivi->trusted = true;
3000         else
3001                 ivi->trusted = false;
3002         ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3003         ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
3004         ivi->max_tx_rate = lio->linfo.link.s.speed;
3005         ivi->min_tx_rate = 0;
3006
3007         return 0;
3008 }
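     /* The fields filled in here back the per-VF information (IFLA_VF_INFO)
      * that userspace reads, e.g. the "vf N MAC ..., vlan ..." lines printed
      * by "ip link show dev eth0" (name is a placeholder).
      */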
3009
3010 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3011 {
3012         struct octeon_device *oct = lio->oct_dev;
3013         struct octeon_soft_command *sc;
3014         int retval;
3015
3016         sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3017         if (!sc)
3018                 return -ENOMEM;
3019
3020         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3021
3022         /* vfidx is 0 based, but vf_num (param1) is 1 based */
3023         octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3024                                     OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3025                                     trusted);
3026
3027         init_completion(&sc->complete);
3028         sc->sc_status = OCTEON_REQUEST_PENDING;
3029
3030         retval = octeon_send_soft_command(oct, sc);
3031         if (retval == IQ_SEND_FAILED) {
3032                 octeon_free_soft_command(oct, sc);
3033                 retval = -EIO;
3034         } else {
3035                 /* Wait for response or timeout */
3036                 retval = wait_for_sc_completion_timeout(oct, sc, 0);
3037                 if (retval)
3038                         return retval;
3039
3040                 WRITE_ONCE(sc->caller_is_done, true);
3041         }
3042
3043         return retval;
3044 }
3045
3046 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3047                                  bool setting)
3048 {
3049         struct lio *lio = GET_LIO(netdev);
3050         struct octeon_device *oct = lio->oct_dev;
3051
3052         if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3053                 /* trusted vf needs f/w >= 1.7.1 (strcmp ok for 1-digit fields) */
3054                 return -EOPNOTSUPP;
3055         }
3056
3057         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3058                 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3059                 return -EINVAL;
3060         }
3061
3062         if (setting) {
3063                 /* Set */
3064
3065                 if (oct->sriov_info.trusted_vf.active &&
3066                     oct->sriov_info.trusted_vf.id == vfidx)
3067                         return 0;
3068
3069                 if (oct->sriov_info.trusted_vf.active) {
3070                         netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3071                         return -EPERM;
3072                 }
3073         } else {
3074                 /* Clear */
3075
3076                 if (!oct->sriov_info.trusted_vf.active)
3077                         return 0;
3078         }
3079
3080         if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3081                 if (setting) {
3082                         oct->sriov_info.trusted_vf.id = vfidx;
3083                         oct->sriov_info.trusted_vf.active = true;
3084                 } else {
3085                         oct->sriov_info.trusted_vf.active = false;
3086                 }
3087
3088                 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3089                            setting ? "" : "not ");
3090         } else {
3091                 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3092                 return -EIO;
3093         }
3094
3095         return 0;
3096 }
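     /* Illustrative usage (names are placeholders):
      *
      *   ip link set dev eth0 vf 1 trust on
      *
      * Note that this driver allows at most one trusted VF at a time.
      */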
3097
3098 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3099                                       int linkstate)
3100 {
3101         struct lio *lio = GET_LIO(netdev);
3102         struct octeon_device *oct = lio->oct_dev;
3103         struct octnic_ctrl_pkt nctrl;
3104         int ret = 0;
3105
3106         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3107                 return -EINVAL;
3108
3109         if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3110                 return 0;
3111
3112         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3113         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3114         nctrl.ncmd.s.param1 =
3115             vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3116         nctrl.ncmd.s.param2 = linkstate;
3117         nctrl.ncmd.s.more = 0;
3118         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3119         nctrl.cb_fn = NULL;
3120
3121         ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3122
3123         if (!ret)
3124                 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3125         else if (ret > 0)
3126                 ret = -EIO;
3127
3128         return ret;
3129 }
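     /* Illustrative usage (names are placeholders):
      *
      *   ip link set dev eth0 vf 1 state enable    (or "disable"/"auto")
      *
      * iproute2 maps these to IFLA_VF_LINK_STATE_ENABLE/DISABLE/AUTO, which
      * arrive here as 'linkstate' via .ndo_set_vf_link_state.
      */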
3130
3131 static int
3132 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3133 {
3134         struct lio_devlink_priv *priv;
3135         struct octeon_device *oct;
3136
3137         priv = devlink_priv(devlink);
3138         oct = priv->oct;
3139
3140         *mode = oct->eswitch_mode;
3141
3142         return 0;
3143 }
3144
3145 static int
3146 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3147                           struct netlink_ext_ack *extack)
3148 {
3149         struct lio_devlink_priv *priv;
3150         struct octeon_device *oct;
3151         int ret = 0;
3152
3153         priv = devlink_priv(devlink);
3154         oct = priv->oct;
3155
3156         if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3157                 return -EINVAL;
3158
3159         if (oct->eswitch_mode == mode)
3160                 return 0;
3161
3162         switch (mode) {
3163         case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3164                 oct->eswitch_mode = mode;
3165                 ret = lio_vf_rep_create(oct);
3166                 break;
3167
3168         case DEVLINK_ESWITCH_MODE_LEGACY:
3169                 lio_vf_rep_destroy(oct);
3170                 oct->eswitch_mode = mode;
3171                 break;
3172
3173         default:
3174                 ret = -EINVAL;
3175         }
3176
3177         return ret;
3178 }
3179
3180 static const struct devlink_ops liquidio_devlink_ops = {
3181         .eswitch_mode_get = liquidio_eswitch_mode_get,
3182         .eswitch_mode_set = liquidio_eswitch_mode_set,
3183 };
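     /* Illustrative devlink usage (the PCI address is a placeholder):
      *
      *   devlink dev eswitch set pci/0000:03:00.0 mode switchdev
      *   devlink dev eswitch show pci/0000:03:00.0
      *
      * These invoke the eswitch_mode_set/get callbacks registered above.
      */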
3184
3185 static int
3186 liquidio_get_port_parent_id(struct net_device *dev,
3187                             struct netdev_phys_item_id *ppid)
3188 {
3189         struct lio *lio = GET_LIO(dev);
3190         struct octeon_device *oct = lio->oct_dev;
3191
3192         if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3193                 return -EOPNOTSUPP;
3194
3195         ppid->id_len = ETH_ALEN;
3196         ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3197
3198         return 0;
3199 }
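     /* The ID returned here is exposed to userspace as the netdev's
      * physical switch ID (IFLA_PHYS_SWITCH_ID), letting tools recognize
      * that the PF and its VF representors belong to the same eswitch.
      */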
3200
3201 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3202                                  struct ifla_vf_stats *vf_stats)
3203 {
3204         struct lio *lio = GET_LIO(netdev);
3205         struct octeon_device *oct = lio->oct_dev;
3206         struct oct_vf_stats stats;
3207         int ret;
3208
3209         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3210                 return -EINVAL;
3211
3212         memset(&stats, 0, sizeof(struct oct_vf_stats));
3213         ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3214         if (!ret) {
3215                 vf_stats->rx_packets = stats.rx_packets;
3216                 vf_stats->tx_packets = stats.tx_packets;
3217                 vf_stats->rx_bytes = stats.rx_bytes;
3218                 vf_stats->tx_bytes = stats.tx_bytes;
3219                 vf_stats->broadcast = stats.broadcast;
3220                 vf_stats->multicast = stats.multicast;
3221         }
3222
3223         return ret;
3224 }
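     /* The counters copied here are reported through IFLA_VF_STATS and
      * shown per VF by e.g. "ip -s link show dev eth0" (placeholder name).
      */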
3225
3226 static const struct net_device_ops lionetdevops = {
3227         .ndo_open               = liquidio_open,
3228         .ndo_stop               = liquidio_stop,
3229         .ndo_start_xmit         = liquidio_xmit,
3230         .ndo_get_stats64        = liquidio_get_stats64,
3231         .ndo_set_mac_address    = liquidio_set_mac,
3232         .ndo_set_rx_mode        = liquidio_set_mcast_list,
3233         .ndo_tx_timeout         = liquidio_tx_timeout,
3234
3235         .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3236         .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3237         .ndo_change_mtu         = liquidio_change_mtu,
3238         .ndo_do_ioctl           = liquidio_ioctl,
3239         .ndo_fix_features       = liquidio_fix_features,
3240         .ndo_set_features       = liquidio_set_features,
3241         .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
3242         .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
3243         .ndo_set_vf_mac         = liquidio_set_vf_mac,
3244         .ndo_set_vf_vlan        = liquidio_set_vf_vlan,
3245         .ndo_get_vf_config      = liquidio_get_vf_config,
3246         .ndo_set_vf_spoofchk    = liquidio_set_vf_spoofchk,
3247         .ndo_set_vf_trust       = liquidio_set_vf_trust,
3248         .ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3249         .ndo_get_vf_stats       = liquidio_get_vf_stats,
3250         .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3251 };
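     /* These callbacks are dispatched by the net core: .ndo_start_xmit from
      * the transmit path, .ndo_set_rx_mode when the address lists change,
      * and the .ndo_*_vf_* hooks from rtnetlink "ip link ... vf N ..."
      * requests on the PF netdev.
      */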
3252
3253 /** \brief Entry point for the liquidio module
3254  */
3255 static int __init liquidio_init(void)
3256 {
3257         int i;
3258         struct handshake *hs;
3259
3260         init_completion(&first_stage);
3261
3262         octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3263
3264         if (liquidio_init_pci())
3265                 return -EINVAL;
3266
3267         wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3268
3269         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3270                 hs = &handshake[i];
3271                 if (hs->pci_dev) {
3272                         wait_for_completion(&hs->init);
3273                         if (!hs->init_ok) {
3274                                 /* init handshake failed */
3275                                 dev_err(&hs->pci_dev->dev,
3276                                         "Failed to init device\n");
3277                                 liquidio_deinit_pci();
3278                                 return -EIO;
3279                         }
3280                 }
3281         }
3282
3283         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3284                 hs = &handshake[i];
3285                 if (hs->pci_dev) {
3286                         wait_for_completion_timeout(&hs->started,
3287                                                     msecs_to_jiffies(30000));
3288                         if (!hs->started_ok) {
3289                                 /* starter handshake failed */
3290                                 dev_err(&hs->pci_dev->dev,
3291                                         "Firmware failed to start\n");
3292                                 liquidio_deinit_pci();
3293                                 return -EIO;
3294                         }
3295                 }
3296         }
3297
3298         return 0;
3299 }
3300
3301 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3302 {
3303         struct octeon_device *oct = (struct octeon_device *)buf;
3304         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3305         int gmxport = 0;
3306         union oct_link_status *ls;
3307         int i;
3308
3309         if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3310                 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, gmxport=%d\n",
3311                         recv_pkt->buffer_size[0],
3312                         recv_pkt->rh.r_nic_info.gmxport);
3313                 goto nic_info_err;
3314         }
3315
3316         gmxport = recv_pkt->rh.r_nic_info.gmxport;
3317         ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3318                 OCT_DROQ_INFO_SIZE);
3319
3320         octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3321         for (i = 0; i < oct->ifcount; i++) {
3322                 if (oct->props[i].gmxport == gmxport) {
3323                         update_link_status(oct->props[i].netdev, ls);
3324                         break;
3325                 }
3326         }
3327
3328 nic_info_err:
3329         for (i = 0; i < recv_pkt->buffer_count; i++)
3330                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3331         octeon_free_recv_info(recv_info);
3332         return 0;
3333 }
3334
3335 /**
3336  * \brief Setup network interfaces
3337  * @param octeon_dev  octeon device
3338  *
3339  * Called during init time for each device. It assumes the NIC
3340  * is already up and running.  The link information for each
3341  * interface is passed in link_info.
3342  */
3343 static int setup_nic_devices(struct octeon_device *octeon_dev)
3344 {
3345         struct lio *lio = NULL;
3346         struct net_device *netdev;
3347         u8 mac[ETH_ALEN], i, j, *fw_ver, *micro_ver;
3348         unsigned long micro;
3349         u32 cur_ver;
3350         struct octeon_soft_command *sc;
3351         struct liquidio_if_cfg_resp *resp;
3352         struct octdev_props *props;
3353         int retval, num_iqueues, num_oqueues;
3354         int max_num_queues = 0;
3355         union oct_nic_if_cfg if_cfg;
3356         unsigned int base_queue;
3357         unsigned int gmx_port_id;
3358         u32 resp_size, data_size;
3359         u32 ifidx_or_pfnum;
3360         struct lio_version *vdata;
3361         struct devlink *devlink;
3362         struct lio_devlink_priv *lio_devlink;
3363
3364         /* This is to handle link status changes */
3365         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3366                                     OPCODE_NIC_INFO,
3367                                     lio_nic_info, octeon_dev);
3368
3369         /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3370          * They are handled directly.
3371          */
3372         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3373                                         free_netbuf);
3374
3375         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3376                                         free_netsgbuf);
3377
3378         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3379                                         free_netsgbuf_with_resp);
3380
3381         for (i = 0; i < octeon_dev->ifcount; i++) {
3382                 resp_size = sizeof(struct liquidio_if_cfg_resp);
3383                 data_size = sizeof(struct lio_version);
3384                 sc = (struct octeon_soft_command *)
3385                         octeon_alloc_soft_command(octeon_dev, data_size,
3386                                                   resp_size, 0);
                     if (!sc) {
                             dev_err(&octeon_dev->pci_dev->dev,
                                     "soft command allocation failed\n");
                             return -ENOMEM;
                     }
3387                 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3388                 vdata = (struct lio_version *)sc->virtdptr;
3389
3390                 *((u64 *)vdata) = 0;
3391                 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3392                 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3393                 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3394
3395                 if (OCTEON_CN23XX_PF(octeon_dev)) {
3396                         num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3397                         num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3398                         base_queue = octeon_dev->sriov_info.pf_srn;
3399
3400                         gmx_port_id = octeon_dev->pf_num;
3401                         ifidx_or_pfnum = octeon_dev->pf_num;
3402                 } else {
3403                         num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3404                                                 octeon_get_conf(octeon_dev), i);
3405                         num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3406                                                 octeon_get_conf(octeon_dev), i);
3407                         base_queue = CFG_GET_BASE_QUE_NIC_IF(
3408                                                 octeon_get_conf(octeon_dev), i);
3409                         gmx_port_id = CFG_GET_GMXID_NIC_IF(
3410                                                 octeon_get_conf(octeon_dev), i);
3411                         ifidx_or_pfnum = i;
3412                 }
3413
3414                 dev_dbg(&octeon_dev->pci_dev->dev,
3415                         "requesting config for interface %d, iqs %d, oqs %d\n",
3416                         ifidx_or_pfnum, num_iqueues, num_oqueues);
3417
3418                 if_cfg.u64 = 0;
3419                 if_cfg.s.num_iqueues = num_iqueues;
3420                 if_cfg.s.num_oqueues = num_oqueues;
3421                 if_cfg.s.base_queue = base_queue;
3422                 if_cfg.s.gmx_port_id = gmx_port_id;
3423
3424                 sc->iq_no = 0;
3425
3426                 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3427                                             OPCODE_NIC_IF_CFG, 0,
3428                                             if_cfg.u64, 0);
3429
3430                 init_completion(&sc->complete);
3431                 sc->sc_status = OCTEON_REQUEST_PENDING;
3432
3433                 retval = octeon_send_soft_command(octeon_dev, sc);
3434                 if (retval == IQ_SEND_FAILED) {
3435                         dev_err(&octeon_dev->pci_dev->dev,
3436                                 "iq/oq config failed status: %x\n",
3437                                 retval);
3438                         /* Soft instr is freed by driver in case of failure. */
3439                         octeon_free_soft_command(octeon_dev, sc);
3440                         return -EIO;
3441                 }
3442
3443                 /* Sleep on a wait queue until the condition flag indicates
3444                  * that the response arrived or the request timed out.
3445                  */
3446                 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3447                 if (retval)
3448                         return retval;
3449
3450                 retval = resp->status;
3451                 if (retval) {
3452                         dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3453                         WRITE_ONCE(sc->caller_is_done, true);
3454                         goto setup_nic_dev_done;
3455                 }
3456                 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3457                          sizeof(octeon_dev->fw_info.liquidio_firmware_version),
3458                          "%s", resp->cfg_info.liquidio_firmware_version);
3459
3460                 /* Verify f/w version (in case of 'auto' loading from flash) */
3461                 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3462                 if (memcmp(LIQUIDIO_BASE_VERSION,
3463                            fw_ver,
3464                            strlen(LIQUIDIO_BASE_VERSION))) {
3465                         dev_err(&octeon_dev->pci_dev->dev,
3466                                 "Unmatched firmware version. Expected %s.x, got %s.\n",
3467                                 LIQUIDIO_BASE_VERSION, fw_ver);
3468                         WRITE_ONCE(sc->caller_is_done, true);
3469                         goto setup_nic_dev_done;
3470                 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3471                            FW_IS_PRELOADED) {
3472                         dev_info(&octeon_dev->pci_dev->dev,
3473                                  "Using auto-loaded firmware version %s.\n",
3474                                  fw_ver);
3475                 }
3476
3477                 /* extract micro version field; point past '<maj>.<min>.' */
3478                 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3479                 if (kstrtoul(micro_ver, 10, &micro) != 0)
3480                         micro = 0;
3481                 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3482                 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3483                 octeon_dev->fw_info.ver.rev = micro;
3484
3485                 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3486                                     (sizeof(struct liquidio_if_cfg_info)) >> 3);
3487
3488                 num_iqueues = hweight64(resp->cfg_info.iqmask);
3489                 num_oqueues = hweight64(resp->cfg_info.oqmask);
3490
3491                 if (!num_iqueues || !num_oqueues) {
3492                         dev_err(&octeon_dev->pci_dev->dev,
3493                                 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3494                                 resp->cfg_info.iqmask,
3495                                 resp->cfg_info.oqmask);
3496                         WRITE_ONCE(sc->caller_is_done, true);
3497                         goto setup_nic_dev_done;
3498                 }
3499
3500                 if (OCTEON_CN6XXX(octeon_dev)) {
3501                         max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3502                                                                     cn6xxx));
3503                 } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3504                         max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3505                                                                     cn23xx_pf));
3506                 }
3507
3508                 dev_dbg(&octeon_dev->pci_dev->dev,
3509                         "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3510                         i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3511                         num_iqueues, num_oqueues, max_num_queues);
3512                 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3513
3514                 if (!netdev) {
3515                         dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3516                         WRITE_ONCE(sc->caller_is_done, true);
3517                         goto setup_nic_dev_done;
3518                 }
3519
3520                 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3521
3522                 /* Associate the routines that will handle different
3523                  * netdev tasks.
3524                  */
3525                 netdev->netdev_ops = &lionetdevops;
3526
3527                 retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3528                 if (retval) {
3529                         dev_err(&octeon_dev->pci_dev->dev,
3530                                 "setting real number rx failed\n");
3531                         WRITE_ONCE(sc->caller_is_done, true);
3532                         goto setup_nic_dev_free;
3533                 }
3534
3535                 retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3536                 if (retval) {
3537                         dev_err(&octeon_dev->pci_dev->dev,
3538                                 "setting real number tx failed\n");
3539                         WRITE_ONCE(sc->caller_is_done, true);
3540                         goto setup_nic_dev_free;
3541                 }
3542
3543                 lio = GET_LIO(netdev);
3544
3545                 memset(lio, 0, sizeof(struct lio));
3546
3547                 lio->ifidx = ifidx_or_pfnum;
3548
3549                 props = &octeon_dev->props[i];
3550                 props->gmxport = resp->cfg_info.linfo.gmxport;
3551                 props->netdev = netdev;
3552
3553                 lio->linfo.num_rxpciq = num_oqueues;
3554                 lio->linfo.num_txpciq = num_iqueues;
3555                 for (j = 0; j < num_oqueues; j++) {
3556                         lio->linfo.rxpciq[j].u64 =
3557                                 resp->cfg_info.linfo.rxpciq[j].u64;
3558                 }
3559                 for (j = 0; j < num_iqueues; j++) {
3560                         lio->linfo.txpciq[j].u64 =
3561                                 resp->cfg_info.linfo.txpciq[j].u64;
3562                 }
3563                 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3564                 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3565                 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3566
3567                 WRITE_ONCE(sc->caller_is_done, true);
3568
3569                 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3570
3571                 if (OCTEON_CN23XX_PF(octeon_dev) ||
3572                     OCTEON_CN6XXX(octeon_dev)) {
3573                         lio->dev_capability = NETIF_F_HIGHDMA
3574                                               | NETIF_F_IP_CSUM
3575                                               | NETIF_F_IPV6_CSUM
3576                                               | NETIF_F_SG | NETIF_F_RXCSUM
3577                                               | NETIF_F_GRO
3578                                               | NETIF_F_TSO | NETIF_F_TSO6
3579                                               | NETIF_F_LRO;
3580                 }
3581                 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3582
3583                 /*  Copy of transmit encapsulation capabilities:
3584                  *  TSO, TSO6, Checksums for this device
3585                  */
3586                 lio->enc_dev_capability = NETIF_F_IP_CSUM
3587                                           | NETIF_F_IPV6_CSUM
3588                                           | NETIF_F_GSO_UDP_TUNNEL
3589                                           | NETIF_F_HW_CSUM | NETIF_F_SG
3590                                           | NETIF_F_RXCSUM
3591                                           | NETIF_F_TSO | NETIF_F_TSO6
3592                                           | NETIF_F_LRO;
3593
3594                 netdev->hw_enc_features = (lio->enc_dev_capability &
3595                                            ~NETIF_F_LRO);
3596
3597                 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3598
3599                 netdev->vlan_features = lio->dev_capability;
3600                 /* Add any unchangeable hw features */
3601                 lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3602                                         NETIF_F_HW_VLAN_CTAG_RX |
3603                                         NETIF_F_HW_VLAN_CTAG_TX;
3604
3605                 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3606
3607                 netdev->hw_features = lio->dev_capability;
3608                 /* HW_VLAN_CTAG_RX and HW_VLAN_CTAG_FILTER are always on */
3609                 netdev->hw_features = netdev->hw_features &
3610                         ~NETIF_F_HW_VLAN_CTAG_RX;
3611
3612                 /* MTU range: 68 - 16000 */
3613                 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3614                 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3615
3616                 /* Point to the properties for the octeon device to which
3617                  * this interface belongs.
3618                  */
3619                 lio->oct_dev = octeon_dev;
3620                 lio->octprops = props;
3621                 lio->netdev = netdev;
3622
3623                 dev_dbg(&octeon_dev->pci_dev->dev,
3624                         "if%d gmx: %d hw_addr: 0x%llx\n", i,
3625                         lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3626
3627                 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3628                         u8 vfmac[ETH_ALEN];
3629
3630                         eth_random_addr(vfmac);
3631                         if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3632                                 dev_err(&octeon_dev->pci_dev->dev,
3633                                         "Error setting VF%d MAC address\n",
3634                                         j);
3635                                 goto setup_nic_dev_free;
3636                         }
3637                 }
3638
3639                 /* 64-bit swap required on LE machines */
3640                 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3641                 for (j = 0; j < ETH_ALEN; j++)
3642                         mac[j] = ((u8 *)&lio->linfo.hw_addr)[2 + j];
3643
3644                 /* Copy MAC Address to OS network device structure */
3645
3646                 ether_addr_copy(netdev->dev_addr, mac);
3647
3648                 /* By default, all interfaces on a single Octeon use the
3649                  * same tx and rx queues.
3650                  */
3651                 lio->txq = lio->linfo.txpciq[0].s.q_no;
3652                 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3653                 if (liquidio_setup_io_queues(octeon_dev, i,
3654                                              lio->linfo.num_txpciq,
3655                                              lio->linfo.num_rxpciq)) {
3656                         dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3657                         goto setup_nic_dev_free;
3658                 }
3659
3660                 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3661
3662                 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3663                 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3664
3665                 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3666                         dev_err(&octeon_dev->pci_dev->dev,
3667                                 "Gather list allocation failed\n");
3668                         goto setup_nic_dev_free;
3669                 }
3670
3671                 /* Register ethtool support */
3672                 liquidio_set_ethtool_ops(netdev);
3673                 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3674                         octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3675                 else
3676                         octeon_dev->priv_flags = 0x0;
3677
3678                 if (netdev->features & NETIF_F_LRO)
3679                         liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3680                                              OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3681
3682                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3683                                      OCTNET_CMD_VLAN_FILTER_ENABLE);
3684
3685                 if ((debug != -1) && (debug & NETIF_MSG_HW))
3686                         liquidio_set_feature(netdev,
3687                                              OCTNET_CMD_VERBOSE_ENABLE, 0);
3688
3689                 if (setup_link_status_change_wq(netdev))
3690                         goto setup_nic_dev_free;
3691
3692                 if ((octeon_dev->fw_info.app_cap_flags &
3693                      LIQUIDIO_TIME_SYNC_CAP) &&
3694                     setup_sync_octeon_time_wq(netdev))
3695                         goto setup_nic_dev_free;
3696
3697                 if (setup_rx_oom_poll_fn(netdev))
3698                         goto setup_nic_dev_free;
3699
3700                 /* Register the network device with the OS */
3701                 if (register_netdev(netdev)) {
3702                         dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3703                         goto setup_nic_dev_free;
3704                 }
3705
3706                 dev_dbg(&octeon_dev->pci_dev->dev,
3707                         "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3708                         i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3709                 netif_carrier_off(netdev);
3710                 lio->link_changes++;
3711
3712                 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3713
3714                 /* Send commands to the firmware to enable Rx checksum
3715                  * offload (and, below, Tx checksum offload) by default
3716                  * when the driver sets up this device.
3717                  */
3718                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3719                                             OCTNET_CMD_RXCSUM_ENABLE);
3720                 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3721                                      OCTNET_CMD_TXCSUM_ENABLE);
3722
3723                 dev_dbg(&octeon_dev->pci_dev->dev,
3724                         "NIC ifidx:%d Setup successful\n", i);
3725
3726                 if (octeon_dev->subsystem_id ==
3727                         OCTEON_CN2350_25GB_SUBSYS_ID ||
3728                     octeon_dev->subsystem_id ==
3729                         OCTEON_CN2360_25GB_SUBSYS_ID) {
3730                         cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3731                                              octeon_dev->fw_info.ver.min,
3732                                              octeon_dev->fw_info.ver.rev);
3733
3734                         /* speed control unsupported in f/w older than 1.7.2 */
3735                         if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3736                                 dev_info(&octeon_dev->pci_dev->dev,
3737                                          "speed setting not supported by f/w\n");
3738                                 octeon_dev->speed_setting = 25;
3739                                 octeon_dev->no_speed_setting = 1;
3740                         } else {
3741                                 liquidio_get_speed(lio);
3742                         }
3743
3744                         if (octeon_dev->speed_setting == 0) {
3745                                 octeon_dev->speed_setting = 25;
3746                                 octeon_dev->no_speed_setting = 1;
3747                         }
3748                 } else {
3749                         octeon_dev->no_speed_setting = 1;
3750                         octeon_dev->speed_setting = 10;
3751                 }
3752                 octeon_dev->speed_boot = octeon_dev->speed_setting;
3753
3754                 /* don't read FEC setting if unsupported by f/w (see above) */
3755                 if (octeon_dev->speed_boot == 25 &&
3756                     !octeon_dev->no_speed_setting) {
3757                         liquidio_get_fec(lio);
3758                         octeon_dev->props[lio->ifidx].fec_boot =
3759                                 octeon_dev->props[lio->ifidx].fec;
3760                 }
3761         }
3762
3763         devlink = devlink_alloc(&liquidio_devlink_ops,
3764                                 sizeof(struct lio_devlink_priv));
3765         if (!devlink) {
3766                 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3767                 goto setup_nic_dev_free;
3768         }
3769
3770         lio_devlink = devlink_priv(devlink);
3771         lio_devlink->oct = octeon_dev;
3772
3773         if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3774                 devlink_free(devlink);
3775                 dev_err(&octeon_dev->pci_dev->dev,
3776                         "devlink registration failed\n");
3777                 goto setup_nic_dev_free;
3778         }
3779
3780         octeon_dev->devlink = devlink;
3781         octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3782
3783         return 0;
3784
3785 setup_nic_dev_free:
3786
3787         while (i--) {
3788                 dev_err(&octeon_dev->pci_dev->dev,
3789                         "NIC ifidx:%d Setup failed\n", i);
3790                 liquidio_destroy_nic_device(octeon_dev, i);
3791         }
3792
3793 setup_nic_dev_done:
3794
3795         return -ENODEV;
3796 }
3797
3798 #ifdef CONFIG_PCI_IOV
3799 static int octeon_enable_sriov(struct octeon_device *oct)
3800 {
3801         unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3802         struct pci_dev *vfdev;
3803         int err;
3804         u32 u;
3805
3806         if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3807                 err = pci_enable_sriov(oct->pci_dev,
3808                                        oct->sriov_info.num_vfs_alloced);
3809                 if (err) {
3810                         dev_err(&oct->pci_dev->dev,
3811                                 "OCTEON: Failed to enable PCI sriov: %d\n",
3812                                 err);
3813                         oct->sriov_info.num_vfs_alloced = 0;
3814                         return err;
3815                 }
3816                 oct->sriov_info.sriov_enabled = 1;
3817
3818                 /* init lookup table that maps DPI ring number to VF pci_dev
3819                  * struct pointer
3820                  */
3821                 u = 0;
3822                 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3823                                        OCTEON_CN23XX_VF_VID, NULL);
3824                 while (vfdev) {
3825                         if (vfdev->is_virtfn &&
3826                             (vfdev->physfn == oct->pci_dev)) {
3827                                 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3828                                         vfdev;
3829                                 u += oct->sriov_info.rings_per_vf;
3830                         }
3831                         vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3832                                                OCTEON_CN23XX_VF_VID, vfdev);
3833                 }
3834         }
3835
3836         return num_vfs_alloced;
3837 }
3838
3839 static int lio_pci_sriov_disable(struct octeon_device *oct)
3840 {
3841         int u;
3842
3843         if (pci_vfs_assigned(oct->pci_dev)) {
3844                 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3845                 return -EPERM;
3846         }
3847
3848         pci_disable_sriov(oct->pci_dev);
3849
3850         u = 0;
3851         while (u < MAX_POSSIBLE_VFS) {
3852                 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3853                 u += oct->sriov_info.rings_per_vf;
3854         }
3855
3856         oct->sriov_info.num_vfs_alloced = 0;
3857         dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3858                  oct->pf_num);
3859
3860         return 0;
3861 }
3862
3863 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3864 {
3865         struct octeon_device *oct = pci_get_drvdata(dev);
3866         int ret = 0;
3867
3868         if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3869             (oct->sriov_info.sriov_enabled)) {
3870                 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3871                          oct->pf_num, num_vfs);
3872                 return 0;
3873         }
3874
3875         if (!num_vfs) {
3876                 lio_vf_rep_destroy(oct);
3877                 ret = lio_pci_sriov_disable(oct);
3878         } else if (num_vfs > oct->sriov_info.max_vfs) {
3879                 dev_err(&oct->pci_dev->dev,
3880                         "OCTEON: Max allowed VFs:%d user requested:%d\n",
3881                         oct->sriov_info.max_vfs, num_vfs);
3882                 ret = -EPERM;
3883         } else {
3884                 oct->sriov_info.num_vfs_alloced = num_vfs;
3885                 ret = octeon_enable_sriov(oct);
                     if (ret < 0)
                             return ret;
3886                 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3887                          oct->pf_num, num_vfs);
3888                 ret = lio_vf_rep_create(oct);
3889                 if (ret)
3890                         dev_info(&oct->pci_dev->dev,
3891                                  "vf representor create failed\n");
3892         }
3893
3894         return ret;
3895 }
3896 #endif
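     /* liquidio_enable_sriov() is registered as the PCI driver's
      * .sriov_configure callback, so VFs are typically created from sysfs
      * (path is illustrative):
      *
      *   echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
      */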
3897
3898 /**
3899  * \brief initialize the NIC
3900  * @param oct octeon device
3901  *
3902  * This initialization routine is called once the Octeon device application is
3903  * up and running
3904  */
3905 static int liquidio_init_nic_module(struct octeon_device *oct)
3906 {
3907         int i, retval = 0;
3908         int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3909
3910         dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3911
3912         /* Only the default iq and oq were initialized earlier;
3913          * initialize the rest as well by running the port_config
3914          * command for each port.
3915          */
3916         oct->ifcount = num_nic_ports;
3917
3918         memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3919
3920         for (i = 0; i < MAX_OCTEON_LINKS; i++)
3921                 oct->props[i].gmxport = -1;
3922
3923         retval = setup_nic_devices(oct);
3924         if (retval) {
3925                 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3926                 goto octnet_init_failure;
3927         }
3928
3929         /* Call vf_rep_modinit if the firmware is switchdev capable
3930          * and do it from the first liquidio function probed.
3931          */
3932         if (!oct->octeon_id &&
3933             oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3934                 retval = lio_vf_rep_modinit();
3935                 if (retval) {
3936                         liquidio_stop_nic_module(oct);
3937                         goto octnet_init_failure;
3938                 }
3939         }
3940
3941         liquidio_ptp_init(oct);
3942
3943         dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3944
3945         return retval;
3946
3947 octnet_init_failure:
3948
3949         oct->ifcount = 0;
3950
3951         return retval;
3952 }
3953
3954 /**
3955  * \brief starter callback that invokes the remaining initialization work after
3956  * the NIC is up and running.
3957  * @param work  pointer to the work_struct (embedded in struct cavium_wk)
3958  */
3959 static void nic_starter(struct work_struct *work)
3960 {
3961         struct octeon_device *oct;
3962         struct cavium_wk *wk = (struct cavium_wk *)work;
3963
3964         oct = (struct octeon_device *)wk->ctxptr;
3965
3966         if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3967                 return;
3968
3969         /* If the status of the device is CORE_OK, the core
3970          * application has reported its application type. Call
3971          * any registered handlers now and move to the RUNNING
3972          * state.
3973          */
3974         if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3975                 schedule_delayed_work(&oct->nic_poll_work.work,
3976                                       LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3977                 return;
3978         }
3979
3980         atomic_set(&oct->status, OCT_DEV_RUNNING);
3981
3982         if (oct->app_mode == CVM_DRV_NIC_APP) {
3983                 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3984
3985                 if (liquidio_init_nic_module(oct))
3986                         dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3987                 else
3988                         handshake[oct->octeon_id].started_ok = 1;
3989         } else {
3990                 dev_err(&oct->pci_dev->dev,
3991                         "Unexpected application running on NIC (%d). Check firmware.\n",
3992                         oct->app_mode);
3993         }
3994
3995         complete(&handshake[oct->octeon_id].started);
3996 }
3997
3998 static int
3999 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4000 {
4001         struct octeon_device *oct = (struct octeon_device *)buf;
4002         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4003         int i, notice, vf_idx;
4004         bool cores_crashed;
4005         u64 *data, vf_num;
4006
4007         notice = recv_pkt->rh.r.ossp;
4008         data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4009
4010         /* the first 64-bit word of data is the vf_num */
4011         vf_num = data[0];
4012         octeon_swap_8B_data(&vf_num, 1);
4013         vf_idx = (int)vf_num - 1;
4014
4015         cores_crashed = READ_ONCE(oct->cores_crashed);
4016
4017         if (notice == VF_DRV_LOADED) {
4018                 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4019                         oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4020                         dev_info(&oct->pci_dev->dev,
4021                                  "driver for VF%d was loaded\n", vf_idx);
4022                         if (!cores_crashed)
4023                                 try_module_get(THIS_MODULE);
4024                 }
4025         } else if (notice == VF_DRV_REMOVED) {
4026                 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4027                         oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4028                         dev_info(&oct->pci_dev->dev,
4029                                  "driver for VF%d was removed\n", vf_idx);
4030                         if (!cores_crashed)
4031                                 module_put(THIS_MODULE);
4032                 }
4033         } else if (notice == VF_DRV_MACADDR_CHANGED) {
4034                 u8 *b = (u8 *)&data[1];
4035
4036                 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4037                 dev_info(&oct->pci_dev->dev,
4038                          "VF driver changed VF%d's MAC address to %pM\n",
4039                          vf_idx, b + 2);
4040         }
4041
4042         for (i = 0; i < recv_pkt->buffer_count; i++)
4043                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4044         octeon_free_recv_info(recv_info);
4045
4046         return 0;
4047 }
4048
4049 /**
4050  * \brief Device initialization for each Octeon device that is probed
4051  * @param octeon_dev  octeon device
4052  */
4053 static int octeon_device_init(struct octeon_device *octeon_dev)
4054 {
4055         int j, ret;
4056         char bootcmd[] = "\n";
4057         char *dbg_enb = NULL;
4058         enum lio_fw_state fw_state;
4059         struct octeon_device_priv *oct_priv =
4060                 (struct octeon_device_priv *)octeon_dev->priv;
4061         atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4062
4063         /* Enable access to the octeon device and make its DMA capability
4064          * known to the OS.
4065          */
4066         if (octeon_pci_os_setup(octeon_dev))
4067                 return 1;
4068
4069         atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4070
4071         /* Identify the Octeon type and map the BAR address space. */
4072         if (octeon_chip_specific_setup(octeon_dev)) {
4073                 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4074                 return 1;
4075         }
4076
4077         atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4078
4079         /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4080          * since that is what is required for the reference to be removed
4081          * during de-initialization (see 'octeon_destroy_resources').
4082          */
4083         octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4084                                PCI_SLOT(octeon_dev->pci_dev->devfn),
4085                                PCI_FUNC(octeon_dev->pci_dev->devfn),
4086                                true);
4087
4088         octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4089
4090         /* CN23XX supports preloaded firmware if the following is true:
4091          *
4092          * The adapter indicates that firmware is currently running AND
4093          * 'fw_type' is 'auto'.
4094          *
4095          * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4096          */
4097         if (OCTEON_CN23XX_PF(octeon_dev) &&
4098             cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4099                 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4100                                FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4101         }
4102
4103         /* If loading firmware, only first device of adapter needs to do so. */
4104         fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4105                                   FW_NEEDS_TO_BE_LOADED,
4106                                   FW_IS_BEING_LOADED);
4107
4108         /* Here, [local variable] 'fw_state' is set to one of:
4109          *
4110          *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4111          *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4112          *                          firmware to the adapter.
4113          *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4114          *                          firmware to the adapter.
4115          */
4116
4117         /* Prior to f/w load, perform a soft reset of the Octeon device;
4118          * if error resetting, return w/error.
4119          */
4120         if (fw_state == FW_NEEDS_TO_BE_LOADED)
4121                 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4122                         return 1;
4123
4124         /* Initialize the dispatch mechanism used to push packets arriving on
4125          * Octeon Output queues.
4126          */
4127         if (octeon_init_dispatch_list(octeon_dev))
4128                 return 1;
4129
4130         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4131                                     OPCODE_NIC_CORE_DRV_ACTIVE,
4132                                     octeon_core_drv_init,
4133                                     octeon_dev);
4134
4135         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4136                                     OPCODE_NIC_VF_DRV_NOTICE,
4137                                     octeon_recv_vf_drv_notice, octeon_dev);
4138         INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4139         octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4140         schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4141                               LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4142
4143         atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4144
4145         if (octeon_set_io_queues_off(octeon_dev)) {
4146                 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4147                 return 1;
4148         }
4149
4150         if (OCTEON_CN23XX_PF(octeon_dev)) {
4151                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4152                 if (ret) {
4153                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4154                         return ret;
4155                 }
4156         }
4157
4158         /* Initialize soft command buffer pool
4159          */
4160         if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4161                 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4162                 return 1;
4163         }
4164         atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4165
4166         /*  Setup the data structures that manage this Octeon's Input queues. */
4167         if (octeon_setup_instr_queues(octeon_dev)) {
4168                 dev_err(&octeon_dev->pci_dev->dev,
4169                         "instruction queue initialization failed\n");
4170                 return 1;
4171         }
4172         atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4173
4174         /* Initialize lists to manage the requests of different types that
4175          * arrive from user & kernel applications for this octeon device.
4176          */
4177         if (octeon_setup_response_list(octeon_dev)) {
4178                 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4179                 return 1;
4180         }
4181         atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4182
4183         if (octeon_setup_output_queues(octeon_dev)) {
4184                 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4185                 return 1;
4186         }
4187
4188         atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4189
4190         if (OCTEON_CN23XX_PF(octeon_dev)) {
4191                 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4192                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4193                         return 1;
4194                 }
4195                 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4196
4197                 if (octeon_allocate_ioq_vector
4198                                 (octeon_dev,
4199                                  octeon_dev->sriov_info.num_pf_rings)) {
4200                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4201                         return 1;
4202                 }
4203                 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4204
4205         } else {
4206                 /* The input and output queue registers were setup earlier (the
4207                  * queues were not enabled). Any additional registers
4208                  * that need to be programmed should be done now.
4209                  */
4210                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4211                 if (ret) {
4212                         dev_err(&octeon_dev->pci_dev->dev,
4213                                 "Failed to configure device registers\n");
4214                         return ret;
4215                 }
4216         }
4217
4218         /* Initialize the tasklet that handles output queue packet processing. */
4219         dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4220         tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4221                      (unsigned long)octeon_dev);
4222
4223         /* Setup the interrupt handler and record the INT SUM register address
4224          */
4225         if (octeon_setup_interrupt(octeon_dev,
4226                                    octeon_dev->sriov_info.num_pf_rings))
4227                 return 1;
4228
4229         /* Enable Octeon device interrupts */
4230         octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4231
4232         atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4233
4234         /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4235          * the output queue is enabled.
4236          * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4237          * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4238          * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4239          * before any credits have been issued, causing the ring to be reset
4240          * (and the f/w appear to never have started).
4241          */
4242         for (j = 0; j < octeon_dev->num_oqs; j++)
4243                 writel(octeon_dev->droq[j]->max_count,
4244                        octeon_dev->droq[j]->pkts_credit_reg);
4245
	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev,
			"Failed to enable input/output queues\n");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

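		/* Give the card LIO_RESET_SECS seconds to come out of the
		 * soft reset before polling for DDR readiness.
		 */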
		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset. */
		while (!ddr_timeout) {
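			/* ddr_timeout is a writable (0644) module parameter;
			 * re-check it every 100 ms until the user sets it to
			 * a non-zero value.
			 */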
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert U-Boot to take commands from the host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
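		/* The return value is not checked here; an unresponsive
		 * console should surface in octeon_init_consoles() below.
		 */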

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		/* If console debug is enabled, pass an empty string to use the
		 * default enablement; otherwise pass NULL for 'disabled'.
		 */
		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		} else if (octeon_console_debug_enabled(0)) {
			/* If the console was added AND we're logging console
			 * output, then set our console print function.
			 */
			octeon_dev->console[0].print = octeon_dbg_console_print;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}

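		/* adapter_fw_state is shared between PFs on the same adapter,
		 * so a sibling PF that probes later can see that the firmware
		 * is already loaded and skip reloading it.
		 */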
		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
	}

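	/* Record success and wake up the thread waiting on this device's
	 * init handshake.
	 */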
	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	return 0;
}

/**
 * \brief Debug console print function
 * @param oct         octeon device
 * @param console_num console number
 * @param prefix      first portion of line to display
 * @param suffix      second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the whole line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into
 * two parts, in which case they will be passed as the 'prefix' and
 * 'suffix' parameters.
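 * For example, a (hypothetical) line "Firmware is ready" might arrive
 * whole in 'prefix', or split as prefix "Firmware is" and suffix
 * " ready".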
 */
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix)
{
	if (prefix && suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
			 suffix);
	else if (prefix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
	else if (suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);

	return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);