1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2016 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/firmware.h>
22 #include <net/vxlan.h>
23 #include <linux/kthread.h>
24 #include <net/switchdev.h>
25 #include "liquidio_common.h"
26 #include "octeon_droq.h"
27 #include "octeon_iq.h"
28 #include "response_manager.h"
29 #include "octeon_device.h"
30 #include "octeon_nic.h"
31 #include "octeon_main.h"
32 #include "octeon_network.h"
33 #include "cn66xx_regs.h"
34 #include "cn66xx_device.h"
35 #include "cn68xx_device.h"
36 #include "cn23xx_pf_device.h"
37 #include "liquidio_image.h"
38 #include "lio_vf_rep.h"
40 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
41 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(LIQUIDIO_VERSION);
44 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
45 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
46 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
47 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
48 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
49 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
50 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
51 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
53 static int ddr_timeout = 10000;
54 module_param(ddr_timeout, int, 0644);
55 MODULE_PARM_DESC(ddr_timeout,
56 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
58 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
60 static int debug = -1;
61 module_param(debug, int, 0644);
62 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
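/* A debug value of -1 selects the driver's default message level,
 * DEFAULT_MSG_ENABLE (drv, probe and link messages).
 */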
64 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
65 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
66 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\", which uses the firmware in flash if present, else loads \"nic\")");
68 static u32 console_bitmask;
69 module_param(console_bitmask, int, 0644);
70 MODULE_PARM_DESC(console_bitmask,
71 "Bitmask indicating which consoles have debug output redirected to syslog.");
74 * \brief Determines if a given console has debug output enabled.
75 * @param console console to check
76 * @returns 1 if enabled, 0 otherwise
78 static int octeon_console_debug_enabled(u32 console)
80 return (console_bitmask >> (console)) & 0x1;
83 /* Polling interval for determining when NIC application is alive */
84 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
86 /* runtime link query interval */
87 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
88 /* Update local time on the octeon firmware every 60 seconds.
89 * Keeping the firmware on the same time reference makes it easy to
90 * correlate firmware-logged events/errors with host events when debugging.
92 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
94 struct lio_trusted_vf_ctx {
95 struct completion complete;
99 struct liquidio_rx_ctl_context {
102 wait_queue_head_t wc;
107 struct oct_link_status_resp {
109 struct oct_link_info link_info;
113 struct oct_timestamp_resp {
119 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
124 #ifdef __BIG_ENDIAN_BITFIELD
136 /** Octeon device properties to be used by the NIC module.
137 * Each octeon device in the system will be represented
138 * by this structure in the NIC module.
141 #define OCTNIC_MAX_SG (MAX_SKB_FRAGS)
143 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
144 #define OCTNIC_GSO_MAX_SIZE \
145 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
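/* Largest GSO packet supported: the default input jabber size minus the
 * room reserved above for protocol headers.
 */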
147 /** Structure of a node in list of gather components maintained by
148 * NIC driver for each network device.
150 struct octnic_gather {
151 /** List manipulation. Next and prev pointers. */
152 struct list_head list;
154 /** Size of the gather component at sg in bytes. */
157 /** Number of bytes that sg was adjusted to make it 8B-aligned. */
160 /** Gather component that can accommodate max sized fragment list
161 * received from the IP layer.
163 struct octeon_sg_entry *sg;
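/** DMA mapped address of the gather component above. */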
165 dma_addr_t sg_dma_ptr;
169 struct completion init;
170 struct completion started;
171 struct pci_dev *pci_dev;
176 #ifdef CONFIG_PCI_IOV
177 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
180 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
181 char *prefix, char *suffix);
183 static int octeon_device_init(struct octeon_device *);
184 static int liquidio_stop(struct net_device *netdev);
185 static void liquidio_remove(struct pci_dev *pdev);
186 static int liquidio_probe(struct pci_dev *pdev,
187 const struct pci_device_id *ent);
188 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
191 static struct handshake handshake[MAX_OCTEON_DEVICES];
192 static struct completion first_stage;
194 static void octeon_droq_bh(unsigned long pdev)
198 struct octeon_device *oct = (struct octeon_device *)pdev;
199 struct octeon_device_priv *oct_priv =
200 (struct octeon_device_priv *)oct->priv;
202 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
203 if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
205 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
207 lio_enable_irq(oct->droq[q_no], NULL);
209 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
210 /* set time and cnt interrupt thresholds for this DROQ
213 int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
216 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
219 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
224 tasklet_schedule(&oct_priv->droq_tasklet);
227 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
229 struct octeon_device_priv *oct_priv =
230 (struct octeon_device_priv *)oct->priv;
231 int retry = 100, pkt_cnt = 0, pending_pkts = 0;
237 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
238 if (!(oct->io_qmask.oq & BIT_ULL(i)))
240 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
243 pending_pkts += pkt_cnt;
244 tasklet_schedule(&oct_priv->droq_tasklet);
247 schedule_timeout_uninterruptible(1);
249 } while (retry-- && pending_pkts);
255 * \brief Forces all IO queues off on a given device
256 * @param oct Pointer to Octeon device
258 static void force_io_queues_off(struct octeon_device *oct)
260 if ((oct->chip_id == OCTEON_CN66XX) ||
261 (oct->chip_id == OCTEON_CN68XX)) {
262 /* Reset the Enable bits for Input Queues. */
263 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
265 /* Reset the Enable bits for Output Queues. */
266 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
271 * \brief Cause device to go quiet so it can be safely removed/reset/etc
272 * @param oct Pointer to Octeon device
274 static inline void pcierror_quiesce_device(struct octeon_device *oct)
278 /* Disable the input and output queues now. No more packets will
279 * arrive from Octeon, but we should wait for all packet processing to complete.
282 force_io_queues_off(oct);
284 /* To allow for in-flight requests */
285 schedule_timeout_uninterruptible(100);
287 if (wait_for_pending_requests(oct))
288 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
290 /* Force all requests waiting to be fetched by OCTEON to complete. */
291 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
292 struct octeon_instr_queue *iq;
294 if (!(oct->io_qmask.iq & BIT_ULL(i)))
296 iq = oct->instr_queue[i];
298 if (atomic_read(&iq->instr_pending)) {
299 spin_lock_bh(&iq->lock);
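/* Pretend the hardware fetched everything that was posted so the
 * request list can be drained and its buffers reclaimed below.
 */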
301 iq->octeon_read_index = iq->host_write_index;
302 iq->stats.instr_processed +=
303 atomic_read(&iq->instr_pending);
304 lio_process_iq_request_list(oct, iq, 0);
305 spin_unlock_bh(&iq->lock);
309 /* Force all pending ordered list requests to time out. */
310 lio_process_ordered_list(oct, 1);
312 /* We do not need to wait for output queue packets to be processed. */
316 * \brief Cleanup PCI AER uncorrectable error status
317 * @param dev Pointer to PCI device
319 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
324 pr_info("%s :\n", __func__);
326 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
327 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
328 if (dev->error_state == pci_channel_io_normal)
329 status &= ~mask; /* Clear corresponding nonfatal bits */
331 status &= mask; /* Clear corresponding fatal bits */
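/* PCI_ERR_UNCOR_STATUS is write-one-to-clear, so writing the masked
 * value back clears exactly those bits.
 */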
332 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
336 * \brief Stop all PCI IO to a given device
337 * @param dev Pointer to Octeon device
339 static void stop_pci_io(struct octeon_device *oct)
341 /* No more instructions will be forwarded. */
342 atomic_set(&oct->status, OCT_DEV_IN_RESET);
344 pci_disable_device(oct->pci_dev);
346 /* Disable interrupts */
347 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
349 pcierror_quiesce_device(oct);
351 /* Release the interrupt line */
352 free_irq(oct->pci_dev->irq, oct);
354 if (oct->flags & LIO_FLAG_MSI_ENABLED)
355 pci_disable_msi(oct->pci_dev);
357 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
358 lio_get_state_string(&oct->status));
360 /* making it a common function for all OCTEON models */
361 cleanup_aer_uncorrect_error_status(oct->pci_dev);
365 * \brief called when PCI error is detected
366 * @param pdev Pointer to PCI device
367 * @param state The current pci connection state
369 * This function is called after a PCI bus error affecting
370 * this device has been detected.
372 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
373 pci_channel_state_t state)
375 struct octeon_device *oct = pci_get_drvdata(pdev);
377 /* Non-correctable Non-fatal errors */
378 if (state == pci_channel_io_normal) {
379 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
380 cleanup_aer_uncorrect_error_status(oct->pci_dev);
381 return PCI_ERS_RESULT_CAN_RECOVER;
384 /* Non-correctable Fatal errors */
385 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
388 /* Always return a DISCONNECT. There is no support for recovery but only
389 * for a clean shutdown.
391 return PCI_ERS_RESULT_DISCONNECT;
395 * \brief called when PCI MMIO access is re-enabled after an error
396 * @param pdev Pointer to PCI device
398 static pci_ers_result_t liquidio_pcie_mmio_enabled(
399 struct pci_dev *pdev __attribute__((unused)))
401 /* We should never hit this since we never ask for a reset for a Fatal
402 * Error. We always return DISCONNECT in io_error above.
403 * But play safe and return RECOVERED for now.
405 return PCI_ERS_RESULT_RECOVERED;
409 * \brief called after the pci bus has been reset.
410 * @param pdev Pointer to PCI device
412 * Restart the card from scratch, as if from a cold-boot. Implementation
413 * resembles the first-half of the octeon_resume routine.
415 static pci_ers_result_t liquidio_pcie_slot_reset(
416 struct pci_dev *pdev __attribute__((unused)))
418 /* We should never hit this since we never ask for a reset for a Fatal
419 * Error. We always return DISCONNECT in io_error above.
420 * But play safe and return RECOVERED for now.
422 return PCI_ERS_RESULT_RECOVERED;
426 * \brief called when traffic can start flowing again.
427 * @param pdev Pointer to PCI device
429 * This callback is called when the error recovery driver tells us that
430 * it is OK to resume normal operation. Implementation resembles the
431 * second-half of the octeon_resume routine.
433 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
435 /* Nothing to be done here. */
440 * \brief called when suspending
441 * @param pdev Pointer to PCI device
442 * @param state state to suspend to
444 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
445 pm_message_t state __attribute__((unused)))
451 * \brief called when resuming
452 * @param pdev Pointer to PCI device
454 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
460 /* For PCI-E Advanced Error Recovery (AER) Interface */
461 static const struct pci_error_handlers liquidio_err_handler = {
462 .error_detected = liquidio_pcie_error_detected,
463 .mmio_enabled = liquidio_pcie_mmio_enabled,
464 .slot_reset = liquidio_pcie_slot_reset,
465 .resume = liquidio_pcie_resume,
468 static const struct pci_device_id liquidio_pci_tbl[] = {
470 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
473 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
476 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
482 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
484 static struct pci_driver liquidio_pci_driver = {
486 .id_table = liquidio_pci_tbl,
487 .probe = liquidio_probe,
488 .remove = liquidio_remove,
489 .err_handler = &liquidio_err_handler, /* For AER */
492 .suspend = liquidio_suspend,
493 .resume = liquidio_resume,
495 #ifdef CONFIG_PCI_IOV
496 .sriov_configure = liquidio_enable_sriov,
501 * \brief register PCI driver
503 static int liquidio_init_pci(void)
505 return pci_register_driver(&liquidio_pci_driver);
509 * \brief unregister PCI driver
511 static void liquidio_deinit_pci(void)
513 pci_unregister_driver(&liquidio_pci_driver);
517 * \brief Check Tx queue status, and take appropriate action
518 * @param lio per-network private data
519 * @returns 0 if full, number of queues woken up otherwise
521 static inline int check_txq_status(struct lio *lio)
523 int numqs = lio->netdev->num_tx_queues;
527 /* check each sub-queue state */
528 for (q = 0; q < numqs; q++) {
529 iq = lio->linfo.txpciq[q %
530 lio->oct_dev->num_iqs].s.q_no;
531 if (octnet_iq_is_full(lio->oct_dev, iq))
533 if (__netif_subqueue_stopped(lio->netdev, q)) {
534 netif_wake_subqueue(lio->netdev, q);
535 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
545 * Remove the node at the head of the list. The list will be empty after
546 * this call if no other nodes remain in it.
548 static inline struct list_head *list_delete_head(struct list_head *root)
550 struct list_head *node;
552 if ((root->prev == root) && (root->next == root))
564 * \brief Delete gather lists
565 * @param lio per-network private data
567 static void delete_glists(struct lio *lio)
569 struct octnic_gather *g;
572 kfree(lio->glist_lock);
573 lio->glist_lock = NULL;
578 for (i = 0; i < lio->linfo.num_txpciq; i++) {
580 g = (struct octnic_gather *)
581 list_delete_head(&lio->glist[i]);
586 if (lio->glists_virt_base && lio->glists_virt_base[i] &&
587 lio->glists_dma_base && lio->glists_dma_base[i]) {
588 lio_dma_free(lio->oct_dev,
589 lio->glist_entry_size * lio->tx_qsize,
590 lio->glists_virt_base[i],
591 lio->glists_dma_base[i]);
595 kfree(lio->glists_virt_base);
596 lio->glists_virt_base = NULL;
598 kfree(lio->glists_dma_base);
599 lio->glists_dma_base = NULL;
606 * \brief Setup gather lists
607 * @param lio per-network private data
609 static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
612 struct octnic_gather *g;
614 lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
616 if (!lio->glist_lock)
619 lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
622 kfree(lio->glist_lock);
623 lio->glist_lock = NULL;
627 lio->glist_entry_size =
628 ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
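/* Each octeon_sg_entry holds four buffer pointers, so a gather component
 * needs ROUNDUP4(OCTNIC_MAX_SG) / 4 entries; keep the component size
 * 8-byte aligned.
 */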
630 /* allocate memory to store virtual and dma base address of
631 * per glist consistent memory
633 lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
635 lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
638 if (!lio->glists_virt_base || !lio->glists_dma_base) {
643 for (i = 0; i < num_iqs; i++) {
644 int numa_node = dev_to_node(&oct->pci_dev->dev);
646 spin_lock_init(&lio->glist_lock[i]);
648 INIT_LIST_HEAD(&lio->glist[i]);
650 lio->glists_virt_base[i] =
652 lio->glist_entry_size * lio->tx_qsize,
653 &lio->glists_dma_base[i]);
655 if (!lio->glists_virt_base[i]) {
660 for (j = 0; j < lio->tx_qsize; j++) {
661 g = kzalloc_node(sizeof(*g), GFP_KERNEL,
664 g = kzalloc(sizeof(*g), GFP_KERNEL);
668 g->sg = lio->glists_virt_base[i] +
669 (j * lio->glist_entry_size);
671 g->sg_dma_ptr = lio->glists_dma_base[i] +
672 (j * lio->glist_entry_size);
674 list_add_tail(&g->list, &lio->glist[i]);
677 if (j != lio->tx_qsize) {
687 * \brief Print link information
688 * @param netdev network device
690 static void print_link_info(struct net_device *netdev)
692 struct lio *lio = GET_LIO(netdev);
694 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
695 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
696 struct oct_link_info *linfo = &lio->linfo;
698 if (linfo->link.s.link_up) {
699 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
701 (linfo->link.s.duplex) ? "Full" : "Half");
703 netif_info(lio, link, lio->netdev, "Link Down\n");
709 * \brief Routine to notify MTU change
710 * @param work work_struct data structure
712 static void octnet_link_status_change(struct work_struct *work)
714 struct cavium_wk *wk = (struct cavium_wk *)work;
715 struct lio *lio = (struct lio *)wk->ctxptr;
717 /* lio->linfo.link.s.mtu always contains the max MTU of the lio interface.
718 * This routine is invoked only when the new max MTU of the interface is
719 * less than the current MTU.
722 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
727 * \brief Sets up the mtu status change work
728 * @param netdev network device
730 static inline int setup_link_status_change_wq(struct net_device *netdev)
732 struct lio *lio = GET_LIO(netdev);
733 struct octeon_device *oct = lio->oct_dev;
735 lio->link_status_wq.wq = alloc_workqueue("link-status",
737 if (!lio->link_status_wq.wq) {
738 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
741 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
742 octnet_link_status_change);
743 lio->link_status_wq.wk.ctxptr = lio;
748 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
750 struct lio *lio = GET_LIO(netdev);
752 if (lio->link_status_wq.wq) {
753 cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
754 destroy_workqueue(lio->link_status_wq.wq);
759 * \brief Update link status
760 * @param netdev network device
761 * @param ls link status structure
763 * Called on receipt of a link status response from the core application to
764 * update each interface's link status.
766 static inline void update_link_status(struct net_device *netdev,
767 union oct_link_status *ls)
769 struct lio *lio = GET_LIO(netdev);
770 int changed = (lio->linfo.link.u64 != ls->u64);
771 int current_max_mtu = lio->linfo.link.s.mtu;
772 struct octeon_device *oct = lio->oct_dev;
774 dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
775 __func__, lio->linfo.link.u64, ls->u64);
776 lio->linfo.link.u64 = ls->u64;
778 if ((lio->intf_open) && (changed)) {
779 print_link_info(netdev);
782 if (lio->linfo.link.s.link_up) {
783 dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
784 netif_carrier_on(netdev);
787 dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
788 netif_carrier_off(netdev);
791 if (lio->linfo.link.s.mtu != current_max_mtu) {
792 netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
793 current_max_mtu, lio->linfo.link.s.mtu);
794 netdev->max_mtu = lio->linfo.link.s.mtu;
796 if (lio->linfo.link.s.mtu < netdev->mtu) {
797 dev_warn(&oct->pci_dev->dev,
798 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
799 netdev->mtu, lio->linfo.link.s.mtu);
800 queue_delayed_work(lio->link_status_wq.wq,
801 &lio->link_status_wq.wk.work, 0);
807 * lio_sync_octeon_time_cb - callback invoked when the soft command
808 * sent by lio_sync_octeon_time() has completed, successfully or not
810 * @oct - octeon device structure
811 * @status - indicates success or failure
812 * @buf - pointer to the command that was sent to firmware
814 static void lio_sync_octeon_time_cb(struct octeon_device *oct,
815 u32 status, void *buf)
817 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
820 dev_err(&oct->pci_dev->dev,
821 "Failed to sync time to octeon; error=%d\n", status);
823 octeon_free_soft_command(oct, sc);
827 * lio_sync_octeon_time - send the latest local time to the octeon firmware
828 * so that the firmware can correct its time if there is any skew
830 * @work: work scheduled to send time update to octeon firmware
832 static void lio_sync_octeon_time(struct work_struct *work)
834 struct cavium_wk *wk = (struct cavium_wk *)work;
835 struct lio *lio = (struct lio *)wk->ctxptr;
836 struct octeon_device *oct = lio->oct_dev;
837 struct octeon_soft_command *sc;
838 struct timespec64 ts;
842 sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
844 dev_err(&oct->pci_dev->dev,
845 "Failed to sync time to octeon: soft command allocation failed\n");
849 lt = (struct lio_time *)sc->virtdptr;
851 /* Get time of the day */
852 getnstimeofday64(&ts);
853 lt->sec = ts.tv_sec;
854 lt->nsec = ts.tv_nsec;
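/* The firmware expects big-endian 64-bit words; swap the structure in place. */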
855 octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
857 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
858 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
859 OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
861 sc->callback = lio_sync_octeon_time_cb;
862 sc->callback_arg = sc;
863 sc->wait_time = 1000;
865 ret = octeon_send_soft_command(oct, sc);
866 if (ret == IQ_SEND_FAILED) {
867 dev_err(&oct->pci_dev->dev,
868 "Failed to sync time to octeon: failed to send soft command\n");
869 octeon_free_soft_command(oct, sc);
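/* Re-arm the work so the time sync repeats every
 * LIO_SYNC_OCTEON_TIME_INTERVAL_MS milliseconds.
 */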
872 queue_delayed_work(lio->sync_octeon_time_wq.wq,
873 &lio->sync_octeon_time_wq.wk.work,
874 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
878 * setup_sync_octeon_time_wq - Sets up the work to periodically update
879 * local time to octeon firmware
881 * @netdev - network device which should send time update to firmware
883 static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
885 struct lio *lio = GET_LIO(netdev);
886 struct octeon_device *oct = lio->oct_dev;
888 lio->sync_octeon_time_wq.wq =
889 alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
890 if (!lio->sync_octeon_time_wq.wq) {
891 dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
894 INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
895 lio_sync_octeon_time);
896 lio->sync_octeon_time_wq.wk.ctxptr = lio;
897 queue_delayed_work(lio->sync_octeon_time_wq.wq,
898 &lio->sync_octeon_time_wq.wk.work,
899 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
905 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
906 * to periodically update local time to octeon firmware
908 * @netdev - network device which should send time update to firmware
910 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
912 struct lio *lio = GET_LIO(netdev);
913 struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
916 cancel_delayed_work_sync(&time_wq->wk.work);
917 destroy_workqueue(time_wq->wq);
921 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
923 struct octeon_device *other_oct;
925 other_oct = lio_get_device(oct->octeon_id + 1);
927 if (other_oct && other_oct->pci_dev) {
928 int oct_busnum, other_oct_busnum;
930 oct_busnum = oct->pci_dev->bus->number;
931 other_oct_busnum = other_oct->pci_dev->bus->number;
933 if (oct_busnum == other_oct_busnum) {
934 int oct_slot, other_oct_slot;
936 oct_slot = PCI_SLOT(oct->pci_dev->devfn);
937 other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
939 if (oct_slot == other_oct_slot)
947 static void disable_all_vf_links(struct octeon_device *oct)
949 struct net_device *netdev;
955 max_vfs = oct->sriov_info.max_vfs;
957 for (i = 0; i < oct->ifcount; i++) {
958 netdev = oct->props[i].netdev;
962 for (vf = 0; vf < max_vfs; vf++)
963 liquidio_set_vf_link_state(netdev, vf,
964 IFLA_VF_LINK_STATE_DISABLE);
968 static int liquidio_watchdog(void *param)
970 bool err_msg_was_printed[LIO_MAX_CORES];
971 u16 mask_of_crashed_or_stuck_cores = 0;
972 bool all_vf_links_are_disabled = false;
973 struct octeon_device *oct = param;
974 struct octeon_device *other_oct;
975 #ifdef CONFIG_MODULE_UNLOAD
976 long refcount, vfs_referencing_pf;
977 u64 vfs_mask1, vfs_mask2;
981 memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
983 while (!kthread_should_stop()) {
984 /* sleep for a couple of seconds so that we don't hog the CPU */
985 set_current_state(TASK_INTERRUPTIBLE);
986 schedule_timeout(msecs_to_jiffies(2000));
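/* The firmware reports crashed or stuck cores through SLI_SCRATCH2. */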
988 mask_of_crashed_or_stuck_cores =
989 (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
991 if (!mask_of_crashed_or_stuck_cores)
994 WRITE_ONCE(oct->cores_crashed, true);
995 other_oct = get_other_octeon_device(oct);
997 WRITE_ONCE(other_oct->cores_crashed, true);
999 for (core = 0; core < LIO_MAX_CORES; core++) {
1000 bool core_crashed_or_got_stuck;
1002 core_crashed_or_got_stuck =
1003 (mask_of_crashed_or_stuck_cores
1006 if (core_crashed_or_got_stuck &&
1007 !err_msg_was_printed[core]) {
1008 dev_err(&oct->pci_dev->dev,
1009 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
1011 err_msg_was_printed[core] = true;
1015 if (all_vf_links_are_disabled)
1018 disable_all_vf_links(oct);
1019 disable_all_vf_links(other_oct);
1020 all_vf_links_are_disabled = true;
1022 #ifdef CONFIG_MODULE_UNLOAD
1023 vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
1024 vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
1026 vfs_referencing_pf = hweight64(vfs_mask1);
1027 vfs_referencing_pf += hweight64(vfs_mask2);
1029 refcount = module_refcount(THIS_MODULE);
1030 if (refcount >= vfs_referencing_pf) {
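/* Drop the module references held on behalf of the VF drivers so that
 * this PF driver can still be unloaded after a firmware crash.
 */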
1031 while (vfs_referencing_pf) {
1032 module_put(THIS_MODULE);
1033 vfs_referencing_pf--;
1043 * \brief PCI probe handler
1044 * @param pdev PCI device structure
1048 liquidio_probe(struct pci_dev *pdev,
1049 const struct pci_device_id *ent __attribute__((unused)))
1051 struct octeon_device *oct_dev = NULL;
1052 struct handshake *hs;
1054 oct_dev = octeon_allocate_device(pdev->device,
1055 sizeof(struct octeon_device_priv));
1057 dev_err(&pdev->dev, "Unable to allocate device\n");
1061 if (pdev->device == OCTEON_CN23XX_PF_VID)
1062 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
1064 /* Enable PTP for 6XXX Device */
1065 if (((pdev->device == OCTEON_CN66XX) ||
1066 (pdev->device == OCTEON_CN68XX)))
1067 oct_dev->ptp_enable = true;
1069 oct_dev->ptp_enable = false;
1071 dev_info(&pdev->dev, "Initializing device %x:%x.\n",
1072 (u32)pdev->vendor, (u32)pdev->device);
1074 /* Assign octeon_device for this device to the private data area. */
1075 pci_set_drvdata(pdev, oct_dev);
1077 /* set linux specific device pointer */
1078 oct_dev->pci_dev = (void *)pdev;
1080 hs = &handshake[oct_dev->octeon_id];
1081 init_completion(&hs->init);
1082 init_completion(&hs->started);
1085 if (oct_dev->octeon_id == 0)
1086 /* first LiquidIO NIC is detected */
1087 complete(&first_stage);
1089 if (octeon_device_init(oct_dev)) {
1090 complete(&hs->init);
1091 liquidio_remove(pdev);
1095 if (OCTEON_CN23XX_PF(oct_dev)) {
1096 u8 bus, device, function;
1098 if (atomic_read(oct_dev->adapter_refcount) == 1) {
1099 /* Each NIC gets one watchdog kernel thread. The first
1100 * PF (of each NIC) that gets pci_driver->probe()'d
1101 * creates that thread.
1103 bus = pdev->bus->number;
1104 device = PCI_SLOT(pdev->devfn);
1105 function = PCI_FUNC(pdev->devfn);
1106 oct_dev->watchdog_task = kthread_create(
1107 liquidio_watchdog, oct_dev,
1108 "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
1109 if (!IS_ERR(oct_dev->watchdog_task)) {
1110 wake_up_process(oct_dev->watchdog_task);
1112 oct_dev->watchdog_task = NULL;
1113 dev_err(&oct_dev->pci_dev->dev,
1114 "failed to create kernel_thread\n");
1115 liquidio_remove(pdev);
1121 oct_dev->rx_pause = 1;
1122 oct_dev->tx_pause = 1;
1124 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
1129 static bool fw_type_is_auto(void)
1131 return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
1132 sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
1136 * \brief PCI FLR for each Octeon device.
1137 * @param oct octeon device
1139 static void octeon_pci_flr(struct octeon_device *oct)
1143 pci_save_state(oct->pci_dev);
1145 pci_cfg_access_lock(oct->pci_dev);
1147 /* Quiesce the device completely */
1148 pci_write_config_word(oct->pci_dev, PCI_COMMAND,
1149 PCI_COMMAND_INTX_DISABLE);
1151 rc = __pci_reset_function_locked(oct->pci_dev);
1154 dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
1157 pci_cfg_access_unlock(oct->pci_dev);
1159 pci_restore_state(oct->pci_dev);
1163 * \brief Destroy resources associated with octeon device
1164 * @param pdev PCI device structure
1167 static void octeon_destroy_resources(struct octeon_device *oct)
1170 struct msix_entry *msix_entries;
1171 struct octeon_device_priv *oct_priv =
1172 (struct octeon_device_priv *)oct->priv;
1174 struct handshake *hs;
1176 switch (atomic_read(&oct->status)) {
1177 case OCT_DEV_RUNNING:
1178 case OCT_DEV_CORE_OK:
1180 /* No more instructions will be forwarded. */
1181 atomic_set(&oct->status, OCT_DEV_IN_RESET);
1183 oct->app_mode = CVM_DRV_INVALID_APP;
1184 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
1185 lio_get_state_string(&oct->status));
1187 schedule_timeout_uninterruptible(HZ / 10);
1190 case OCT_DEV_HOST_OK:
1193 case OCT_DEV_CONSOLE_INIT_DONE:
1194 /* Remove any consoles */
1195 octeon_remove_consoles(oct);
1198 case OCT_DEV_IO_QUEUES_DONE:
1199 if (wait_for_pending_requests(oct))
1200 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1202 if (lio_wait_for_instr_fetch(oct))
1203 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1205 /* Disable the input and output queues now. No more packets will
1206 * arrive from Octeon, but we should wait for all packet
1207 * processing to finish.
1209 oct->fn_list.disable_io_queues(oct);
1211 if (lio_wait_for_oq_pkts(oct))
1212 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1215 case OCT_DEV_INTR_SET_DONE:
1216 /* Disable interrupts */
1217 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1220 msix_entries = (struct msix_entry *)oct->msix_entries;
1221 for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1222 if (oct->ioq_vector[i].vector) {
1223 /* clear the affinity_cpumask */
1224 irq_set_affinity_hint(
1225 msix_entries[i].vector,
1227 free_irq(msix_entries[i].vector,
1228 &oct->ioq_vector[i]);
1229 oct->ioq_vector[i].vector = 0;
1232 /* non-iov vector's argument is oct struct */
1233 free_irq(msix_entries[i].vector, oct);
1235 pci_disable_msix(oct->pci_dev);
1236 kfree(oct->msix_entries);
1237 oct->msix_entries = NULL;
1239 /* Release the interrupt line */
1240 free_irq(oct->pci_dev->irq, oct);
1242 if (oct->flags & LIO_FLAG_MSI_ENABLED)
1243 pci_disable_msi(oct->pci_dev);
1246 kfree(oct->irq_name_storage);
1247 oct->irq_name_storage = NULL;
1250 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1251 if (OCTEON_CN23XX_PF(oct))
1252 octeon_free_ioq_vector(oct);
1255 case OCT_DEV_MBOX_SETUP_DONE:
1256 if (OCTEON_CN23XX_PF(oct))
1257 oct->fn_list.free_mbox(oct);
1260 case OCT_DEV_IN_RESET:
1261 case OCT_DEV_DROQ_INIT_DONE:
1262 /* Wait for any pending operations */
1264 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1265 if (!(oct->io_qmask.oq & BIT_ULL(i)))
1267 octeon_delete_droq(oct, i);
1270 /* Force any pending handshakes to complete */
1271 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1275 handshake[oct->octeon_id].init_ok = 0;
1276 complete(&handshake[oct->octeon_id].init);
1277 handshake[oct->octeon_id].started_ok = 0;
1278 complete(&handshake[oct->octeon_id].started);
1283 case OCT_DEV_RESP_LIST_INIT_DONE:
1284 octeon_delete_response_list(oct);
1287 case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1288 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1289 if (!(oct->io_qmask.iq & BIT_ULL(i)))
1291 octeon_delete_instr_queue(oct, i);
1293 #ifdef CONFIG_PCI_IOV
1294 if (oct->sriov_info.sriov_enabled)
1295 pci_disable_sriov(oct->pci_dev);
1298 case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1299 octeon_free_sc_buffer_pool(oct);
1302 case OCT_DEV_DISPATCH_INIT_DONE:
1303 octeon_delete_dispatch_list(oct);
1304 cancel_delayed_work_sync(&oct->nic_poll_work.work);
1307 case OCT_DEV_PCI_MAP_DONE:
1308 refcount = octeon_deregister_device(oct);
1310 /* Soft reset the octeon device before exiting.
1311 * However, if fw was loaded from card (i.e. autoboot),
1312 * perform an FLR instead.
1313 * Implementation note: only soft-reset the device
1314 * if it is a CN6XXX OR the LAST CN23XX device.
1316 if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1317 octeon_pci_flr(oct);
1318 else if (OCTEON_CN6XXX(oct) || !refcount)
1319 oct->fn_list.soft_reset(oct);
1321 octeon_unmap_pci_barx(oct, 0);
1322 octeon_unmap_pci_barx(oct, 1);
1325 case OCT_DEV_PCI_ENABLE_DONE:
1326 pci_clear_master(oct->pci_dev);
1327 /* Disable the device, releasing the PCI INT */
1328 pci_disable_device(oct->pci_dev);
1331 case OCT_DEV_BEGIN_STATE:
1332 /* Nothing to be done here either */
1334 } /* end switch (oct->status) */
1336 tasklet_kill(&oct_priv->droq_tasklet);
1340 * \brief Callback for rx ctrl
1341 * @param status status of request
1342 * @param buf pointer to resp structure
1344 static void rx_ctl_callback(struct octeon_device *oct,
1348 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1349 struct liquidio_rx_ctl_context *ctx;
1351 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1353 oct = lio_get_device(ctx->octeon_id);
1355 dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
1356 CVM_CAST64(status));
1357 WRITE_ONCE(ctx->cond, 1);
1359 /* This barrier is required to be sure that the response has been
1360 * written fully before waking up the handler
1364 wake_up_interruptible(&ctx->wc);
1368 * \brief Send Rx control command
1369 * @param lio per-network private data
1370 * @param start_stop whether to start or stop
1372 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1374 struct octeon_soft_command *sc;
1375 struct liquidio_rx_ctl_context *ctx;
1376 union octnet_cmd *ncmd;
1377 int ctx_size = sizeof(struct liquidio_rx_ctl_context);
1378 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1381 if (oct->props[lio->ifidx].rx_on == start_stop)
1384 sc = (struct octeon_soft_command *)
1385 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1388 ncmd = (union octnet_cmd *)sc->virtdptr;
1389 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1391 WRITE_ONCE(ctx->cond, 0);
1392 ctx->octeon_id = lio_get_device_id(oct);
1393 init_waitqueue_head(&ctx->wc);
1396 ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1397 ncmd->s.param1 = start_stop;
1399 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1401 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1403 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1404 OPCODE_NIC_CMD, 0, 0, 0);
1406 sc->callback = rx_ctl_callback;
1407 sc->callback_arg = sc;
1408 sc->wait_time = 5000;
1410 retval = octeon_send_soft_command(oct, sc);
1411 if (retval == IQ_SEND_FAILED) {
1412 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1414 /* Sleep on a wait queue until the cond flag indicates that the
1415 * response has arrived or timed out.
1417 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
1419 oct->props[lio->ifidx].rx_on = start_stop;
1422 octeon_free_soft_command(oct, sc);
1426 * \brief Destroy NIC device interface
1427 * @param oct octeon device
1428 * @param ifidx which interface to destroy
1430 * Cleanup associated with each interface for an Octeon device when NIC
1431 * module is being unloaded or if initialization fails during load.
1433 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1435 struct net_device *netdev = oct->props[ifidx].netdev;
1437 struct napi_struct *napi, *n;
1440 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1445 lio = GET_LIO(netdev);
1447 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1449 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1450 liquidio_stop(netdev);
1452 if (oct->props[lio->ifidx].napi_enabled == 1) {
1453 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1456 oct->props[lio->ifidx].napi_enabled = 0;
1458 if (OCTEON_CN23XX_PF(oct))
1459 oct->droq[0]->ops.poll_mode = 0;
1463 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1464 netif_napi_del(napi);
1466 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1467 unregister_netdev(netdev);
1469 cleanup_sync_octeon_time_wq(netdev);
1470 cleanup_link_status_change_wq(netdev);
1472 cleanup_rx_oom_poll_fn(netdev);
1476 free_netdev(netdev);
1478 oct->props[ifidx].gmxport = -1;
1480 oct->props[ifidx].netdev = NULL;
1484 * \brief Stop complete NIC functionality
1485 * @param oct octeon device
1487 static int liquidio_stop_nic_module(struct octeon_device *oct)
1492 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1493 if (!oct->ifcount) {
1494 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1498 spin_lock_bh(&oct->cmd_resp_wqlock);
1499 oct->cmd_resp_state = OCT_DRV_OFFLINE;
1500 spin_unlock_bh(&oct->cmd_resp_wqlock);
1502 lio_vf_rep_destroy(oct);
1504 for (i = 0; i < oct->ifcount; i++) {
1505 lio = GET_LIO(oct->props[i].netdev);
1506 for (j = 0; j < oct->num_oqs; j++)
1507 octeon_unregister_droq_ops(oct,
1508 lio->linfo.rxpciq[j].s.q_no);
1511 for (i = 0; i < oct->ifcount; i++)
1512 liquidio_destroy_nic_device(oct, i);
1515 devlink_unregister(oct->devlink);
1516 devlink_free(oct->devlink);
1517 oct->devlink = NULL;
1520 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1525 * \brief Cleans up resources at unload time
1526 * @param pdev PCI device structure
1528 static void liquidio_remove(struct pci_dev *pdev)
1530 struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1532 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1534 if (oct_dev->watchdog_task)
1535 kthread_stop(oct_dev->watchdog_task);
1537 if (!oct_dev->octeon_id &&
1538 oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1539 lio_vf_rep_modexit();
1541 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1542 liquidio_stop_nic_module(oct_dev);
1544 /* Reset the octeon device and cleanup all memory allocated for
1545 * the octeon device by driver.
1547 octeon_destroy_resources(oct_dev);
1549 dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1551 /* This octeon device has been removed. Update the global
1552 * data structure to reflect this. Free the device structure.
1554 octeon_free_device_mem(oct_dev);
1558 * \brief Identify the Octeon device and map the BAR address space
1559 * @param oct octeon device
1561 static int octeon_chip_specific_setup(struct octeon_device *oct)
1567 pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1568 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1569 oct->rev_id = rev_id & 0xff;
1572 case OCTEON_CN68XX_PCIID:
1573 oct->chip_id = OCTEON_CN68XX;
1574 ret = lio_setup_cn68xx_octeon_device(oct);
1578 case OCTEON_CN66XX_PCIID:
1579 oct->chip_id = OCTEON_CN66XX;
1580 ret = lio_setup_cn66xx_octeon_device(oct);
1584 case OCTEON_CN23XX_PCIID_PF:
1585 oct->chip_id = OCTEON_CN23XX_PF_VID;
1586 ret = setup_cn23xx_octeon_pf_device(oct);
1589 #ifdef CONFIG_PCI_IOV
1591 pci_sriov_set_totalvfs(oct->pci_dev,
1592 oct->sriov_info.max_vfs);
1599 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1604 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1605 OCTEON_MAJOR_REV(oct),
1606 OCTEON_MINOR_REV(oct),
1607 octeon_get_conf(oct)->card_name,
1614 * \brief PCI initialization for each Octeon device.
1615 * @param oct octeon device
1617 static int octeon_pci_os_setup(struct octeon_device *oct)
1619 /* setup PCI stuff first */
1620 if (pci_enable_device(oct->pci_dev)) {
1621 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1625 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1626 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1627 pci_disable_device(oct->pci_dev);
1631 /* Enable PCI DMA Master. */
1632 pci_set_master(oct->pci_dev);
1638 * \brief Unmap and free network buffer
1641 static void free_netbuf(void *buf)
1643 struct sk_buff *skb;
1644 struct octnet_buf_free_info *finfo;
1647 finfo = (struct octnet_buf_free_info *)buf;
1651 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1654 tx_buffer_free(skb);
1658 * \brief Unmap and free gather buffer
1661 static void free_netsgbuf(void *buf)
1663 struct octnet_buf_free_info *finfo;
1664 struct sk_buff *skb;
1666 struct octnic_gather *g;
1669 finfo = (struct octnet_buf_free_info *)buf;
1673 frags = skb_shinfo(skb)->nr_frags;
1675 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1676 g->sg[0].ptr[0], (skb->len - skb->data_len),
1681 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
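/* Fragment pointers are packed four per gather entry: entry i >> 2,
 * slot i & 3 (slot 0 holds the linear part unmapped above).
 */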
1683 pci_unmap_page((lio->oct_dev)->pci_dev,
1684 g->sg[(i >> 2)].ptr[(i & 3)],
1685 frag->size, DMA_TO_DEVICE);
1689 iq = skb_iq(lio, skb);
1690 spin_lock(&lio->glist_lock[iq]);
1691 list_add_tail(&g->list, &lio->glist[iq]);
1692 spin_unlock(&lio->glist_lock[iq]);
1694 tx_buffer_free(skb);
1698 * \brief Unmap and free gather buffer with response
1701 static void free_netsgbuf_with_resp(void *buf)
1703 struct octeon_soft_command *sc;
1704 struct octnet_buf_free_info *finfo;
1705 struct sk_buff *skb;
1707 struct octnic_gather *g;
1710 sc = (struct octeon_soft_command *)buf;
1711 skb = (struct sk_buff *)sc->callback_arg;
1712 finfo = (struct octnet_buf_free_info *)&skb->cb;
1716 frags = skb_shinfo(skb)->nr_frags;
1718 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1719 g->sg[0].ptr[0], (skb->len - skb->data_len),
1724 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1726 pci_unmap_page((lio->oct_dev)->pci_dev,
1727 g->sg[(i >> 2)].ptr[(i & 3)],
1728 frag->size, DMA_TO_DEVICE);
1732 iq = skb_iq(lio, skb);
1734 spin_lock(&lio->glist_lock[iq]);
1735 list_add_tail(&g->list, &lio->glist[iq]);
1736 spin_unlock(&lio->glist_lock[iq]);
1738 /* Don't free the skb yet */
1742 * \brief Adjust ptp frequency
1743 * @param ptp PTP clock info
1744 * @param ppb how much to adjust by, in parts-per-billion
1746 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1748 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1749 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1751 unsigned long flags;
1752 bool neg_adj = false;
1759 /* The hardware adds the clock compensation value to the
1760 * PTP clock on every coprocessor clock cycle, so we
1761 * compute the delta in terms of coprocessor clocks.
1763 delta = (u64)ppb << 32;
1764 do_div(delta, oct->coproc_clock_rate);
1766 spin_lock_irqsave(&lio->ptp_lock, flags);
1767 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1772 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1773 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1779 * \brief Adjust ptp time
1780 * @param ptp PTP clock info
1781 * @param delta how much to adjust by, in nanosecs
1783 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1785 unsigned long flags;
1786 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1788 spin_lock_irqsave(&lio->ptp_lock, flags);
1789 lio->ptp_adjust += delta;
1790 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1796 * \brief Get hardware clock time, including any adjustment
1797 * @param ptp PTP clock info
1798 * @param ts timespec
1800 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1801 struct timespec64 *ts)
1804 unsigned long flags;
1805 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1806 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1808 spin_lock_irqsave(&lio->ptp_lock, flags);
1809 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1810 ns += lio->ptp_adjust;
1811 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1813 *ts = ns_to_timespec64(ns);
1819 * \brief Set hardware clock time. Reset adjustment
1820 * @param ptp PTP clock info
1821 * @param ts timespec
1823 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1824 const struct timespec64 *ts)
1827 unsigned long flags;
1828 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1829 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1831 ns = timespec64_to_ns(ts);
1833 spin_lock_irqsave(&lio->ptp_lock, flags);
1834 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1835 lio->ptp_adjust = 0;
1836 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1842 * \brief Check if PTP is enabled
1843 * @param ptp PTP clock info
1845 * @param on whether the request is to enable or disable
1848 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1849 struct ptp_clock_request *rq __attribute__((unused)),
1850 int on __attribute__((unused)))
1856 * \brief Open PTP clock source
1857 * @param netdev network device
1859 static void oct_ptp_open(struct net_device *netdev)
1861 struct lio *lio = GET_LIO(netdev);
1862 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1864 spin_lock_init(&lio->ptp_lock);
1866 snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1867 lio->ptp_info.owner = THIS_MODULE;
1868 lio->ptp_info.max_adj = 250000000;
1869 lio->ptp_info.n_alarm = 0;
1870 lio->ptp_info.n_ext_ts = 0;
1871 lio->ptp_info.n_per_out = 0;
1872 lio->ptp_info.pps = 0;
1873 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1874 lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1875 lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1876 lio->ptp_info.settime64 = liquidio_ptp_settime;
1877 lio->ptp_info.enable = liquidio_ptp_enable;
1879 lio->ptp_adjust = 0;
1881 lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1882 &oct->pci_dev->dev);
1884 if (IS_ERR(lio->ptp_clock))
1885 lio->ptp_clock = NULL;
1889 * \brief Init PTP clock
1890 * @param oct octeon device
1892 static void liquidio_ptp_init(struct octeon_device *oct)
1894 u64 clock_comp, cfg;
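/* Program the clock compensation value: nanoseconds per coprocessor
 * clock cycle, expressed as a 32.32 fixed-point number.
 */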
1896 clock_comp = (u64)NSEC_PER_SEC << 32;
1897 do_div(clock_comp, oct->coproc_clock_rate);
1898 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1901 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1902 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1906 * \brief Load firmware to device
1907 * @param oct octeon device
1909 * Maps device to firmware filename, requests firmware, and downloads it
1911 static int load_firmware(struct octeon_device *oct)
1914 const struct firmware *fw;
1915 char fw_name[LIO_MAX_FW_FILENAME_LEN];
1918 if (fw_type_is_auto()) {
1919 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1920 strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1922 tmp_fw_type = fw_type;
1925 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1926 octeon_get_conf(oct)->card_name, tmp_fw_type,
1927 LIO_FW_NAME_SUFFIX);
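/* Typically resolves to something like "liquidio/lio_23xx_nic.bin" for a
 * CN23xx card with the default "nic" firmware type.
 */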
1929 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1931 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1933 release_firmware(fw);
1937 ret = octeon_download_firmware(oct, fw->data, fw->size);
1939 release_firmware(fw);
1945 * \brief Callback for getting interface configuration
1946 * @param status status of request
1947 * @param buf pointer to resp structure
1949 static void if_cfg_callback(struct octeon_device *oct,
1950 u32 status __attribute__((unused)),
1953 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1954 struct liquidio_if_cfg_resp *resp;
1955 struct liquidio_if_cfg_context *ctx;
1957 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1958 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1960 oct = lio_get_device(ctx->octeon_id);
1962 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
1963 CVM_CAST64(resp->status), status);
1964 WRITE_ONCE(ctx->cond, 1);
1966 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1967 resp->cfg_info.liquidio_firmware_version);
1969 /* This barrier is required to be sure that the response has been
1970 * written fully before waking up the handler
1974 wake_up_interruptible(&ctx->wc);
1978 * \brief Poll routine for checking transmit queue status
1979 * @param work work_struct data structure
1981 static void octnet_poll_check_txq_status(struct work_struct *work)
1983 struct cavium_wk *wk = (struct cavium_wk *)work;
1984 struct lio *lio = (struct lio *)wk->ctxptr;
1986 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1989 check_txq_status(lio);
1990 queue_delayed_work(lio->txq_status_wq.wq,
1991 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1995 * \brief Sets up the txq poll check
1996 * @param netdev network device
1998 static inline int setup_tx_poll_fn(struct net_device *netdev)
2000 struct lio *lio = GET_LIO(netdev);
2001 struct octeon_device *oct = lio->oct_dev;
2003 lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2005 if (!lio->txq_status_wq.wq) {
2006 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2009 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2010 octnet_poll_check_txq_status);
2011 lio->txq_status_wq.wk.ctxptr = lio;
2012 queue_delayed_work(lio->txq_status_wq.wq,
2013 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2017 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2019 struct lio *lio = GET_LIO(netdev);
2021 if (lio->txq_status_wq.wq) {
2022 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2023 destroy_workqueue(lio->txq_status_wq.wq);
2028 * \brief Net device open for LiquidIO
2029 * @param netdev network device
2031 static int liquidio_open(struct net_device *netdev)
2033 struct lio *lio = GET_LIO(netdev);
2034 struct octeon_device *oct = lio->oct_dev;
2035 struct napi_struct *napi, *n;
2037 if (oct->props[lio->ifidx].napi_enabled == 0) {
2038 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2041 oct->props[lio->ifidx].napi_enabled = 1;
2043 if (OCTEON_CN23XX_PF(oct))
2044 oct->droq[0]->ops.poll_mode = 1;
2047 if (oct->ptp_enable)
2048 oct_ptp_open(netdev);
2050 ifstate_set(lio, LIO_IFSTATE_RUNNING);
2052 /* Ready for link status updates */
2055 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2057 if (OCTEON_CN23XX_PF(oct)) {
2059 if (setup_tx_poll_fn(netdev))
2062 if (setup_tx_poll_fn(netdev))
2068 /* tell Octeon to start forwarding packets to host */
2069 send_rx_ctrl_cmd(lio, 1);
2071 dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2078 * \brief Net device stop for LiquidIO
2079 * @param netdev network device
2081 static int liquidio_stop(struct net_device *netdev)
2083 struct lio *lio = GET_LIO(netdev);
2084 struct octeon_device *oct = lio->oct_dev;
2085 struct napi_struct *napi, *n;
2087 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2089 netif_tx_disable(netdev);
2091 /* Inform that netif carrier is down */
2092 netif_carrier_off(netdev);
2094 lio->linfo.link.s.link_up = 0;
2095 lio->link_changes++;
2097 /* Tell Octeon that nic interface is down. */
2098 send_rx_ctrl_cmd(lio, 0);
2100 if (OCTEON_CN23XX_PF(oct)) {
2102 cleanup_tx_poll_fn(netdev);
2104 cleanup_tx_poll_fn(netdev);
2107 if (lio->ptp_clock) {
2108 ptp_clock_unregister(lio->ptp_clock);
2109 lio->ptp_clock = NULL;
2112 /* Wait for any pending Rx descriptors */
2113 if (lio_wait_for_clean_oq(oct))
2114 netif_info(lio, rx_err, lio->netdev,
2115 "Proceeding with stop interface after partial RX desc processing\n");
2117 if (oct->props[lio->ifidx].napi_enabled == 1) {
2118 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2121 oct->props[lio->ifidx].napi_enabled = 0;
2123 if (OCTEON_CN23XX_PF(oct))
2124 oct->droq[0]->ops.poll_mode = 0;
2127 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2133 * \brief Converts a mask based on net device flags
2134 * @param netdev network device
2136 * This routine generates an octnet_ifflags mask from the net device flags
2137 * received from the OS.
2139 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2141 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2143 if (netdev->flags & IFF_PROMISC)
2144 f |= OCTNET_IFFLAG_PROMISC;
2146 if (netdev->flags & IFF_ALLMULTI)
2147 f |= OCTNET_IFFLAG_ALLMULTI;
2149 if (netdev->flags & IFF_MULTICAST) {
2150 f |= OCTNET_IFFLAG_MULTICAST;
2152 /* Accept all multicast addresses if there are more than we can handle. */
2155 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2156 f |= OCTNET_IFFLAG_ALLMULTI;
2159 if (netdev->flags & IFF_BROADCAST)
2160 f |= OCTNET_IFFLAG_BROADCAST;
2166 * \brief Net device set_multicast_list
2167 * @param netdev network device
2169 static void liquidio_set_mcast_list(struct net_device *netdev)
2171 struct lio *lio = GET_LIO(netdev);
2172 struct octeon_device *oct = lio->oct_dev;
2173 struct octnic_ctrl_pkt nctrl;
2174 struct netdev_hw_addr *ha;
2177 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2179 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2181 /* Create a ctrl pkt command to be sent to core app. */
2183 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
2184 nctrl.ncmd.s.param1 = get_new_flags(netdev);
2185 nctrl.ncmd.s.param2 = mc_count;
2186 nctrl.ncmd.s.more = mc_count;
2187 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2188 nctrl.netpndev = (u64)netdev;
2189 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2191 /* copy all the addresses into the udd */
2193 netdev_for_each_mc_addr(ha, netdev) {
2195 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2196 /* no need to swap bytes */
2198 if (++mc > &nctrl.udd[mc_count])
2202 /* Apparently, any activity in this call from the kernel has to
2203 * be atomic. So we won't wait for response.
2205 nctrl.wait_time = 0;
2207 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2209 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2215 * \brief Net device set_mac_address
2216 * @param netdev network device
2218 static int liquidio_set_mac(struct net_device *netdev, void *p)
2221 struct lio *lio = GET_LIO(netdev);
2222 struct octeon_device *oct = lio->oct_dev;
2223 struct sockaddr *addr = (struct sockaddr *)p;
2224 struct octnic_ctrl_pkt nctrl;
2226 if (!is_valid_ether_addr(addr->sa_data))
2227 return -EADDRNOTAVAIL;
2229 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2232 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2233 nctrl.ncmd.s.param1 = 0;
2234 nctrl.ncmd.s.more = 1;
2235 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2236 nctrl.netpndev = (u64)netdev;
2237 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2238 nctrl.wait_time = 100;
2241 /* The MAC Address is presented in network byte order. */
2242 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2244 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2246 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2249 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2250 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2256 * \brief Net device get_stats
2257 * @param netdev network device
2259 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2261 struct lio *lio = GET_LIO(netdev);
2262 struct net_device_stats *stats = &netdev->stats;
2263 struct octeon_device *oct;
2264 u64 pkts = 0, drop = 0, bytes = 0;
2265 struct oct_droq_stats *oq_stats;
2266 struct oct_iq_stats *iq_stats;
2267 int i, iq_no, oq_no;
2271 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2274 for (i = 0; i < oct->num_iqs; i++) {
2275 iq_no = lio->linfo.txpciq[i].s.q_no;
2276 iq_stats = &oct->instr_queue[iq_no]->stats;
2277 pkts += iq_stats->tx_done;
2278 drop += iq_stats->tx_dropped;
2279 bytes += iq_stats->tx_tot_bytes;
2282 stats->tx_packets = pkts;
2283 stats->tx_bytes = bytes;
2284 stats->tx_dropped = drop;
2290 for (i = 0; i < oct->num_oqs; i++) {
2291 oq_no = lio->linfo.rxpciq[i].s.q_no;
2292 oq_stats = &oct->droq[oq_no]->stats;
2293 pkts += oq_stats->rx_pkts_received;
2294 drop += (oq_stats->rx_dropped +
2295 oq_stats->dropped_nodispatch +
2296 oq_stats->dropped_toomany +
2297 oq_stats->dropped_nomem);
2298 bytes += oq_stats->rx_bytes_received;
2301 stats->rx_bytes = bytes;
2302 stats->rx_packets = pkts;
2303 stats->rx_dropped = drop;
2309 * \brief Handler for SIOCSHWTSTAMP ioctl
2310 * @param netdev network device
2311 * @param ifr interface request
2314 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2316 struct hwtstamp_config conf;
2317 struct lio *lio = GET_LIO(netdev);
2319 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2325 switch (conf.tx_type) {
2326 case HWTSTAMP_TX_ON:
2327 case HWTSTAMP_TX_OFF:
2333 switch (conf.rx_filter) {
2334 case HWTSTAMP_FILTER_NONE:
2336 case HWTSTAMP_FILTER_ALL:
2337 case HWTSTAMP_FILTER_SOME:
2338 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2339 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2340 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2341 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2342 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2343 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2344 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2345 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2346 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2347 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2348 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2349 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2350 case HWTSTAMP_FILTER_NTP_ALL:
2351 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2357 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2358 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2361 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2363 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct lio *lio = GET_LIO(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (lio->oct_dev->ptp_enable)
			return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * \brief handle a Tx timestamp response
 * @param status response status
 * @param buf pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}
/** \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo,
					 int xmit_more)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}
2456 if (ndata->reqtype == REQTYPE_NORESP_NET)
2457 ndata->reqtype = REQTYPE_RESP_NET;
2458 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2459 ndata->reqtype = REQTYPE_RESP_NET_SG;
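	/* A timestamped packet needs a response from the firmware (the TX
	 * timestamp itself), so the "no response" request type is promoted
	 * to its RESP variant; handle_timestamp() consumes that response
	 * and reports the stamp via skb_tstamp_tx().
	 */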
2461 sc->callback = handle_timestamp;
2462 sc->callback_arg = finfo->skb;
2463 sc->iq_no = ndata->q_no;
2465 if (OCTEON_CN23XX_PF(oct))
2466 len = (u32)((struct octeon_instr_ih3 *)
2467 (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
2470 (&sc->cmd.cmd2.ih2))->dlengsz;
2472 ring_doorbell = !xmit_more;
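	/* Doorbell coalescing: ring_doorbell is cleared while the stack
	 * signals xmit_more, so a burst of commands can be queued and the
	 * doorbell written only once for the whole burst.
	 */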
	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}
/** \brief Transmit networks packets to the Octeon interface
 * @param skbuff   skbuff struct to be passed to network layer.
 * @param netdev   pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct lio *lio;
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_device *oct;
	struct oct_iq_stats *stats;
	struct octeon_instr_irh *irh;
	union tx_info *tx_info;
	int status = 0;
	int q_idx = 0, iq_no = 0;
	int j, xmit_more = 0;
	u64 dptr = 0;
	u32 tag = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	q_idx = skb_iq(lio, skb);
	tag = q_idx;
	iq_no = lio->linfo.txpciq[q_idx].s.q_no;

	stats = &oct->instr_queue[iq_no]->stats;
	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) ||
	    (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev,
			   "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}
	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = (void *)finfo;

	ndata.q_no = iq_no;

	if (octnet_iq_is_full(oct, ndata.q_no)) {
		/* defer sending if queue is full */
		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
			   ndata.q_no);
		stats->tx_iq_busy++;
		return NETDEV_TX_BUSY;
	}
2554 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2555 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (skb_shinfo(skb)->nr_frags == 0) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		/* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			stats->tx_dmamap_fail++;
			return NETDEV_TX_BUSY;
		}

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;

		ndata.reqtype = REQTYPE_NORESP_NET;
	} else {
		int i, frags;
		struct skb_frag_struct *frag;
		struct octnic_gather *g;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}
2614 cmdsetup.s.gather = 1;
2615 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2616 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
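		/* Gather-list layout: each octeon sg entry holds up to four
		 * DMA pointers, so entry (i >> 2), slot (i & 3) addresses
		 * pointer i. Slot 0 carries the linear part of the skb; the
		 * remaining slots carry the page fragments mapped below.
		 */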
		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);

			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}
	if (OCTEON_CN23XX_PF(oct)) {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
	} else {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
	}
	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> 13;
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}
2697 xmit_more = skb->xmit_more;
2699 if (unlikely(cmdsetup.s.timestamp))
2700 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2702 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2703 if (status == IQ_SEND_FAILED)
2704 goto lio_xmit_failed;
2706 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2708 if (status == IQ_SEND_STOP)
2709 netif_stop_subqueue(netdev, q_idx);
2711 netif_trans_update(netdev);
	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;
	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);

	octeon_ring_doorbell_locked(oct, iq_no);

	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}
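/* Note on the failure path above: the skb has already been consumed
 * (tx_buffer_free), so NETDEV_TX_OK is returned even for a dropped packet;
 * NETDEV_TX_BUSY is reserved for the paths that leave the skb untouched so
 * the stack can requeue it.
 */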
/** \brief Network device Tx timeout
 * @param netdev    pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	wake_txqs(netdev);
}
2751 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2752 __be16 proto __attribute__((unused)),
2755 struct lio *lio = GET_LIO(netdev);
2756 struct octeon_device *oct = lio->oct_dev;
2757 struct octnic_ctrl_pkt nctrl;
2760 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2763 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2764 nctrl.ncmd.s.param1 = vid;
2765 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2766 nctrl.wait_time = 100;
2767 nctrl.netpndev = (u64)netdev;
2768 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}
2779 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2780 __be16 proto __attribute__((unused)),
2783 struct lio *lio = GET_LIO(netdev);
2784 struct octeon_device *oct = lio->oct_dev;
2785 struct octnic_ctrl_pkt nctrl;
2788 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2791 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2792 nctrl.ncmd.s.param1 = vid;
2793 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2794 nctrl.wait_time = 100;
2795 nctrl.netpndev = (u64)netdev;
2796 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Kill VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}
2806 /** Sending command to enable/disable RX checksum offload
2807 * @param netdev pointer to network device
2808 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
2809 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/
2810 * OCTNET_CMD_RXCSUM_DISABLE
2811 * @returns SUCCESS or FAILURE
2813 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2816 struct lio *lio = GET_LIO(netdev);
2817 struct octeon_device *oct = lio->oct_dev;
2818 struct octnic_ctrl_pkt nctrl;
2821 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2824 nctrl.ncmd.s.cmd = command;
2825 nctrl.ncmd.s.param1 = rx_cmd;
2826 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2827 nctrl.wait_time = 100;
2828 nctrl.netpndev = (u64)netdev;
2829 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2831 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2833 dev_err(&oct->pci_dev->dev,
2834 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2840 /** Sending command to add/delete VxLAN UDP port to firmware
2841 * @param netdev pointer to network device
2842 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
2843 * @param vxlan_port VxLAN port to be added or deleted
2844 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
2845 * OCTNET_CMD_VXLAN_PORT_DEL
2846 * @returns SUCCESS or FAILURE
2848 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2849 u16 vxlan_port, u8 vxlan_cmd_bit)
2851 struct lio *lio = GET_LIO(netdev);
2852 struct octeon_device *oct = lio->oct_dev;
2853 struct octnic_ctrl_pkt nctrl;
2856 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2859 nctrl.ncmd.s.cmd = command;
2860 nctrl.ncmd.s.more = vxlan_cmd_bit;
2861 nctrl.ncmd.s.param1 = vxlan_port;
2862 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2863 nctrl.wait_time = 100;
2864 nctrl.netpndev = (u64)netdev;
2865 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2867 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2869 dev_err(&oct->pci_dev->dev,
2870 "VxLAN port add/delete failed in core (ret:0x%x)\n",
/** \brief Net device fix features
 * @param netdev  pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);
2886 if ((request & NETIF_F_RXCSUM) &&
2887 !(lio->dev_capability & NETIF_F_RXCSUM))
2888 request &= ~NETIF_F_RXCSUM;
2890 if ((request & NETIF_F_HW_CSUM) &&
2891 !(lio->dev_capability & NETIF_F_HW_CSUM))
2892 request &= ~NETIF_F_HW_CSUM;
2894 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2895 request &= ~NETIF_F_TSO;
2897 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2898 request &= ~NETIF_F_TSO6;
2900 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2901 request &= ~NETIF_F_LRO;
2903 /*Disable LRO if RXCSUM is off */
2904 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2905 (lio->dev_capability & NETIF_F_LRO))
2906 request &= ~NETIF_F_LRO;
	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

	return request;
}
/** \brief Net device set features
 * @param netdev  pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);
2924 if ((features & NETIF_F_LRO) &&
2925 (lio->dev_capability & NETIF_F_LRO) &&
2926 !(netdev->features & NETIF_F_LRO))
2927 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2928 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2929 else if (!(features & NETIF_F_LRO) &&
2930 (lio->dev_capability & NETIF_F_LRO) &&
2931 (netdev->features & NETIF_F_LRO))
2932 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2933 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2935 /* Sending command to firmware to enable/disable RX checksum
2936 * offload settings using ethtool
2938 if (!(netdev->features & NETIF_F_RXCSUM) &&
2939 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2940 (features & NETIF_F_RXCSUM))
2941 liquidio_set_rxcsum_command(netdev,
2942 OCTNET_CMD_TNL_RX_CSUM_CTL,
2943 OCTNET_CMD_RXCSUM_ENABLE);
2944 else if ((netdev->features & NETIF_F_RXCSUM) &&
2945 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2946 !(features & NETIF_F_RXCSUM))
2947 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2948 OCTNET_CMD_RXCSUM_DISABLE);
2950 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2951 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2952 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2953 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2954 OCTNET_CMD_VLAN_FILTER_ENABLE);
2955 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2956 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2957 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_DISABLE);

	return 0;
}
static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}
2988 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2989 u8 *mac, bool is_admin_assigned)
2991 struct lio *lio = GET_LIO(netdev);
2992 struct octeon_device *oct = lio->oct_dev;
2993 struct octnic_ctrl_pkt nctrl;
	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3004 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
3005 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3006 nctrl.ncmd.s.param1 = vfidx + 1;
3007 nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
3008 nctrl.ncmd.s.more = 1;
3009 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3010 nctrl.netpndev = (u64)netdev;
3011 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3012 nctrl.wait_time = LIO_CMD_WAIT_TM;
3015 /* The MAC Address is presented in network byte order. */
3016 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
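	/* Cache the whole 64-bit udd word (MAC in bytes 2..7) so that
	 * liquidio_get_vf_config() can report the administratively assigned
	 * address later.
	 */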
3018 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	return 0;
}
static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int retval;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
	if (!retval)
		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);

	return retval;
}
3041 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
3042 u16 vlan, u8 qos, __be16 vlan_proto)
3044 struct lio *lio = GET_LIO(netdev);
3045 struct octeon_device *oct = lio->oct_dev;
3046 struct octnic_ctrl_pkt nctrl;
3049 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3052 if (vlan_proto != htons(ETH_P_8021Q))
3053 return -EPROTONOSUPPORT;
3055 if (vlan >= VLAN_N_VID || qos > 7)
3059 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
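	/* The 16-bit VLAN TCI is the 12-bit VLAN ID with the 3-bit priority
	 * (qos) placed in bits 15:13.
	 */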
3063 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
3066 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3069 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3071 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3073 nctrl.ncmd.s.param1 = vlantci;
3074 nctrl.ncmd.s.param2 =
3075 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
3076 nctrl.ncmd.s.more = 0;
3077 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3079 nctrl.wait_time = LIO_CMD_WAIT_TM;
3081 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3083 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
3088 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
3089 struct ifla_vf_info *ivi)
3091 struct lio *lio = GET_LIO(netdev);
3092 struct octeon_device *oct = lio->oct_dev;
3095 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3099 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
3100 ether_addr_copy(&ivi->mac[0], macaddr);
3101 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3102 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3103 if (oct->sriov_info.trusted_vf.active &&
3104 oct->sriov_info.trusted_vf.id == vfidx)
3105 ivi->trusted = true;
3107 ivi->trusted = false;
3108 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3112 static void trusted_vf_callback(struct octeon_device *oct_dev,
3113 u32 status, void *ptr)
3115 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
3116 struct lio_trusted_vf_ctx *ctx;
3118 ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
3119 ctx->status = status;
3121 complete(&ctx->complete);
3124 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3126 struct octeon_device *oct = lio->oct_dev;
3127 struct lio_trusted_vf_ctx *ctx;
3128 struct octeon_soft_command *sc;
3129 int ctx_size, retval;
3131 ctx_size = sizeof(struct lio_trusted_vf_ctx);
3132 sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size);
3134 ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
3135 init_completion(&ctx->complete);
3137 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3139 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3140 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3141 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3144 sc->callback = trusted_vf_callback;
3145 sc->callback_arg = sc;
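	/* The soft command itself is passed back as callback_arg so that
	 * trusted_vf_callback() can reach the completion context stored in
	 * sc->ctxptr.
	 */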
3146 sc->wait_time = 1000;
3148 retval = octeon_send_soft_command(oct, sc);
3149 if (retval == IQ_SEND_FAILED) {
3152 /* Wait for response or timeout */
3153 if (wait_for_completion_timeout(&ctx->complete,
3154 msecs_to_jiffies(2000)))
3155 retval = ctx->status;
3160 octeon_free_soft_command(oct, sc);
3165 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3168 struct lio *lio = GET_LIO(netdev);
3169 struct octeon_device *oct = lio->oct_dev;
3171 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3172 /* trusted vf is not supported by firmware older than 1.7.1 */
3176 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3177 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3184 if (oct->sriov_info.trusted_vf.active &&
3185 oct->sriov_info.trusted_vf.id == vfidx)
3188 if (oct->sriov_info.trusted_vf.active) {
3189 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3195 if (!oct->sriov_info.trusted_vf.active)
3199 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3201 oct->sriov_info.trusted_vf.id = vfidx;
3202 oct->sriov_info.trusted_vf.active = true;
3204 oct->sriov_info.trusted_vf.active = false;
3207 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3208 setting ? "" : "not ");
3210 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3217 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3220 struct lio *lio = GET_LIO(netdev);
3221 struct octeon_device *oct = lio->oct_dev;
3222 struct octnic_ctrl_pkt nctrl;
3224 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3227 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3230 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3231 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3232 nctrl.ncmd.s.param1 =
3233 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3234 nctrl.ncmd.s.param2 = linkstate;
3235 nctrl.ncmd.s.more = 0;
3236 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3238 nctrl.wait_time = LIO_CMD_WAIT_TM;
3240 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3242 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3248 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3250 struct lio_devlink_priv *priv;
3251 struct octeon_device *oct;
3253 priv = devlink_priv(devlink);
3256 *mode = oct->eswitch_mode;
3262 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
3264 struct lio_devlink_priv *priv;
3265 struct octeon_device *oct;
3268 priv = devlink_priv(devlink);
3271 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
	if (oct->eswitch_mode == mode)
		return 0;

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		oct->eswitch_mode = mode;
		ret = lio_vf_rep_create(oct);
		break;

	case DEVLINK_ESWITCH_MODE_LEGACY:
		lio_vf_rep_destroy(oct);
		oct->eswitch_mode = mode;
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
3295 static const struct devlink_ops liquidio_devlink_ops = {
3296 .eswitch_mode_get = liquidio_eswitch_mode_get,
3297 .eswitch_mode_set = liquidio_eswitch_mode_set,
3301 lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
3303 struct lio *lio = GET_LIO(dev);
3304 struct octeon_device *oct = lio->oct_dev;
3306 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3310 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
3311 attr->u.ppid.id_len = ETH_ALEN;
3312 ether_addr_copy(attr->u.ppid.id,
3313 (void *)&lio->linfo.hw_addr + 2);
3323 static const struct switchdev_ops lio_pf_switchdev_ops = {
3324 .switchdev_port_attr_get = lio_pf_switchdev_attr_get,
3327 static const struct net_device_ops lionetdevops = {
3328 .ndo_open = liquidio_open,
3329 .ndo_stop = liquidio_stop,
3330 .ndo_start_xmit = liquidio_xmit,
3331 .ndo_get_stats = liquidio_get_stats,
3332 .ndo_set_mac_address = liquidio_set_mac,
3333 .ndo_set_rx_mode = liquidio_set_mcast_list,
3334 .ndo_tx_timeout = liquidio_tx_timeout,
3336 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3337 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
3338 .ndo_change_mtu = liquidio_change_mtu,
3339 .ndo_do_ioctl = liquidio_ioctl,
3340 .ndo_fix_features = liquidio_fix_features,
3341 .ndo_set_features = liquidio_set_features,
3342 .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
3343 .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
3344 .ndo_set_vf_mac = liquidio_set_vf_mac,
3345 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3346 .ndo_get_vf_config = liquidio_get_vf_config,
3347 .ndo_set_vf_trust = liquidio_set_vf_trust,
	.ndo_set_vf_link_state = liquidio_set_vf_link_state,
};
3351 /** \brief Entry point for the liquidio module
3353 static int __init liquidio_init(void)
3356 struct handshake *hs;
3358 init_completion(&first_stage);
3360 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3362 if (liquidio_init_pci())
3365 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3367 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3370 wait_for_completion(&hs->init);
3372 /* init handshake failed */
3373 dev_err(&hs->pci_dev->dev,
3374 "Failed to init device\n");
3375 liquidio_deinit_pci();
3381 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3384 wait_for_completion_timeout(&hs->started,
3385 msecs_to_jiffies(30000));
3386 if (!hs->started_ok) {
3387 /* starter handshake failed */
3388 dev_err(&hs->pci_dev->dev,
3389 "Firmware failed to start\n");
3390 liquidio_deinit_pci();
3399 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3401 struct octeon_device *oct = (struct octeon_device *)buf;
3402 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3404 union oct_link_status *ls;
3407 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3408 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3409 recv_pkt->buffer_size[0],
3410 recv_pkt->rh.r_nic_info.gmxport);
3414 gmxport = recv_pkt->rh.r_nic_info.gmxport;
3415 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3416 OCT_DROQ_INFO_SIZE);
3418 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3419 for (i = 0; i < oct->ifcount; i++) {
3420 if (oct->props[i].gmxport == gmxport) {
3421 update_link_status(oct->props[i].netdev, ls);
3427 for (i = 0; i < recv_pkt->buffer_count; i++)
3428 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3429 octeon_free_recv_info(recv_info);
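	/* The dispatch framework hands ownership of the receive buffers to
	 * this handler, so they are freed here whether or not a matching
	 * interface was found above.
	 */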
3434 * \brief Setup network interfaces
3435 * @param octeon_dev octeon device
3437 * Called during init time for each device. It assumes the NIC
3438 * is already up and running. The link information for each
3439 * interface is passed in link_info.
3441 static int setup_nic_devices(struct octeon_device *octeon_dev)
3443 struct lio *lio = NULL;
3444 struct net_device *netdev;
3445 u8 mac[6], i, j, *fw_ver;
3446 struct octeon_soft_command *sc;
3447 struct liquidio_if_cfg_context *ctx;
3448 struct liquidio_if_cfg_resp *resp;
3449 struct octdev_props *props;
3450 int retval, num_iqueues, num_oqueues;
3451 union oct_nic_if_cfg if_cfg;
3452 unsigned int base_queue;
3453 unsigned int gmx_port_id;
3454 u32 resp_size, ctx_size, data_size;
3456 struct lio_version *vdata;
3457 struct devlink *devlink;
3458 struct lio_devlink_priv *lio_devlink;
3460 /* This is to handle link status changes */
3461 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3463 lio_nic_info, octeon_dev);
3465 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3466 * They are handled directly.
3468 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3471 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3474 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3475 free_netsgbuf_with_resp);
3477 for (i = 0; i < octeon_dev->ifcount; i++) {
3478 resp_size = sizeof(struct liquidio_if_cfg_resp);
3479 ctx_size = sizeof(struct liquidio_if_cfg_context);
3480 data_size = sizeof(struct lio_version);
3481 sc = (struct octeon_soft_command *)
3482 octeon_alloc_soft_command(octeon_dev, data_size,
3483 resp_size, ctx_size);
3484 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3485 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
3486 vdata = (struct lio_version *)sc->virtdptr;
3488 *((u64 *)vdata) = 0;
3489 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3490 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3491 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3493 if (OCTEON_CN23XX_PF(octeon_dev)) {
3494 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3495 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3496 base_queue = octeon_dev->sriov_info.pf_srn;
3498 gmx_port_id = octeon_dev->pf_num;
3499 ifidx_or_pfnum = octeon_dev->pf_num;
3501 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3502 octeon_get_conf(octeon_dev), i);
3503 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3504 octeon_get_conf(octeon_dev), i);
3505 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3506 octeon_get_conf(octeon_dev), i);
3507 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3508 octeon_get_conf(octeon_dev), i);
3512 dev_dbg(&octeon_dev->pci_dev->dev,
3513 "requesting config for interface %d, iqs %d, oqs %d\n",
3514 ifidx_or_pfnum, num_iqueues, num_oqueues);
3515 WRITE_ONCE(ctx->cond, 0);
3516 ctx->octeon_id = lio_get_device_id(octeon_dev);
3517 init_waitqueue_head(&ctx->wc);
3520 if_cfg.s.num_iqueues = num_iqueues;
3521 if_cfg.s.num_oqueues = num_oqueues;
3522 if_cfg.s.base_queue = base_queue;
3523 if_cfg.s.gmx_port_id = gmx_port_id;
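		/* The requested ring counts, base queue and GMX port are
		 * packed into if_cfg and travel to the firmware in the
		 * IF_CFG soft command prepared below, so the firmware can
		 * carve out the queues this interface will own.
		 */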
3527 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3528 OPCODE_NIC_IF_CFG, 0,
3531 sc->callback = if_cfg_callback;
3532 sc->callback_arg = sc;
3533 sc->wait_time = 3000;
3535 retval = octeon_send_soft_command(octeon_dev, sc);
3536 if (retval == IQ_SEND_FAILED) {
3537 dev_err(&octeon_dev->pci_dev->dev,
3538 "iq/oq config failed status: %x\n",
3540 /* Soft instr is freed by driver in case of failure. */
3541 goto setup_nic_dev_fail;
3544 /* Sleep on a wait queue till the cond flag indicates that the
3545 * response arrived or timed-out.
3547 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3548 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
3549 goto setup_nic_wait_intr;
3552 retval = resp->status;
3554 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3555 goto setup_nic_dev_fail;
3558 /* Verify f/w version (in case of 'auto' loading from flash) */
3559 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3560 if (memcmp(LIQUIDIO_BASE_VERSION,
3562 strlen(LIQUIDIO_BASE_VERSION))) {
3563 dev_err(&octeon_dev->pci_dev->dev,
3564 "Unmatched firmware version. Expected %s.x, got %s.\n",
3565 LIQUIDIO_BASE_VERSION, fw_ver);
3566 goto setup_nic_dev_fail;
3567 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3569 dev_info(&octeon_dev->pci_dev->dev,
3570 "Using auto-loaded firmware version %s.\n",
3574 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3575 (sizeof(struct liquidio_if_cfg_info)) >> 3);
3577 num_iqueues = hweight64(resp->cfg_info.iqmask);
3578 num_oqueues = hweight64(resp->cfg_info.oqmask);
3580 if (!(num_iqueues) || !(num_oqueues)) {
3581 dev_err(&octeon_dev->pci_dev->dev,
3582 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3583 resp->cfg_info.iqmask,
3584 resp->cfg_info.oqmask);
3585 goto setup_nic_dev_fail;
3587 dev_dbg(&octeon_dev->pci_dev->dev,
3588 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3589 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3590 num_iqueues, num_oqueues);
3591 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
3594 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3595 goto setup_nic_dev_fail;
3598 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3600 /* Associate the routines that will handle different
3603 netdev->netdev_ops = &lionetdevops;
3604 SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3606 lio = GET_LIO(netdev);
3608 memset(lio, 0, sizeof(struct lio));
3610 lio->ifidx = ifidx_or_pfnum;
3612 props = &octeon_dev->props[i];
3613 props->gmxport = resp->cfg_info.linfo.gmxport;
3614 props->netdev = netdev;
3616 lio->linfo.num_rxpciq = num_oqueues;
3617 lio->linfo.num_txpciq = num_iqueues;
3618 for (j = 0; j < num_oqueues; j++) {
3619 lio->linfo.rxpciq[j].u64 =
3620 resp->cfg_info.linfo.rxpciq[j].u64;
3622 for (j = 0; j < num_iqueues; j++) {
3623 lio->linfo.txpciq[j].u64 =
3624 resp->cfg_info.linfo.txpciq[j].u64;
3626 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3627 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3628 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3630 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3632 if (OCTEON_CN23XX_PF(octeon_dev) ||
3633 OCTEON_CN6XXX(octeon_dev)) {
3634 lio->dev_capability = NETIF_F_HIGHDMA
3637 | NETIF_F_SG | NETIF_F_RXCSUM
3639 | NETIF_F_TSO | NETIF_F_TSO6
3642 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3644 /* Copy of transmit encapsulation capabilities:
3645 * TSO, TSO6, Checksums for this device
3647 lio->enc_dev_capability = NETIF_F_IP_CSUM
3649 | NETIF_F_GSO_UDP_TUNNEL
3650 | NETIF_F_HW_CSUM | NETIF_F_SG
3652 | NETIF_F_TSO | NETIF_F_TSO6
3655 netdev->hw_enc_features = (lio->enc_dev_capability &
3658 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3660 netdev->vlan_features = lio->dev_capability;
3661 /* Add any unchangeable hw features */
3662 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3663 NETIF_F_HW_VLAN_CTAG_RX |
3664 NETIF_F_HW_VLAN_CTAG_TX;
3666 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3668 netdev->hw_features = lio->dev_capability;
3669 /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/
3670 netdev->hw_features = netdev->hw_features &
3671 ~NETIF_F_HW_VLAN_CTAG_RX;
3673 /* MTU range: 68 - 16000 */
3674 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3675 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3677 /* Point to the properties for octeon device to which this
3678 * interface belongs.
3680 lio->oct_dev = octeon_dev;
3681 lio->octprops = props;
3682 lio->netdev = netdev;
3684 dev_dbg(&octeon_dev->pci_dev->dev,
3685 "if%d gmx: %d hw_addr: 0x%llx\n", i,
3686 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3688 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3691 random_ether_addr(&vfmac[0]);
3692 if (__liquidio_set_vf_mac(netdev, j,
3693 &vfmac[0], false)) {
3694 dev_err(&octeon_dev->pci_dev->dev,
3695 "Error setting VF%d MAC address\n",
3697 goto setup_nic_dev_fail;
3701 /* 64-bit swap required on LE machines */
3702 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3703 for (j = 0; j < 6; j++)
3704 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
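		/* After the 8-byte swap above, the station MAC occupies
		 * bytes 2..7 of the 64-bit hw_addr word, hence the +2 offset
		 * when extracting the six address bytes.
		 */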
3706 /* Copy MAC Address to OS network device structure */
3708 ether_addr_copy(netdev->dev_addr, mac);
3710 /* By default all interfaces on a single Octeon uses the same
3713 lio->txq = lio->linfo.txpciq[0].s.q_no;
3714 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3715 if (liquidio_setup_io_queues(octeon_dev, i,
3716 lio->linfo.num_txpciq,
3717 lio->linfo.num_rxpciq)) {
3718 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3719 goto setup_nic_dev_fail;
3722 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3724 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3725 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3727 if (setup_glists(octeon_dev, lio, num_iqueues)) {
3728 dev_err(&octeon_dev->pci_dev->dev,
3729 "Gather list allocation failed\n");
3730 goto setup_nic_dev_fail;
3733 /* Register ethtool support */
3734 liquidio_set_ethtool_ops(netdev);
3735 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3736 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3738 octeon_dev->priv_flags = 0x0;
3740 if (netdev->features & NETIF_F_LRO)
3741 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3742 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3744 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3745 OCTNET_CMD_VLAN_FILTER_ENABLE);
3747 if ((debug != -1) && (debug & NETIF_MSG_HW))
3748 liquidio_set_feature(netdev,
3749 OCTNET_CMD_VERBOSE_ENABLE, 0);
3751 if (setup_link_status_change_wq(netdev))
3752 goto setup_nic_dev_fail;
3754 if ((octeon_dev->fw_info.app_cap_flags &
3755 LIQUIDIO_TIME_SYNC_CAP) &&
3756 setup_sync_octeon_time_wq(netdev))
3757 goto setup_nic_dev_fail;
3759 if (setup_rx_oom_poll_fn(netdev))
3760 goto setup_nic_dev_fail;
3762 /* Register the network device with the OS */
3763 if (register_netdev(netdev)) {
3764 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3765 goto setup_nic_dev_fail;
3768 dev_dbg(&octeon_dev->pci_dev->dev,
3769 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3770 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3771 netif_carrier_off(netdev);
3772 lio->link_changes++;
3774 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3776 /* Sending command to firmware to enable Rx checksum offload
3777 * by default at the time of setup of Liquidio driver for
3780 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3781 OCTNET_CMD_RXCSUM_ENABLE);
3782 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3783 OCTNET_CMD_TXCSUM_ENABLE);
3785 dev_dbg(&octeon_dev->pci_dev->dev,
3786 "NIC ifidx:%d Setup successful\n", i);
3788 octeon_free_soft_command(octeon_dev, sc);
3791 devlink = devlink_alloc(&liquidio_devlink_ops,
3792 sizeof(struct lio_devlink_priv));
3794 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3795 goto setup_nic_wait_intr;
3798 lio_devlink = devlink_priv(devlink);
3799 lio_devlink->oct = octeon_dev;
3801 if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3802 devlink_free(devlink);
3803 dev_err(&octeon_dev->pci_dev->dev,
3804 "devlink registration failed\n");
3805 goto setup_nic_wait_intr;
3808 octeon_dev->devlink = devlink;
3809 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3815 octeon_free_soft_command(octeon_dev, sc);
3817 setup_nic_wait_intr:
3820 dev_err(&octeon_dev->pci_dev->dev,
3821 "NIC ifidx:%d Setup failed\n", i);
3822 liquidio_destroy_nic_device(octeon_dev, i);
3827 #ifdef CONFIG_PCI_IOV
3828 static int octeon_enable_sriov(struct octeon_device *oct)
3830 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3831 struct pci_dev *vfdev;
3835 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3836 err = pci_enable_sriov(oct->pci_dev,
3837 oct->sriov_info.num_vfs_alloced);
3839 dev_err(&oct->pci_dev->dev,
3840 "OCTEON: Failed to enable PCI sriov: %d\n",
3842 oct->sriov_info.num_vfs_alloced = 0;
3845 oct->sriov_info.sriov_enabled = 1;
3847 /* init lookup table that maps DPI ring number to VF pci_dev
3851 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3852 OCTEON_CN23XX_VF_VID, NULL);
3854 if (vfdev->is_virtfn &&
3855 (vfdev->physfn == oct->pci_dev)) {
3856 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3858 u += oct->sriov_info.rings_per_vf;
3860 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3861 OCTEON_CN23XX_VF_VID, vfdev);
3865 return num_vfs_alloced;
3868 static int lio_pci_sriov_disable(struct octeon_device *oct)
3872 if (pci_vfs_assigned(oct->pci_dev)) {
3873 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3877 pci_disable_sriov(oct->pci_dev);
3880 while (u < MAX_POSSIBLE_VFS) {
3881 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3882 u += oct->sriov_info.rings_per_vf;
3885 oct->sriov_info.num_vfs_alloced = 0;
3886 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3892 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3894 struct octeon_device *oct = pci_get_drvdata(dev);
3897 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3898 (oct->sriov_info.sriov_enabled)) {
3899 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3900 oct->pf_num, num_vfs);
3905 lio_vf_rep_destroy(oct);
3906 ret = lio_pci_sriov_disable(oct);
3907 } else if (num_vfs > oct->sriov_info.max_vfs) {
3908 dev_err(&oct->pci_dev->dev,
3909 "OCTEON: Max allowed VFs:%d user requested:%d",
3910 oct->sriov_info.max_vfs, num_vfs);
3913 oct->sriov_info.num_vfs_alloced = num_vfs;
3914 ret = octeon_enable_sriov(oct);
3915 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3916 oct->pf_num, num_vfs);
3917 ret = lio_vf_rep_create(oct);
3919 dev_info(&oct->pci_dev->dev,
3920 "vf representor create failed");
3928 * \brief initialize the NIC
3929 * @param oct octeon device
3931 * This initialization routine is called once the Octeon device application is
3934 static int liquidio_init_nic_module(struct octeon_device *oct)
3937 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3939 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3941 /* only default iq and oq were initialized
3942 * initialize the rest as well
3944 /* run port_config command for each port */
3945 oct->ifcount = num_nic_ports;
3947 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3949 for (i = 0; i < MAX_OCTEON_LINKS; i++)
3950 oct->props[i].gmxport = -1;
3952 retval = setup_nic_devices(oct);
3954 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3955 goto octnet_init_failure;
3958 /* Call vf_rep_modinit if the firmware is switchdev capable
3959 * and do it from the first liquidio function probed.
3961 if (!oct->octeon_id &&
3962 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3963 retval = lio_vf_rep_modinit();
3965 liquidio_stop_nic_module(oct);
3966 goto octnet_init_failure;
3970 liquidio_ptp_init(oct);
3972 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3976 octnet_init_failure:
3984 * \brief starter callback that invokes the remaining initialization work after
3985 * the NIC is up and running.
3986 * @param octptr work struct work_struct
3988 static void nic_starter(struct work_struct *work)
3990 struct octeon_device *oct;
3991 struct cavium_wk *wk = (struct cavium_wk *)work;
3993 oct = (struct octeon_device *)wk->ctxptr;
3995 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3998 /* If the status of the device is CORE_OK, the core
3999 * application has reported its application type. Call
4000 * any registered handlers now and move to the RUNNING
4003 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
4004 schedule_delayed_work(&oct->nic_poll_work.work,
4005 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4009 atomic_set(&oct->status, OCT_DEV_RUNNING);
4011 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
4012 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
4014 if (liquidio_init_nic_module(oct))
4015 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
4017 handshake[oct->octeon_id].started_ok = 1;
4019 dev_err(&oct->pci_dev->dev,
4020 "Unexpected application running on NIC (%d). Check firmware.\n",
4024 complete(&handshake[oct->octeon_id].started);
4028 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4030 struct octeon_device *oct = (struct octeon_device *)buf;
4031 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4032 int i, notice, vf_idx;
4036 notice = recv_pkt->rh.r.ossp;
4037 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4039 /* the first 64-bit word of data is the vf_num */
4041 octeon_swap_8B_data(&vf_num, 1);
4042 vf_idx = (int)vf_num - 1;
4044 cores_crashed = READ_ONCE(oct->cores_crashed);
4046 if (notice == VF_DRV_LOADED) {
4047 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4048 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4049 dev_info(&oct->pci_dev->dev,
4050 "driver for VF%d was loaded\n", vf_idx);
4052 try_module_get(THIS_MODULE);
4054 } else if (notice == VF_DRV_REMOVED) {
4055 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4056 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4057 dev_info(&oct->pci_dev->dev,
4058 "driver for VF%d was removed\n", vf_idx);
4060 module_put(THIS_MODULE);
4062 } else if (notice == VF_DRV_MACADDR_CHANGED) {
4063 u8 *b = (u8 *)&data[1];
4065 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4066 dev_info(&oct->pci_dev->dev,
4067 "VF driver changed VF%d's MAC address to %pM\n",
4071 for (i = 0; i < recv_pkt->buffer_count; i++)
4072 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4073 octeon_free_recv_info(recv_info);
4079 * \brief Device initialization for each Octeon device that is probed
4080 * @param octeon_dev octeon device
4082 static int octeon_device_init(struct octeon_device *octeon_dev)
4085 char bootcmd[] = "\n";
4086 char *dbg_enb = NULL;
4087 enum lio_fw_state fw_state;
4088 struct octeon_device_priv *oct_priv =
4089 (struct octeon_device_priv *)octeon_dev->priv;
4090 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4092 /* Enable access to the octeon device and make its DMA capability
4095 if (octeon_pci_os_setup(octeon_dev))
4098 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4100 /* Identify the Octeon type and map the BAR address space. */
4101 if (octeon_chip_specific_setup(octeon_dev)) {
4102 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4106 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4108 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4109 * since that is what is required for the reference to be removed
4110 * during de-initialization (see 'octeon_destroy_resources').
4112 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4113 PCI_SLOT(octeon_dev->pci_dev->devfn),
4114 PCI_FUNC(octeon_dev->pci_dev->devfn),
4117 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4119 /* CN23XX supports preloaded firmware if the following is true:
4121 * The adapter indicates that firmware is currently running AND
4122 * 'fw_type' is 'auto'.
4124 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4126 if (OCTEON_CN23XX_PF(octeon_dev) &&
4127 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4128 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4129 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4132 /* If loading firmware, only first device of adapter needs to do so. */
4133 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4134 FW_NEEDS_TO_BE_LOADED,
4135 FW_IS_BEING_LOADED);
4137 /* Here, [local variable] 'fw_state' is set to one of:
4139 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
4140 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4141 * firmware to the adapter.
4142 * FW_IS_BEING_LOADED: The driver's second instance will not load
4143 * firmware to the adapter.
4146 /* Prior to f/w load, perform a soft reset of the Octeon device;
4147 * if error resetting, return w/error.
4149 if (fw_state == FW_NEEDS_TO_BE_LOADED)
4150 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4153 /* Initialize the dispatch mechanism used to push packets arriving on
4154 * Octeon Output queues.
4156 if (octeon_init_dispatch_list(octeon_dev))
4159 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4160 OPCODE_NIC_CORE_DRV_ACTIVE,
4161 octeon_core_drv_init,
4164 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4165 OPCODE_NIC_VF_DRV_NOTICE,
4166 octeon_recv_vf_drv_notice, octeon_dev);
4167 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4168 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4169 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4170 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
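	/* nic_starter() polls the device status until the core application
	 * reports CORE_OK, then moves the device to RUNNING and performs the
	 * NIC-specific initialization (liquidio_init_nic_module()).
	 */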
4172 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4174 if (octeon_set_io_queues_off(octeon_dev)) {
4175 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4179 if (OCTEON_CN23XX_PF(octeon_dev)) {
4180 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4182 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4187 /* Initialize soft command buffer pool
4189 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4190 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4193 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4195 /* Setup the data structures that manage this Octeon's Input queues. */
4196 if (octeon_setup_instr_queues(octeon_dev)) {
4197 dev_err(&octeon_dev->pci_dev->dev,
4198 "instruction queue initialization failed\n");
4201 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4203 /* Initialize lists to manage the requests of different types that
4204 * arrive from user & kernel applications for this octeon device.
4206 if (octeon_setup_response_list(octeon_dev)) {
4207 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4210 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4212 if (octeon_setup_output_queues(octeon_dev)) {
4213 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4217 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4219 if (OCTEON_CN23XX_PF(octeon_dev)) {
4220 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4221 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4224 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4226 if (octeon_allocate_ioq_vector(octeon_dev)) {
4227 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4230 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4233 /* The input and output queue registers were setup earlier (the
4234 * queues were not enabled). Any additional registers
4235 * that need to be programmed should be done now.
4237 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4239 dev_err(&octeon_dev->pci_dev->dev,
4240 "Failed to configure device registers\n");
4245 /* Initialize the tasklet that handles output queue packet processing.*/
4246 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4247 tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4248 (unsigned long)octeon_dev);
4250 /* Setup the interrupt handler and record the INT SUM register address
4252 if (octeon_setup_interrupt(octeon_dev,
4253 octeon_dev->sriov_info.num_pf_rings))
4256 /* Enable Octeon device interrupts */
4257 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4259 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4261 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4262 * the output queue is enabled.
4263 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4264 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4265 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4266 * before any credits have been issued, causing the ring to be reset
4267 * (and the f/w appear to never have started).
4269 for (j = 0; j < octeon_dev->num_oqs; j++)
4270 writel(octeon_dev->droq[j]->max_count,
4271 octeon_dev->droq[j]->pkts_credit_reg);
4273 /* Enable the input and output queues for this Octeon device */
4274 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4276 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4280 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4282 if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4283 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4285 dev_info(&octeon_dev->pci_dev->dev,
4286 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4289 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4291 /* Wait for the octeon to initialize DDR after the soft-reset.*/
4292 while (!ddr_timeout) {
4293 set_current_state(TASK_INTERRUPTIBLE);
4294 if (schedule_timeout(HZ / 10)) {
4295 /* user probably pressed Control-C */
4299 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4301 dev_err(&octeon_dev->pci_dev->dev,
4302 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4307 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4308 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4312 /* Divert uboot to take commands from host instead. */
4313 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4315 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4316 ret = octeon_init_consoles(octeon_dev);
4318 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4321 /* If console debug enabled, specify empty string to use default
4322 * enablement ELSE specify NULL string for 'disabled'.
4324 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4325 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4327 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4329 } else if (octeon_console_debug_enabled(0)) {
4330 /* If console was added AND we're logging console output
4331 * then set our console print function.
4333 octeon_dev->console[0].print = octeon_dbg_console_print;
4336 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4338 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4339 ret = load_firmware(octeon_dev);
4341 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4345 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4348 handshake[octeon_dev->octeon_id].init_ok = 1;
4349 complete(&handshake[octeon_dev->octeon_id].init);
4351 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4357 * \brief Debug console print function
4358 * @param octeon_dev octeon device
4359 * @param console_num console number
4360 * @param prefix first portion of line to display
4361 * @param suffix second portion of line to display
4363 * The OCTEON debug console outputs entire lines (excluding '\n').
4364 * Normally, the line will be passed in the 'prefix' parameter.
4365 * However, due to buffering, it is possible for a line to be split into two
4366 * parts, in which case they will be passed as the 'prefix' parameter and
4367 * 'suffix' parameter.
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix)
{
	if (prefix && suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
			 suffix);
	else if (prefix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
	else if (suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);

	return 0;
}
/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);