1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2016 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <linux/firmware.h>
22 #include <net/vxlan.h>
23 #include <linux/kthread.h>
24 #include "liquidio_common.h"
25 #include "octeon_droq.h"
26 #include "octeon_iq.h"
27 #include "response_manager.h"
28 #include "octeon_device.h"
29 #include "octeon_nic.h"
30 #include "octeon_main.h"
31 #include "octeon_network.h"
32 #include "cn66xx_regs.h"
33 #include "cn66xx_device.h"
34 #include "cn68xx_device.h"
35 #include "cn23xx_pf_device.h"
36 #include "liquidio_image.h"
37 #include "lio_vf_rep.h"
39 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
40 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
41 MODULE_LICENSE("GPL");
42 MODULE_VERSION(LIQUIDIO_VERSION);
43 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
44 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
45 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
46 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
47 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
48 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
49 MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
50 "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
52 static int ddr_timeout = 10000;
53 module_param(ddr_timeout, int, 0644);
54 MODULE_PARM_DESC(ddr_timeout,
55 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
57 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
59 static int debug = -1;
60 module_param(debug, int, 0644);
61 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
63 static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
64 module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
65 MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
67 static u32 console_bitmask;
68 module_param(console_bitmask, int, 0644);
69 MODULE_PARM_DESC(console_bitmask,
70 "Bitmask indicating which consoles have debug output redirected to syslog.");
73 * \brief determines if a given console has debug enabled.
74 * @param console console to check
75 * @returns 1 = enabled. 0 otherwise
77 static int octeon_console_debug_enabled(u32 console)
79 return (console_bitmask >> (console)) & 0x1;
82 /* Polling interval for determining when NIC application is alive */
83 #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
85 /* runtime link query interval */
86 #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
87 /* update localtime to octeon firmware every 60 seconds.
88 * make firmware to use same time reference, so that it will be easy to
89 * correlate firmware logged events/errors with host events, for debugging.
91 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
93 /* time to wait for possible in-flight requests in milliseconds */
94 #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
96 struct lio_trusted_vf_ctx {
97 struct completion complete;
101 struct oct_link_status_resp {
103 struct oct_link_info link_info;
107 struct oct_timestamp_resp {
113 #define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
118 #ifdef __BIG_ENDIAN_BITFIELD
130 /** Octeon device properties to be used by the NIC module.
131 * Each octeon device in the system will be represented
132 * by this structure in the NIC module.
135 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
136 #define OCTNIC_GSO_MAX_SIZE \
137 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
140 struct completion init;
141 struct completion started;
142 struct pci_dev *pci_dev;
147 #ifdef CONFIG_PCI_IOV
148 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
151 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
152 char *prefix, char *suffix);
154 static int octeon_device_init(struct octeon_device *);
155 static int liquidio_stop(struct net_device *netdev);
156 static void liquidio_remove(struct pci_dev *pdev);
157 static int liquidio_probe(struct pci_dev *pdev,
158 const struct pci_device_id *ent);
159 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
162 static struct handshake handshake[MAX_OCTEON_DEVICES];
163 static struct completion first_stage;
165 static void octeon_droq_bh(unsigned long pdev)
169 struct octeon_device *oct = (struct octeon_device *)pdev;
170 struct octeon_device_priv *oct_priv =
171 (struct octeon_device_priv *)oct->priv;
173 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
174 if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
176 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
178 lio_enable_irq(oct->droq[q_no], NULL);
180 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
181 /* set time and cnt interrupt thresholds for this DROQ
184 int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
187 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
190 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
195 tasklet_schedule(&oct_priv->droq_tasklet);
198 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
200 struct octeon_device_priv *oct_priv =
201 (struct octeon_device_priv *)oct->priv;
202 int retry = 100, pkt_cnt = 0, pending_pkts = 0;
208 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
209 if (!(oct->io_qmask.oq & BIT_ULL(i)))
211 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
214 pending_pkts += pkt_cnt;
215 tasklet_schedule(&oct_priv->droq_tasklet);
218 schedule_timeout_uninterruptible(1);
220 } while (retry-- && pending_pkts);
226 * \brief Forces all IO queues off on a given device
227 * @param oct Pointer to Octeon device
229 static void force_io_queues_off(struct octeon_device *oct)
231 if ((oct->chip_id == OCTEON_CN66XX) ||
232 (oct->chip_id == OCTEON_CN68XX)) {
233 /* Reset the Enable bits for Input Queues. */
234 octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
236 /* Reset the Enable bits for Output Queues. */
237 octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
242 * \brief Cause device to go quiet so it can be safely removed/reset/etc
243 * @param oct Pointer to Octeon device
245 static inline void pcierror_quiesce_device(struct octeon_device *oct)
249 /* Disable the input and output queues now. No more packets will
250 * arrive from Octeon, but we should wait for all packet processing
253 force_io_queues_off(oct);
255 /* To allow for in-flight requests */
256 schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
258 if (wait_for_pending_requests(oct))
259 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
261 /* Force all requests waiting to be fetched by OCTEON to complete. */
262 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
263 struct octeon_instr_queue *iq;
265 if (!(oct->io_qmask.iq & BIT_ULL(i)))
267 iq = oct->instr_queue[i];
269 if (atomic_read(&iq->instr_pending)) {
270 spin_lock_bh(&iq->lock);
272 iq->octeon_read_index = iq->host_write_index;
273 iq->stats.instr_processed +=
274 atomic_read(&iq->instr_pending);
275 lio_process_iq_request_list(oct, iq, 0);
276 spin_unlock_bh(&iq->lock);
280 /* Force all pending ordered list requests to time out. */
281 lio_process_ordered_list(oct, 1);
283 /* We do not need to wait for output queue packets to be processed. */
287 * \brief Cleanup PCI AER uncorrectable error status
288 * @param dev Pointer to PCI device
290 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
295 pr_info("%s :\n", __func__);
297 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
298 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
299 if (dev->error_state == pci_channel_io_normal)
300 status &= ~mask; /* Clear corresponding nonfatal bits */
302 status &= mask; /* Clear corresponding fatal bits */
303 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
307 * \brief Stop all PCI IO to a given device
308 * @param dev Pointer to Octeon device
310 static void stop_pci_io(struct octeon_device *oct)
312 /* No more instructions will be forwarded. */
313 atomic_set(&oct->status, OCT_DEV_IN_RESET);
315 pci_disable_device(oct->pci_dev);
317 /* Disable interrupts */
318 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
320 pcierror_quiesce_device(oct);
322 /* Release the interrupt line */
323 free_irq(oct->pci_dev->irq, oct);
325 if (oct->flags & LIO_FLAG_MSI_ENABLED)
326 pci_disable_msi(oct->pci_dev);
328 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
329 lio_get_state_string(&oct->status));
331 /* making it a common function for all OCTEON models */
332 cleanup_aer_uncorrect_error_status(oct->pci_dev);
336 * \brief called when PCI error is detected
337 * @param pdev Pointer to PCI device
338 * @param state The current pci connection state
340 * This function is called after a PCI bus error affecting
341 * this device has been detected.
343 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
344 pci_channel_state_t state)
346 struct octeon_device *oct = pci_get_drvdata(pdev);
348 /* Non-correctable Non-fatal errors */
349 if (state == pci_channel_io_normal) {
350 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
351 cleanup_aer_uncorrect_error_status(oct->pci_dev);
352 return PCI_ERS_RESULT_CAN_RECOVER;
355 /* Non-correctable Fatal errors */
356 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
359 /* Always return a DISCONNECT. There is no support for recovery but only
360 * for a clean shutdown.
362 return PCI_ERS_RESULT_DISCONNECT;
366 * \brief mmio handler
367 * @param pdev Pointer to PCI device
369 static pci_ers_result_t liquidio_pcie_mmio_enabled(
370 struct pci_dev *pdev __attribute__((unused)))
372 /* We should never hit this since we never ask for a reset for a Fatal
373 * Error. We always return DISCONNECT in io_error above.
374 * But play safe and return RECOVERED for now.
376 return PCI_ERS_RESULT_RECOVERED;
380 * \brief called after the pci bus has been reset.
381 * @param pdev Pointer to PCI device
383 * Restart the card from scratch, as if from a cold-boot. Implementation
384 * resembles the first-half of the octeon_resume routine.
386 static pci_ers_result_t liquidio_pcie_slot_reset(
387 struct pci_dev *pdev __attribute__((unused)))
389 /* We should never hit this since we never ask for a reset for a Fatal
390 * Error. We always return DISCONNECT in io_error above.
391 * But play safe and return RECOVERED for now.
393 return PCI_ERS_RESULT_RECOVERED;
397 * \brief called when traffic can start flowing again.
398 * @param pdev Pointer to PCI device
400 * This callback is called when the error recovery driver tells us that
401 * its OK to resume normal operation. Implementation resembles the
402 * second-half of the octeon_resume routine.
404 static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
406 /* Nothing to be done here. */
411 * \brief called when suspending
412 * @param pdev Pointer to PCI device
413 * @param state state to suspend to
415 static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
416 pm_message_t state __attribute__((unused)))
422 * \brief called when resuming
423 * @param pdev Pointer to PCI device
425 static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
431 /* For PCI-E Advanced Error Recovery (AER) Interface */
432 static const struct pci_error_handlers liquidio_err_handler = {
433 .error_detected = liquidio_pcie_error_detected,
434 .mmio_enabled = liquidio_pcie_mmio_enabled,
435 .slot_reset = liquidio_pcie_slot_reset,
436 .resume = liquidio_pcie_resume,
439 static const struct pci_device_id liquidio_pci_tbl[] = {
441 PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
444 PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
447 PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
453 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
455 static struct pci_driver liquidio_pci_driver = {
457 .id_table = liquidio_pci_tbl,
458 .probe = liquidio_probe,
459 .remove = liquidio_remove,
460 .err_handler = &liquidio_err_handler, /* For AER */
463 .suspend = liquidio_suspend,
464 .resume = liquidio_resume,
466 #ifdef CONFIG_PCI_IOV
467 .sriov_configure = liquidio_enable_sriov,
472 * \brief register PCI driver
474 static int liquidio_init_pci(void)
476 return pci_register_driver(&liquidio_pci_driver);
480 * \brief unregister PCI driver
482 static void liquidio_deinit_pci(void)
484 pci_unregister_driver(&liquidio_pci_driver);
488 * \brief Check Tx queue status, and take appropriate action
489 * @param lio per-network private data
490 * @returns 0 if full, number of queues woken up otherwise
492 static inline int check_txq_status(struct lio *lio)
494 int numqs = lio->netdev->real_num_tx_queues;
498 /* check each sub-queue state */
499 for (q = 0; q < numqs; q++) {
500 iq = lio->linfo.txpciq[q %
501 lio->oct_dev->num_iqs].s.q_no;
502 if (octnet_iq_is_full(lio->oct_dev, iq))
504 if (__netif_subqueue_stopped(lio->netdev, q)) {
505 netif_wake_subqueue(lio->netdev, q);
506 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
516 * \brief Print link information
517 * @param netdev network device
519 static void print_link_info(struct net_device *netdev)
521 struct lio *lio = GET_LIO(netdev);
523 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
524 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
525 struct oct_link_info *linfo = &lio->linfo;
527 if (linfo->link.s.link_up) {
528 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
530 (linfo->link.s.duplex) ? "Full" : "Half");
532 netif_info(lio, link, lio->netdev, "Link Down\n");
538 * \brief Routine to notify MTU change
539 * @param work work_struct data structure
541 static void octnet_link_status_change(struct work_struct *work)
543 struct cavium_wk *wk = (struct cavium_wk *)work;
544 struct lio *lio = (struct lio *)wk->ctxptr;
546 /* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
547 * this API is invoked only when new max-MTU of the interface is
548 * less than current MTU.
551 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
556 * \brief Sets up the mtu status change work
557 * @param netdev network device
559 static inline int setup_link_status_change_wq(struct net_device *netdev)
561 struct lio *lio = GET_LIO(netdev);
562 struct octeon_device *oct = lio->oct_dev;
564 lio->link_status_wq.wq = alloc_workqueue("link-status",
566 if (!lio->link_status_wq.wq) {
567 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
570 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
571 octnet_link_status_change);
572 lio->link_status_wq.wk.ctxptr = lio;
577 static inline void cleanup_link_status_change_wq(struct net_device *netdev)
579 struct lio *lio = GET_LIO(netdev);
581 if (lio->link_status_wq.wq) {
582 cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
583 destroy_workqueue(lio->link_status_wq.wq);
588 * \brief Update link status
589 * @param netdev network device
590 * @param ls link status structure
592 * Called on receipt of a link status response from the core application to
593 * update each interface's link status.
595 static inline void update_link_status(struct net_device *netdev,
596 union oct_link_status *ls)
598 struct lio *lio = GET_LIO(netdev);
599 int changed = (lio->linfo.link.u64 != ls->u64);
600 int current_max_mtu = lio->linfo.link.s.mtu;
601 struct octeon_device *oct = lio->oct_dev;
603 dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
604 __func__, lio->linfo.link.u64, ls->u64);
605 lio->linfo.link.u64 = ls->u64;
607 if ((lio->intf_open) && (changed)) {
608 print_link_info(netdev);
611 if (lio->linfo.link.s.link_up) {
612 dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
613 netif_carrier_on(netdev);
616 dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
617 netif_carrier_off(netdev);
620 if (lio->linfo.link.s.mtu != current_max_mtu) {
621 netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
622 current_max_mtu, lio->linfo.link.s.mtu);
623 netdev->max_mtu = lio->linfo.link.s.mtu;
625 if (lio->linfo.link.s.mtu < netdev->mtu) {
626 dev_warn(&oct->pci_dev->dev,
627 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
628 netdev->mtu, lio->linfo.link.s.mtu);
629 queue_delayed_work(lio->link_status_wq.wq,
630 &lio->link_status_wq.wk.work, 0);
636 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
637 * firmware will correct it's time, in case there is a time skew
639 * @work: work scheduled to send time update to octeon firmware
641 static void lio_sync_octeon_time(struct work_struct *work)
643 struct cavium_wk *wk = (struct cavium_wk *)work;
644 struct lio *lio = (struct lio *)wk->ctxptr;
645 struct octeon_device *oct = lio->oct_dev;
646 struct octeon_soft_command *sc;
647 struct timespec64 ts;
651 sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
653 dev_err(&oct->pci_dev->dev,
654 "Failed to sync time to octeon: soft command allocation failed\n");
658 lt = (struct lio_time *)sc->virtdptr;
660 /* Get time of the day */
661 ktime_get_real_ts64(&ts);
663 lt->nsec = ts.tv_nsec;
664 octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
666 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
667 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
668 OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
670 init_completion(&sc->complete);
671 sc->sc_status = OCTEON_REQUEST_PENDING;
673 ret = octeon_send_soft_command(oct, sc);
674 if (ret == IQ_SEND_FAILED) {
675 dev_err(&oct->pci_dev->dev,
676 "Failed to sync time to octeon: failed to send soft command\n");
677 octeon_free_soft_command(oct, sc);
679 WRITE_ONCE(sc->caller_is_done, true);
682 queue_delayed_work(lio->sync_octeon_time_wq.wq,
683 &lio->sync_octeon_time_wq.wk.work,
684 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
688 * setup_sync_octeon_time_wq - Sets up the work to periodically update
689 * local time to octeon firmware
691 * @netdev - network device which should send time update to firmware
693 static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
695 struct lio *lio = GET_LIO(netdev);
696 struct octeon_device *oct = lio->oct_dev;
698 lio->sync_octeon_time_wq.wq =
699 alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
700 if (!lio->sync_octeon_time_wq.wq) {
701 dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
704 INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
705 lio_sync_octeon_time);
706 lio->sync_octeon_time_wq.wk.ctxptr = lio;
707 queue_delayed_work(lio->sync_octeon_time_wq.wq,
708 &lio->sync_octeon_time_wq.wk.work,
709 msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
715 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
716 * to periodically update local time to octeon firmware
718 * @netdev - network device which should send time update to firmware
720 static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
722 struct lio *lio = GET_LIO(netdev);
723 struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
726 cancel_delayed_work_sync(&time_wq->wk.work);
727 destroy_workqueue(time_wq->wq);
731 static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
733 struct octeon_device *other_oct;
735 other_oct = lio_get_device(oct->octeon_id + 1);
737 if (other_oct && other_oct->pci_dev) {
738 int oct_busnum, other_oct_busnum;
740 oct_busnum = oct->pci_dev->bus->number;
741 other_oct_busnum = other_oct->pci_dev->bus->number;
743 if (oct_busnum == other_oct_busnum) {
744 int oct_slot, other_oct_slot;
746 oct_slot = PCI_SLOT(oct->pci_dev->devfn);
747 other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
749 if (oct_slot == other_oct_slot)
757 static void disable_all_vf_links(struct octeon_device *oct)
759 struct net_device *netdev;
765 max_vfs = oct->sriov_info.max_vfs;
767 for (i = 0; i < oct->ifcount; i++) {
768 netdev = oct->props[i].netdev;
772 for (vf = 0; vf < max_vfs; vf++)
773 liquidio_set_vf_link_state(netdev, vf,
774 IFLA_VF_LINK_STATE_DISABLE);
778 static int liquidio_watchdog(void *param)
780 bool err_msg_was_printed[LIO_MAX_CORES];
781 u16 mask_of_crashed_or_stuck_cores = 0;
782 bool all_vf_links_are_disabled = false;
783 struct octeon_device *oct = param;
784 struct octeon_device *other_oct;
785 #ifdef CONFIG_MODULE_UNLOAD
786 long refcount, vfs_referencing_pf;
787 u64 vfs_mask1, vfs_mask2;
791 memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
793 while (!kthread_should_stop()) {
794 /* sleep for a couple of seconds so that we don't hog the CPU */
795 set_current_state(TASK_INTERRUPTIBLE);
796 schedule_timeout(msecs_to_jiffies(2000));
798 mask_of_crashed_or_stuck_cores =
799 (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
801 if (!mask_of_crashed_or_stuck_cores)
804 WRITE_ONCE(oct->cores_crashed, true);
805 other_oct = get_other_octeon_device(oct);
807 WRITE_ONCE(other_oct->cores_crashed, true);
809 for (core = 0; core < LIO_MAX_CORES; core++) {
810 bool core_crashed_or_got_stuck;
812 core_crashed_or_got_stuck =
813 (mask_of_crashed_or_stuck_cores
816 if (core_crashed_or_got_stuck &&
817 !err_msg_was_printed[core]) {
818 dev_err(&oct->pci_dev->dev,
819 "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
821 err_msg_was_printed[core] = true;
825 if (all_vf_links_are_disabled)
828 disable_all_vf_links(oct);
829 disable_all_vf_links(other_oct);
830 all_vf_links_are_disabled = true;
832 #ifdef CONFIG_MODULE_UNLOAD
833 vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
834 vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
836 vfs_referencing_pf = hweight64(vfs_mask1);
837 vfs_referencing_pf += hweight64(vfs_mask2);
839 refcount = module_refcount(THIS_MODULE);
840 if (refcount >= vfs_referencing_pf) {
841 while (vfs_referencing_pf) {
842 module_put(THIS_MODULE);
843 vfs_referencing_pf--;
853 * \brief PCI probe handler
854 * @param pdev PCI device structure
858 liquidio_probe(struct pci_dev *pdev,
859 const struct pci_device_id *ent __attribute__((unused)))
861 struct octeon_device *oct_dev = NULL;
862 struct handshake *hs;
864 oct_dev = octeon_allocate_device(pdev->device,
865 sizeof(struct octeon_device_priv));
867 dev_err(&pdev->dev, "Unable to allocate device\n");
871 if (pdev->device == OCTEON_CN23XX_PF_VID)
872 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
874 /* Enable PTP for 6XXX Device */
875 if (((pdev->device == OCTEON_CN66XX) ||
876 (pdev->device == OCTEON_CN68XX)))
877 oct_dev->ptp_enable = true;
879 oct_dev->ptp_enable = false;
881 dev_info(&pdev->dev, "Initializing device %x:%x.\n",
882 (u32)pdev->vendor, (u32)pdev->device);
884 /* Assign octeon_device for this device to the private data area. */
885 pci_set_drvdata(pdev, oct_dev);
887 /* set linux specific device pointer */
888 oct_dev->pci_dev = (void *)pdev;
890 oct_dev->subsystem_id = pdev->subsystem_vendor |
891 (pdev->subsystem_device << 16);
893 hs = &handshake[oct_dev->octeon_id];
894 init_completion(&hs->init);
895 init_completion(&hs->started);
898 if (oct_dev->octeon_id == 0)
899 /* first LiquidIO NIC is detected */
900 complete(&first_stage);
902 if (octeon_device_init(oct_dev)) {
904 liquidio_remove(pdev);
908 if (OCTEON_CN23XX_PF(oct_dev)) {
909 u8 bus, device, function;
911 if (atomic_read(oct_dev->adapter_refcount) == 1) {
912 /* Each NIC gets one watchdog kernel thread. The first
913 * PF (of each NIC) that gets pci_driver->probe()'d
914 * creates that thread.
916 bus = pdev->bus->number;
917 device = PCI_SLOT(pdev->devfn);
918 function = PCI_FUNC(pdev->devfn);
919 oct_dev->watchdog_task = kthread_create(
920 liquidio_watchdog, oct_dev,
921 "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
922 if (!IS_ERR(oct_dev->watchdog_task)) {
923 wake_up_process(oct_dev->watchdog_task);
925 oct_dev->watchdog_task = NULL;
926 dev_err(&oct_dev->pci_dev->dev,
927 "failed to create kernel_thread\n");
928 liquidio_remove(pdev);
934 oct_dev->rx_pause = 1;
935 oct_dev->tx_pause = 1;
937 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
942 static bool fw_type_is_auto(void)
944 return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
945 sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
949 * \brief PCI FLR for each Octeon device.
950 * @param oct octeon device
952 static void octeon_pci_flr(struct octeon_device *oct)
956 pci_save_state(oct->pci_dev);
958 pci_cfg_access_lock(oct->pci_dev);
960 /* Quiesce the device completely */
961 pci_write_config_word(oct->pci_dev, PCI_COMMAND,
962 PCI_COMMAND_INTX_DISABLE);
964 rc = __pci_reset_function_locked(oct->pci_dev);
967 dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
970 pci_cfg_access_unlock(oct->pci_dev);
972 pci_restore_state(oct->pci_dev);
976 *\brief Destroy resources associated with octeon device
977 * @param pdev PCI device structure
980 static void octeon_destroy_resources(struct octeon_device *oct)
983 struct msix_entry *msix_entries;
984 struct octeon_device_priv *oct_priv =
985 (struct octeon_device_priv *)oct->priv;
987 struct handshake *hs;
989 switch (atomic_read(&oct->status)) {
990 case OCT_DEV_RUNNING:
991 case OCT_DEV_CORE_OK:
993 /* No more instructions will be forwarded. */
994 atomic_set(&oct->status, OCT_DEV_IN_RESET);
996 oct->app_mode = CVM_DRV_INVALID_APP;
997 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
998 lio_get_state_string(&oct->status));
1000 schedule_timeout_uninterruptible(HZ / 10);
1003 case OCT_DEV_HOST_OK:
1006 case OCT_DEV_CONSOLE_INIT_DONE:
1007 /* Remove any consoles */
1008 octeon_remove_consoles(oct);
1011 case OCT_DEV_IO_QUEUES_DONE:
1012 if (lio_wait_for_instr_fetch(oct))
1013 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
1015 if (wait_for_pending_requests(oct))
1016 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
1018 /* Disable the input and output queues now. No more packets will
1019 * arrive from Octeon, but we should wait for all packet
1020 * processing to finish.
1022 oct->fn_list.disable_io_queues(oct);
1024 if (lio_wait_for_oq_pkts(oct))
1025 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1027 /* Force all requests waiting to be fetched by OCTEON to
1030 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1031 struct octeon_instr_queue *iq;
1033 if (!(oct->io_qmask.iq & BIT_ULL(i)))
1035 iq = oct->instr_queue[i];
1037 if (atomic_read(&iq->instr_pending)) {
1038 spin_lock_bh(&iq->lock);
1040 iq->octeon_read_index = iq->host_write_index;
1041 iq->stats.instr_processed +=
1042 atomic_read(&iq->instr_pending);
1043 lio_process_iq_request_list(oct, iq, 0);
1044 spin_unlock_bh(&iq->lock);
1048 lio_process_ordered_list(oct, 1);
1049 octeon_free_sc_done_list(oct);
1050 octeon_free_sc_zombie_list(oct);
1053 case OCT_DEV_INTR_SET_DONE:
1054 /* Disable interrupts */
1055 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1058 msix_entries = (struct msix_entry *)oct->msix_entries;
1059 for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1060 if (oct->ioq_vector[i].vector) {
1061 /* clear the affinity_cpumask */
1062 irq_set_affinity_hint(
1063 msix_entries[i].vector,
1065 free_irq(msix_entries[i].vector,
1066 &oct->ioq_vector[i]);
1067 oct->ioq_vector[i].vector = 0;
1070 /* non-iov vector's argument is oct struct */
1071 free_irq(msix_entries[i].vector, oct);
1073 pci_disable_msix(oct->pci_dev);
1074 kfree(oct->msix_entries);
1075 oct->msix_entries = NULL;
1077 /* Release the interrupt line */
1078 free_irq(oct->pci_dev->irq, oct);
1080 if (oct->flags & LIO_FLAG_MSI_ENABLED)
1081 pci_disable_msi(oct->pci_dev);
1084 kfree(oct->irq_name_storage);
1085 oct->irq_name_storage = NULL;
1088 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1089 if (OCTEON_CN23XX_PF(oct))
1090 octeon_free_ioq_vector(oct);
1093 case OCT_DEV_MBOX_SETUP_DONE:
1094 if (OCTEON_CN23XX_PF(oct))
1095 oct->fn_list.free_mbox(oct);
1098 case OCT_DEV_IN_RESET:
1099 case OCT_DEV_DROQ_INIT_DONE:
1100 /* Wait for any pending operations */
1102 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
1103 if (!(oct->io_qmask.oq & BIT_ULL(i)))
1105 octeon_delete_droq(oct, i);
1108 /* Force any pending handshakes to complete */
1109 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
1113 handshake[oct->octeon_id].init_ok = 0;
1114 complete(&handshake[oct->octeon_id].init);
1115 handshake[oct->octeon_id].started_ok = 0;
1116 complete(&handshake[oct->octeon_id].started);
1121 case OCT_DEV_RESP_LIST_INIT_DONE:
1122 octeon_delete_response_list(oct);
1125 case OCT_DEV_INSTR_QUEUE_INIT_DONE:
1126 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1127 if (!(oct->io_qmask.iq & BIT_ULL(i)))
1129 octeon_delete_instr_queue(oct, i);
1131 #ifdef CONFIG_PCI_IOV
1132 if (oct->sriov_info.sriov_enabled)
1133 pci_disable_sriov(oct->pci_dev);
1136 case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
1137 octeon_free_sc_buffer_pool(oct);
1140 case OCT_DEV_DISPATCH_INIT_DONE:
1141 octeon_delete_dispatch_list(oct);
1142 cancel_delayed_work_sync(&oct->nic_poll_work.work);
1145 case OCT_DEV_PCI_MAP_DONE:
1146 refcount = octeon_deregister_device(oct);
1148 /* Soft reset the octeon device before exiting.
1149 * However, if fw was loaded from card (i.e. autoboot),
1150 * perform an FLR instead.
1151 * Implementation note: only soft-reset the device
1152 * if it is a CN6XXX OR the LAST CN23XX device.
1154 if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
1155 octeon_pci_flr(oct);
1156 else if (OCTEON_CN6XXX(oct) || !refcount)
1157 oct->fn_list.soft_reset(oct);
1159 octeon_unmap_pci_barx(oct, 0);
1160 octeon_unmap_pci_barx(oct, 1);
1163 case OCT_DEV_PCI_ENABLE_DONE:
1164 pci_clear_master(oct->pci_dev);
1165 /* Disable the device, releasing the PCI INT */
1166 pci_disable_device(oct->pci_dev);
1169 case OCT_DEV_BEGIN_STATE:
1170 /* Nothing to be done here either */
1172 } /* end switch (oct->status) */
1174 tasklet_kill(&oct_priv->droq_tasklet);
1178 * \brief Send Rx control command
1179 * @param lio per-network private data
1180 * @param start_stop whether to start or stop
1182 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1184 struct octeon_soft_command *sc;
1185 union octnet_cmd *ncmd;
1186 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1189 if (oct->props[lio->ifidx].rx_on == start_stop)
1192 sc = (struct octeon_soft_command *)
1193 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1196 netif_info(lio, rx_err, lio->netdev,
1197 "Failed to allocate octeon_soft_command\n");
1201 ncmd = (union octnet_cmd *)sc->virtdptr;
1204 ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1205 ncmd->s.param1 = start_stop;
1207 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1209 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1211 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1212 OPCODE_NIC_CMD, 0, 0, 0);
1214 init_completion(&sc->complete);
1215 sc->sc_status = OCTEON_REQUEST_PENDING;
1217 retval = octeon_send_soft_command(oct, sc);
1218 if (retval == IQ_SEND_FAILED) {
1219 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1220 octeon_free_soft_command(oct, sc);
1223 /* Sleep on a wait queue till the cond flag indicates that the
1224 * response arrived or timed-out.
1226 retval = wait_for_sc_completion_timeout(oct, sc, 0);
1230 oct->props[lio->ifidx].rx_on = start_stop;
1231 WRITE_ONCE(sc->caller_is_done, true);
1236 * \brief Destroy NIC device interface
1237 * @param oct octeon device
1238 * @param ifidx which interface to destroy
1240 * Cleanup associated with each interface for an Octeon device when NIC
1241 * module is being unloaded or if initialization fails during load.
1243 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1245 struct net_device *netdev = oct->props[ifidx].netdev;
1246 struct octeon_device_priv *oct_priv =
1247 (struct octeon_device_priv *)oct->priv;
1248 struct napi_struct *napi, *n;
1252 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1257 lio = GET_LIO(netdev);
1259 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1261 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1262 liquidio_stop(netdev);
1264 if (oct->props[lio->ifidx].napi_enabled == 1) {
1265 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1268 oct->props[lio->ifidx].napi_enabled = 0;
1270 if (OCTEON_CN23XX_PF(oct))
1271 oct->droq[0]->ops.poll_mode = 0;
1275 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1276 netif_napi_del(napi);
1278 tasklet_enable(&oct_priv->droq_tasklet);
1280 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1281 unregister_netdev(netdev);
1283 cleanup_sync_octeon_time_wq(netdev);
1284 cleanup_link_status_change_wq(netdev);
1286 cleanup_rx_oom_poll_fn(netdev);
1288 lio_delete_glists(lio);
1290 free_netdev(netdev);
1292 oct->props[ifidx].gmxport = -1;
1294 oct->props[ifidx].netdev = NULL;
1298 * \brief Stop complete NIC functionality
1299 * @param oct octeon device
1301 static int liquidio_stop_nic_module(struct octeon_device *oct)
1306 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1307 if (!oct->ifcount) {
1308 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1312 spin_lock_bh(&oct->cmd_resp_wqlock);
1313 oct->cmd_resp_state = OCT_DRV_OFFLINE;
1314 spin_unlock_bh(&oct->cmd_resp_wqlock);
1316 lio_vf_rep_destroy(oct);
1318 for (i = 0; i < oct->ifcount; i++) {
1319 lio = GET_LIO(oct->props[i].netdev);
1320 for (j = 0; j < oct->num_oqs; j++)
1321 octeon_unregister_droq_ops(oct,
1322 lio->linfo.rxpciq[j].s.q_no);
1325 for (i = 0; i < oct->ifcount; i++)
1326 liquidio_destroy_nic_device(oct, i);
1329 devlink_unregister(oct->devlink);
1330 devlink_free(oct->devlink);
1331 oct->devlink = NULL;
1334 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1339 * \brief Cleans up resources at unload time
1340 * @param pdev PCI device structure
1342 static void liquidio_remove(struct pci_dev *pdev)
1344 struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1346 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1348 if (oct_dev->watchdog_task)
1349 kthread_stop(oct_dev->watchdog_task);
1351 if (!oct_dev->octeon_id &&
1352 oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1353 lio_vf_rep_modexit();
1355 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1356 liquidio_stop_nic_module(oct_dev);
1358 /* Reset the octeon device and cleanup all memory allocated for
1359 * the octeon device by driver.
1361 octeon_destroy_resources(oct_dev);
1363 dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1365 /* This octeon device has been removed. Update the global
1366 * data structure to reflect this. Free the device structure.
1368 octeon_free_device_mem(oct_dev);
1372 * \brief Identify the Octeon device and to map the BAR address space
1373 * @param oct octeon device
1375 static int octeon_chip_specific_setup(struct octeon_device *oct)
1381 pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1382 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1383 oct->rev_id = rev_id & 0xff;
1386 case OCTEON_CN68XX_PCIID:
1387 oct->chip_id = OCTEON_CN68XX;
1388 ret = lio_setup_cn68xx_octeon_device(oct);
1392 case OCTEON_CN66XX_PCIID:
1393 oct->chip_id = OCTEON_CN66XX;
1394 ret = lio_setup_cn66xx_octeon_device(oct);
1398 case OCTEON_CN23XX_PCIID_PF:
1399 oct->chip_id = OCTEON_CN23XX_PF_VID;
1400 ret = setup_cn23xx_octeon_pf_device(oct);
1403 #ifdef CONFIG_PCI_IOV
1405 pci_sriov_set_totalvfs(oct->pci_dev,
1406 oct->sriov_info.max_vfs);
1413 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1418 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1419 OCTEON_MAJOR_REV(oct),
1420 OCTEON_MINOR_REV(oct),
1421 octeon_get_conf(oct)->card_name,
1428 * \brief PCI initialization for each Octeon device.
1429 * @param oct octeon device
1431 static int octeon_pci_os_setup(struct octeon_device *oct)
1433 /* setup PCI stuff first */
1434 if (pci_enable_device(oct->pci_dev)) {
1435 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1439 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1440 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1441 pci_disable_device(oct->pci_dev);
1445 /* Enable PCI DMA Master. */
1446 pci_set_master(oct->pci_dev);
1452 * \brief Unmap and free network buffer
1455 static void free_netbuf(void *buf)
1457 struct sk_buff *skb;
1458 struct octnet_buf_free_info *finfo;
1461 finfo = (struct octnet_buf_free_info *)buf;
1465 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1468 tx_buffer_free(skb);
1472 * \brief Unmap and free gather buffer
1475 static void free_netsgbuf(void *buf)
1477 struct octnet_buf_free_info *finfo;
1478 struct sk_buff *skb;
1480 struct octnic_gather *g;
1483 finfo = (struct octnet_buf_free_info *)buf;
1487 frags = skb_shinfo(skb)->nr_frags;
1489 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1490 g->sg[0].ptr[0], (skb->len - skb->data_len),
1495 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1497 pci_unmap_page((lio->oct_dev)->pci_dev,
1498 g->sg[(i >> 2)].ptr[(i & 3)],
1499 frag->size, DMA_TO_DEVICE);
1503 iq = skb_iq(lio->oct_dev, skb);
1504 spin_lock(&lio->glist_lock[iq]);
1505 list_add_tail(&g->list, &lio->glist[iq]);
1506 spin_unlock(&lio->glist_lock[iq]);
1508 tx_buffer_free(skb);
1512 * \brief Unmap and free gather buffer with response
1515 static void free_netsgbuf_with_resp(void *buf)
1517 struct octeon_soft_command *sc;
1518 struct octnet_buf_free_info *finfo;
1519 struct sk_buff *skb;
1521 struct octnic_gather *g;
1524 sc = (struct octeon_soft_command *)buf;
1525 skb = (struct sk_buff *)sc->callback_arg;
1526 finfo = (struct octnet_buf_free_info *)&skb->cb;
1530 frags = skb_shinfo(skb)->nr_frags;
1532 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1533 g->sg[0].ptr[0], (skb->len - skb->data_len),
1538 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1540 pci_unmap_page((lio->oct_dev)->pci_dev,
1541 g->sg[(i >> 2)].ptr[(i & 3)],
1542 frag->size, DMA_TO_DEVICE);
1546 iq = skb_iq(lio->oct_dev, skb);
1548 spin_lock(&lio->glist_lock[iq]);
1549 list_add_tail(&g->list, &lio->glist[iq]);
1550 spin_unlock(&lio->glist_lock[iq]);
1552 /* Don't free the skb yet */
1556 * \brief Adjust ptp frequency
1557 * @param ptp PTP clock info
1558 * @param ppb how much to adjust by, in parts-per-billion
1560 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1562 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1563 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1565 unsigned long flags;
1566 bool neg_adj = false;
1573 /* The hardware adds the clock compensation value to the
1574 * PTP clock on every coprocessor clock cycle, so we
1575 * compute the delta in terms of coprocessor clocks.
1577 delta = (u64)ppb << 32;
1578 do_div(delta, oct->coproc_clock_rate);
1580 spin_lock_irqsave(&lio->ptp_lock, flags);
1581 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1586 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1587 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1593 * \brief Adjust ptp time
1594 * @param ptp PTP clock info
1595 * @param delta how much to adjust by, in nanosecs
1597 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1599 unsigned long flags;
1600 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1602 spin_lock_irqsave(&lio->ptp_lock, flags);
1603 lio->ptp_adjust += delta;
1604 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1610 * \brief Get hardware clock time, including any adjustment
1611 * @param ptp PTP clock info
1612 * @param ts timespec
1614 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1615 struct timespec64 *ts)
1618 unsigned long flags;
1619 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1620 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1622 spin_lock_irqsave(&lio->ptp_lock, flags);
1623 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1624 ns += lio->ptp_adjust;
1625 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1627 *ts = ns_to_timespec64(ns);
1633 * \brief Set hardware clock time. Reset adjustment
1634 * @param ptp PTP clock info
1635 * @param ts timespec
1637 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1638 const struct timespec64 *ts)
1641 unsigned long flags;
1642 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1643 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1645 ns = timespec64_to_ns(ts);
1647 spin_lock_irqsave(&lio->ptp_lock, flags);
1648 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1649 lio->ptp_adjust = 0;
1650 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1656 * \brief Check if PTP is enabled
1657 * @param ptp PTP clock info
1659 * @param on is it on
1662 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1663 struct ptp_clock_request *rq __attribute__((unused)),
1664 int on __attribute__((unused)))
1670 * \brief Open PTP clock source
1671 * @param netdev network device
1673 static void oct_ptp_open(struct net_device *netdev)
1675 struct lio *lio = GET_LIO(netdev);
1676 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1678 spin_lock_init(&lio->ptp_lock);
1680 snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1681 lio->ptp_info.owner = THIS_MODULE;
1682 lio->ptp_info.max_adj = 250000000;
1683 lio->ptp_info.n_alarm = 0;
1684 lio->ptp_info.n_ext_ts = 0;
1685 lio->ptp_info.n_per_out = 0;
1686 lio->ptp_info.pps = 0;
1687 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1688 lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1689 lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1690 lio->ptp_info.settime64 = liquidio_ptp_settime;
1691 lio->ptp_info.enable = liquidio_ptp_enable;
1693 lio->ptp_adjust = 0;
1695 lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1696 &oct->pci_dev->dev);
1698 if (IS_ERR(lio->ptp_clock))
1699 lio->ptp_clock = NULL;
1703 * \brief Init PTP clock
1704 * @param oct octeon device
1706 static void liquidio_ptp_init(struct octeon_device *oct)
1708 u64 clock_comp, cfg;
1710 clock_comp = (u64)NSEC_PER_SEC << 32;
1711 do_div(clock_comp, oct->coproc_clock_rate);
1712 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1715 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1716 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1720 * \brief Load firmware to device
1721 * @param oct octeon device
1723 * Maps device to firmware filename, requests firmware, and downloads it
1725 static int load_firmware(struct octeon_device *oct)
1728 const struct firmware *fw;
1729 char fw_name[LIO_MAX_FW_FILENAME_LEN];
1732 if (fw_type_is_auto()) {
1733 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1734 strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
1736 tmp_fw_type = fw_type;
1739 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1740 octeon_get_conf(oct)->card_name, tmp_fw_type,
1741 LIO_FW_NAME_SUFFIX);
1743 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1745 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1747 release_firmware(fw);
1751 ret = octeon_download_firmware(oct, fw->data, fw->size);
1753 release_firmware(fw);
1759 * \brief Poll routine for checking transmit queue status
1760 * @param work work_struct data structure
1762 static void octnet_poll_check_txq_status(struct work_struct *work)
1764 struct cavium_wk *wk = (struct cavium_wk *)work;
1765 struct lio *lio = (struct lio *)wk->ctxptr;
1767 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
1770 check_txq_status(lio);
1771 queue_delayed_work(lio->txq_status_wq.wq,
1772 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1776 * \brief Sets up the txq poll check
1777 * @param netdev network device
1779 static inline int setup_tx_poll_fn(struct net_device *netdev)
1781 struct lio *lio = GET_LIO(netdev);
1782 struct octeon_device *oct = lio->oct_dev;
1784 lio->txq_status_wq.wq = alloc_workqueue("txq-status",
1786 if (!lio->txq_status_wq.wq) {
1787 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
1790 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
1791 octnet_poll_check_txq_status);
1792 lio->txq_status_wq.wk.ctxptr = lio;
1793 queue_delayed_work(lio->txq_status_wq.wq,
1794 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
1798 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
1800 struct lio *lio = GET_LIO(netdev);
1802 if (lio->txq_status_wq.wq) {
1803 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
1804 destroy_workqueue(lio->txq_status_wq.wq);
1809 * \brief Net device open for LiquidIO
1810 * @param netdev network device
1812 static int liquidio_open(struct net_device *netdev)
1814 struct lio *lio = GET_LIO(netdev);
1815 struct octeon_device *oct = lio->oct_dev;
1816 struct octeon_device_priv *oct_priv =
1817 (struct octeon_device_priv *)oct->priv;
1818 struct napi_struct *napi, *n;
1820 if (oct->props[lio->ifidx].napi_enabled == 0) {
1821 tasklet_disable(&oct_priv->droq_tasklet);
1823 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1826 oct->props[lio->ifidx].napi_enabled = 1;
1828 if (OCTEON_CN23XX_PF(oct))
1829 oct->droq[0]->ops.poll_mode = 1;
1832 if (oct->ptp_enable)
1833 oct_ptp_open(netdev);
1835 ifstate_set(lio, LIO_IFSTATE_RUNNING);
1837 if (OCTEON_CN23XX_PF(oct)) {
1839 if (setup_tx_poll_fn(netdev))
1842 if (setup_tx_poll_fn(netdev))
1846 netif_tx_start_all_queues(netdev);
1848 /* Ready for link status updates */
1851 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
1853 /* tell Octeon to start forwarding packets to host */
1854 send_rx_ctrl_cmd(lio, 1);
1856 /* start periodical statistics fetch */
1857 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
1858 lio->stats_wk.ctxptr = lio;
1859 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
1860 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
1862 dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
1869 * \brief Net device stop for LiquidIO
1870 * @param netdev network device
1872 static int liquidio_stop(struct net_device *netdev)
1874 struct lio *lio = GET_LIO(netdev);
1875 struct octeon_device *oct = lio->oct_dev;
1876 struct octeon_device_priv *oct_priv =
1877 (struct octeon_device_priv *)oct->priv;
1878 struct napi_struct *napi, *n;
1880 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1882 /* Stop any link updates */
1887 /* Inform that netif carrier is down */
1888 netif_carrier_off(netdev);
1889 netif_tx_disable(netdev);
1891 lio->linfo.link.s.link_up = 0;
1892 lio->link_changes++;
1894 /* Tell Octeon that nic interface is down. */
1895 send_rx_ctrl_cmd(lio, 0);
1897 if (OCTEON_CN23XX_PF(oct)) {
1899 cleanup_tx_poll_fn(netdev);
1901 cleanup_tx_poll_fn(netdev);
1904 cancel_delayed_work_sync(&lio->stats_wk.work);
1906 if (lio->ptp_clock) {
1907 ptp_clock_unregister(lio->ptp_clock);
1908 lio->ptp_clock = NULL;
1911 /* Wait for any pending Rx descriptors */
1912 if (lio_wait_for_clean_oq(oct))
1913 netif_info(lio, rx_err, lio->netdev,
1914 "Proceeding with stop interface after partial RX desc processing\n");
1916 if (oct->props[lio->ifidx].napi_enabled == 1) {
1917 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1920 oct->props[lio->ifidx].napi_enabled = 0;
1922 if (OCTEON_CN23XX_PF(oct))
1923 oct->droq[0]->ops.poll_mode = 0;
1925 tasklet_enable(&oct_priv->droq_tasklet);
1928 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1934 * \brief Converts a mask based on net device flags
1935 * @param netdev network device
1937 * This routine generates a octnet_ifflags mask from the net device flags
1938 * received from the OS.
1940 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1942 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1944 if (netdev->flags & IFF_PROMISC)
1945 f |= OCTNET_IFFLAG_PROMISC;
1947 if (netdev->flags & IFF_ALLMULTI)
1948 f |= OCTNET_IFFLAG_ALLMULTI;
1950 if (netdev->flags & IFF_MULTICAST) {
1951 f |= OCTNET_IFFLAG_MULTICAST;
1953 /* Accept all multicast addresses if there are more than we
1956 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1957 f |= OCTNET_IFFLAG_ALLMULTI;
1960 if (netdev->flags & IFF_BROADCAST)
1961 f |= OCTNET_IFFLAG_BROADCAST;
1967 * \brief Net device set_multicast_list
1968 * @param netdev network device
1970 static void liquidio_set_mcast_list(struct net_device *netdev)
1972 struct lio *lio = GET_LIO(netdev);
1973 struct octeon_device *oct = lio->oct_dev;
1974 struct octnic_ctrl_pkt nctrl;
1975 struct netdev_hw_addr *ha;
1978 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1980 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1982 /* Create a ctrl pkt command to be sent to core app. */
1984 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1985 nctrl.ncmd.s.param1 = get_new_flags(netdev);
1986 nctrl.ncmd.s.param2 = mc_count;
1987 nctrl.ncmd.s.more = mc_count;
1988 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1989 nctrl.netpndev = (u64)netdev;
1990 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1992 /* copy all the addresses into the udd */
1994 netdev_for_each_mc_addr(ha, netdev) {
1996 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1997 /* no need to swap bytes */
1999 if (++mc > &nctrl.udd[mc_count])
2003 /* Apparently, any activity in this call from the kernel has to
2004 * be atomic. So we won't wait for response.
2007 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2009 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2015 * \brief Net device set_mac_address
2016 * @param netdev network device
2018 static int liquidio_set_mac(struct net_device *netdev, void *p)
2021 struct lio *lio = GET_LIO(netdev);
2022 struct octeon_device *oct = lio->oct_dev;
2023 struct sockaddr *addr = (struct sockaddr *)p;
2024 struct octnic_ctrl_pkt nctrl;
2026 if (!is_valid_ether_addr(addr->sa_data))
2027 return -EADDRNOTAVAIL;
2029 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2032 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2033 nctrl.ncmd.s.param1 = 0;
2034 nctrl.ncmd.s.more = 1;
2035 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2036 nctrl.netpndev = (u64)netdev;
2039 /* The MAC Address is presented in network byte order. */
2040 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2042 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2044 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2048 if (nctrl.sc_status) {
2049 dev_err(&oct->pci_dev->dev,
2050 "%s: MAC Address change failed. sc return=%x\n",
2051 __func__, nctrl.sc_status);
2055 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2056 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2062 liquidio_get_stats64(struct net_device *netdev,
2063 struct rtnl_link_stats64 *lstats)
2065 struct lio *lio = GET_LIO(netdev);
2066 struct octeon_device *oct;
2067 u64 pkts = 0, drop = 0, bytes = 0;
2068 struct oct_droq_stats *oq_stats;
2069 struct oct_iq_stats *iq_stats;
2070 int i, iq_no, oq_no;
2074 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2077 for (i = 0; i < oct->num_iqs; i++) {
2078 iq_no = lio->linfo.txpciq[i].s.q_no;
2079 iq_stats = &oct->instr_queue[iq_no]->stats;
2080 pkts += iq_stats->tx_done;
2081 drop += iq_stats->tx_dropped;
2082 bytes += iq_stats->tx_tot_bytes;
2085 lstats->tx_packets = pkts;
2086 lstats->tx_bytes = bytes;
2087 lstats->tx_dropped = drop;
2093 for (i = 0; i < oct->num_oqs; i++) {
2094 oq_no = lio->linfo.rxpciq[i].s.q_no;
2095 oq_stats = &oct->droq[oq_no]->stats;
2096 pkts += oq_stats->rx_pkts_received;
2097 drop += (oq_stats->rx_dropped +
2098 oq_stats->dropped_nodispatch +
2099 oq_stats->dropped_toomany +
2100 oq_stats->dropped_nomem);
2101 bytes += oq_stats->rx_bytes_received;
2104 lstats->rx_bytes = bytes;
2105 lstats->rx_packets = pkts;
2106 lstats->rx_dropped = drop;
2108 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2109 lstats->collisions = oct->link_stats.fromhost.total_collisions;
2111 /* detailed rx_errors: */
2112 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2113 /* recved pkt with crc error */
2114 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2115 /* recv'd frame alignment error */
2116 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2117 /* recv'r fifo overrun */
2118 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2120 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2121 lstats->rx_frame_errors + lstats->rx_fifo_errors;
2123 /* detailed tx_errors */
2124 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2125 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2126 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2128 lstats->tx_errors = lstats->tx_aborted_errors +
2129 lstats->tx_carrier_errors +
2130 lstats->tx_fifo_errors;
2134 * \brief Handler for SIOCSHWTSTAMP ioctl
2135 * @param netdev network device
2136 * @param ifr interface request
2137 * @param cmd command
2139 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2141 struct hwtstamp_config conf;
2142 struct lio *lio = GET_LIO(netdev);
2144 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2150 switch (conf.tx_type) {
2151 case HWTSTAMP_TX_ON:
2152 case HWTSTAMP_TX_OFF:
2158 switch (conf.rx_filter) {
2159 case HWTSTAMP_FILTER_NONE:
2161 case HWTSTAMP_FILTER_ALL:
2162 case HWTSTAMP_FILTER_SOME:
2163 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2164 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2165 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2166 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2167 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2168 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2169 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2170 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2171 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2172 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2173 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2174 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2175 case HWTSTAMP_FILTER_NTP_ALL:
2176 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2182 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2183 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2186 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2188 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2192 * \brief ioctl handler
2193 * @param netdev network device
2194 * @param ifr interface request
2195 * @param cmd command
2197 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2199 struct lio *lio = GET_LIO(netdev);
2203 if (lio->oct_dev->ptp_enable)
2204 return hwtstamp_ioctl(netdev, ifr);
2212 * \brief handle a Tx timestamp response
2213 * @param status response status
2214 * @param buf pointer to skb
2216 static void handle_timestamp(struct octeon_device *oct,
2220 struct octnet_buf_free_info *finfo;
2221 struct octeon_soft_command *sc;
2222 struct oct_timestamp_resp *resp;
2224 struct sk_buff *skb = (struct sk_buff *)buf;
2226 finfo = (struct octnet_buf_free_info *)skb->cb;
2230 resp = (struct oct_timestamp_resp *)sc->virtrptr;
2232 if (status != OCTEON_REQUEST_DONE) {
2233 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2234 CVM_CAST64(status));
2235 resp->timestamp = 0;
2238 octeon_swap_8B_data(&resp->timestamp, 1);
2240 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2241 struct skb_shared_hwtstamps ts;
2242 u64 ns = resp->timestamp;
2244 netif_info(lio, tx_done, lio->netdev,
2245 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2246 skb, (unsigned long long)ns);
2247 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2248 skb_tstamp_tx(skb, &ts);
2251 octeon_free_soft_command(oct, sc);
2252 tx_buffer_free(skb);
2255 /* \brief Send a data packet that will be timestamped
2256 * @param oct octeon device
2257 * @param ndata pointer to network data
2258 * @param finfo pointer to private network data
2260 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2261 struct octnic_data_pkt *ndata,
2262 struct octnet_buf_free_info *finfo,
2266 struct octeon_soft_command *sc;
2273 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2274 sizeof(struct oct_timestamp_resp));
2278 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2279 return IQ_SEND_FAILED;
2282 if (ndata->reqtype == REQTYPE_NORESP_NET)
2283 ndata->reqtype = REQTYPE_RESP_NET;
2284 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2285 ndata->reqtype = REQTYPE_RESP_NET_SG;
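/* A timestamped packet needs a completion that carries the Tx timestamp, so
 * the "no response" request type is promoted to its response-carrying variant
 * and handle_timestamp() is installed as the completion callback below.
 */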
2287 sc->callback = handle_timestamp;
2288 sc->callback_arg = finfo->skb;
2289 sc->iq_no = ndata->q_no;
2291 if (OCTEON_CN23XX_PF(oct))
2292 len = (u32)((struct octeon_instr_ih3 *)
2293 (&sc->cmd.cmd3.ih3))->dlengsz;
2295 len = (u32)((struct octeon_instr_ih2 *)
2296 (&sc->cmd.cmd2.ih2))->dlengsz;
2298 ring_doorbell = !xmit_more;
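/* Ring the doorbell only when the stack indicates no further packets are
 * queued behind this one (xmit_more is false), batching doorbell writes
 * across a burst of transmits.
 */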
2300 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2301 sc, len, ndata->reqtype);
2303 if (retval == IQ_SEND_FAILED) {
2304 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2306 octeon_free_soft_command(oct, sc);
2308 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2314 /** \brief Transmit network packets to the Octeon interface
2315 * @param skb skbuff to be transmitted to the device
2316 * @param netdev pointer to network device
2317 * @returns whether the packet was transmitted to the device okay or not
2318 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2320 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2323 struct octnet_buf_free_info *finfo;
2324 union octnic_cmd_setup cmdsetup;
2325 struct octnic_data_pkt ndata;
2326 struct octeon_device *oct;
2327 struct oct_iq_stats *stats;
2328 struct octeon_instr_irh *irh;
2329 union tx_info *tx_info;
2331 int q_idx = 0, iq_no = 0;
2332 int j, xmit_more = 0;
2336 lio = GET_LIO(netdev);
2339 q_idx = skb_iq(oct, skb);
2341 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2343 stats = &oct->instr_queue[iq_no]->stats;
2345 /* Check for all conditions in which the current packet cannot be transmitted. */
2348 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2349 (!lio->linfo.link.s.link_up) ||
2351 netif_info(lio, tx_err, lio->netdev,
2352 "Transmit failed link_status : %d\n",
2353 lio->linfo.link.s.link_up);
2354 goto lio_xmit_failed;
2357 /* Use space in skb->cb to store info used to unmap and free the buffers. */
2360 finfo = (struct octnet_buf_free_info *)skb->cb;
2365 /* Prepare the attributes for the data to be passed to OSI. */
2366 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2368 ndata.buf = (void *)finfo;
2372 if (octnet_iq_is_full(oct, ndata.q_no)) {
2373 /* defer sending if queue is full */
2374 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2376 stats->tx_iq_busy++;
2377 return NETDEV_TX_BUSY;
2380 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2381 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2384 ndata.datasize = skb->len;
2387 cmdsetup.s.iq_no = iq_no;
2389 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2390 if (skb->encapsulation) {
2391 cmdsetup.s.tnl_csum = 1;
2394 cmdsetup.s.transport_csum = 1;
2397 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2398 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2399 cmdsetup.s.timestamp = 1;
2402 if (skb_shinfo(skb)->nr_frags == 0) {
2403 cmdsetup.s.u.datasize = skb->len;
2404 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2406 /* Map the linear skb data for DMA to the device */
2407 dptr = dma_map_single(&oct->pci_dev->dev,
2411 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2412 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2414 stats->tx_dmamap_fail++;
2415 return NETDEV_TX_BUSY;
2418 if (OCTEON_CN23XX_PF(oct))
2419 ndata.cmd.cmd3.dptr = dptr;
2421 ndata.cmd.cmd2.dptr = dptr;
2423 ndata.reqtype = REQTYPE_NORESP_NET;
2427 struct skb_frag_struct *frag;
2428 struct octnic_gather *g;
2430 spin_lock(&lio->glist_lock[q_idx]);
2431 g = (struct octnic_gather *)
2432 lio_list_delete_head(&lio->glist[q_idx]);
2433 spin_unlock(&lio->glist_lock[q_idx]);
2436 netif_info(lio, tx_err, lio->netdev,
2437 "Transmit scatter gather: glist null!\n");
2438 goto lio_xmit_failed;
2441 cmdsetup.s.gather = 1;
2442 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2443 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2445 memset(g->sg, 0, g->sg_size);
2447 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2449 (skb->len - skb->data_len),
2451 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2452 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2454 stats->tx_dmamap_fail++;
2455 return NETDEV_TX_BUSY;
2457 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2459 frags = skb_shinfo(skb)->nr_frags;
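/* Each Octeon gather entry holds four pointers, so fragment i lands in
 * sg[i >> 2], slot (i & 3). Entry 0, slot 0 already points at the linear
 * part of the skb mapped above.
 */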
2462 frag = &skb_shinfo(skb)->frags[i - 1];
2464 g->sg[(i >> 2)].ptr[(i & 3)] =
2465 dma_map_page(&oct->pci_dev->dev,
2471 if (dma_mapping_error(&oct->pci_dev->dev,
2472 g->sg[i >> 2].ptr[i & 3])) {
2473 dma_unmap_single(&oct->pci_dev->dev,
2475 skb->len - skb->data_len,
2477 for (j = 1; j < i; j++) {
2478 frag = &skb_shinfo(skb)->frags[j - 1];
2479 dma_unmap_page(&oct->pci_dev->dev,
2480 g->sg[j >> 2].ptr[j & 3],
2484 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2486 return NETDEV_TX_BUSY;
2489 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2493 dptr = g->sg_dma_ptr;
2495 if (OCTEON_CN23XX_PF(oct))
2496 ndata.cmd.cmd3.dptr = dptr;
2498 ndata.cmd.cmd2.dptr = dptr;
2502 ndata.reqtype = REQTYPE_NORESP_NET_SG;
2505 if (OCTEON_CN23XX_PF(oct)) {
2506 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2507 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2509 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2510 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2513 if (skb_shinfo(skb)->gso_size) {
2514 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2515 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
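/* The GSO size and segment count travel in the command's out-of-band (ossp)
 * word; the firmware is expected to use them to segment the payload (TSO).
 */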
2519 /* HW insert VLAN tag */
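/* Split the VLAN TCI for the instruction header: the priority (PCP) field is
 * the top three bits of the tag (hence the shift by 13) and the VLAN ID is
 * the low twelve bits.
 */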
2520 if (skb_vlan_tag_present(skb)) {
2521 irh->priority = skb_vlan_tag_get(skb) >> 13;
2522 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2525 xmit_more = skb->xmit_more;
2527 if (unlikely(cmdsetup.s.timestamp))
2528 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2530 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2531 if (status == IQ_SEND_FAILED)
2532 goto lio_xmit_failed;
2534 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2536 if (status == IQ_SEND_STOP)
2537 netif_stop_subqueue(netdev, q_idx);
2539 netif_trans_update(netdev);
2541 if (tx_info->s.gso_segs)
2542 stats->tx_done += tx_info->s.gso_segs;
2545 stats->tx_tot_bytes += ndata.datasize;
2547 return NETDEV_TX_OK;
2550 stats->tx_dropped++;
2551 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2552 iq_no, stats->tx_dropped);
2554 dma_unmap_single(&oct->pci_dev->dev, dptr,
2555 ndata.datasize, DMA_TO_DEVICE);
2557 octeon_ring_doorbell_locked(oct, iq_no);
2559 tx_buffer_free(skb);
2560 return NETDEV_TX_OK;
2563 /** \brief Network device Tx timeout
2564 * @param netdev pointer to network device
2566 static void liquidio_tx_timeout(struct net_device *netdev)
2570 lio = GET_LIO(netdev);
2572 netif_info(lio, tx_err, lio->netdev,
2573 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2574 netdev->stats.tx_dropped);
2575 netif_trans_update(netdev);
2579 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2580 __be16 proto __attribute__((unused)),
2583 struct lio *lio = GET_LIO(netdev);
2584 struct octeon_device *oct = lio->oct_dev;
2585 struct octnic_ctrl_pkt nctrl;
2588 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2591 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2592 nctrl.ncmd.s.param1 = vid;
2593 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2594 nctrl.netpndev = (u64)netdev;
2595 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2597 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2599 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2608 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2609 __be16 proto __attribute__((unused)),
2612 struct lio *lio = GET_LIO(netdev);
2613 struct octeon_device *oct = lio->oct_dev;
2614 struct octnic_ctrl_pkt nctrl;
2617 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2620 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2621 nctrl.ncmd.s.param1 = vid;
2622 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2623 nctrl.netpndev = (u64)netdev;
2624 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2626 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2628 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2636 /** \brief Send command to firmware to enable/disable RX checksum offload
2637 * @param netdev pointer to network device
2638 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
2639 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/
2640 * OCTNET_CMD_RXCSUM_DISABLE
2641 * @returns SUCCESS or FAILURE
2643 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2646 struct lio *lio = GET_LIO(netdev);
2647 struct octeon_device *oct = lio->oct_dev;
2648 struct octnic_ctrl_pkt nctrl;
2651 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2654 nctrl.ncmd.s.cmd = command;
2655 nctrl.ncmd.s.param1 = rx_cmd;
2656 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2657 nctrl.netpndev = (u64)netdev;
2658 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2660 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2662 dev_err(&oct->pci_dev->dev,
2663 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2671 /** \brief Send command to firmware to add/delete a VxLAN UDP port
2672 * @param netdev pointer to network device
2673 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
2674 * @param vxlan_port VxLAN port to be added or deleted
2675 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
2676 * OCTNET_CMD_VXLAN_PORT_DEL
2677 * @returns SUCCESS or FAILURE
2679 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2680 u16 vxlan_port, u8 vxlan_cmd_bit)
2682 struct lio *lio = GET_LIO(netdev);
2683 struct octeon_device *oct = lio->oct_dev;
2684 struct octnic_ctrl_pkt nctrl;
2687 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2690 nctrl.ncmd.s.cmd = command;
2691 nctrl.ncmd.s.more = vxlan_cmd_bit;
2692 nctrl.ncmd.s.param1 = vxlan_port;
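/* The UDP port rides in param1, while the add/delete sub-command
 * (vxlan_cmd_bit) is carried in the command's 'more' field.
 */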
2693 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2694 nctrl.netpndev = (u64)netdev;
2695 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2697 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2699 dev_err(&oct->pci_dev->dev,
2700 "VxLAN port add/delete failed in core (ret:0x%x)\n",
2708 /** \brief Net device fix features
2709 * @param netdev pointer to network device
2710 * @param request features requested
2711 * @returns updated features list
2713 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2714 netdev_features_t request)
2716 struct lio *lio = netdev_priv(netdev);
2718 if ((request & NETIF_F_RXCSUM) &&
2719 !(lio->dev_capability & NETIF_F_RXCSUM))
2720 request &= ~NETIF_F_RXCSUM;
2722 if ((request & NETIF_F_HW_CSUM) &&
2723 !(lio->dev_capability & NETIF_F_HW_CSUM))
2724 request &= ~NETIF_F_HW_CSUM;
2726 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2727 request &= ~NETIF_F_TSO;
2729 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2730 request &= ~NETIF_F_TSO6;
2732 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2733 request &= ~NETIF_F_LRO;
2735 /* Disable LRO if RXCSUM is off */
2736 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2737 (lio->dev_capability & NETIF_F_LRO))
2738 request &= ~NETIF_F_LRO;
2740 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2741 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2742 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2747 /** \brief Net device set features
2748 * @param netdev pointer to network device
2749 * @param features features to enable/disable
2751 static int liquidio_set_features(struct net_device *netdev,
2752 netdev_features_t features)
2754 struct lio *lio = netdev_priv(netdev);
2756 if ((features & NETIF_F_LRO) &&
2757 (lio->dev_capability & NETIF_F_LRO) &&
2758 !(netdev->features & NETIF_F_LRO))
2759 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2760 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2761 else if (!(features & NETIF_F_LRO) &&
2762 (lio->dev_capability & NETIF_F_LRO) &&
2763 (netdev->features & NETIF_F_LRO))
2764 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2765 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2767 /* Send command to firmware to enable/disable RX checksum
2768 * offload, as requested via ethtool
2770 if (!(netdev->features & NETIF_F_RXCSUM) &&
2771 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2772 (features & NETIF_F_RXCSUM))
2773 liquidio_set_rxcsum_command(netdev,
2774 OCTNET_CMD_TNL_RX_CSUM_CTL,
2775 OCTNET_CMD_RXCSUM_ENABLE);
2776 else if ((netdev->features & NETIF_F_RXCSUM) &&
2777 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2778 !(features & NETIF_F_RXCSUM))
2779 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2780 OCTNET_CMD_RXCSUM_DISABLE);
2782 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2783 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2784 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2785 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2786 OCTNET_CMD_VLAN_FILTER_ENABLE);
2787 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2788 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2789 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2790 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2791 OCTNET_CMD_VLAN_FILTER_DISABLE);
2796 static void liquidio_add_vxlan_port(struct net_device *netdev,
2797 struct udp_tunnel_info *ti)
2799 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2802 liquidio_vxlan_port_command(netdev,
2803 OCTNET_CMD_VXLAN_PORT_CONFIG,
2805 OCTNET_CMD_VXLAN_PORT_ADD);
2808 static void liquidio_del_vxlan_port(struct net_device *netdev,
2809 struct udp_tunnel_info *ti)
2811 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2814 liquidio_vxlan_port_command(netdev,
2815 OCTNET_CMD_VXLAN_PORT_CONFIG,
2817 OCTNET_CMD_VXLAN_PORT_DEL);
2820 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2821 u8 *mac, bool is_admin_assigned)
2823 struct lio *lio = GET_LIO(netdev);
2824 struct octeon_device *oct = lio->oct_dev;
2825 struct octnic_ctrl_pkt nctrl;
2828 if (!is_valid_ether_addr(mac))
2831 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2834 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2837 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2838 /* vfidx is 0 based, but vf_num (param1) is 1 based */
2839 nctrl.ncmd.s.param1 = vfidx + 1;
2840 nctrl.ncmd.s.more = 1;
2841 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2842 nctrl.netpndev = (u64)netdev;
2843 if (is_admin_assigned) {
2844 nctrl.ncmd.s.param2 = true;
2845 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2849 /* The MAC Address is presented in network byte order. */
2850 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
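/* The MAC occupies bytes 2..7 of the 8-byte udd[0] word; the whole word is
 * cached in vf_macaddr below so it can later be reported through
 * ndo_get_vf_config.
 */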
2852 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2854 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2861 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2863 struct lio *lio = GET_LIO(netdev);
2864 struct octeon_device *oct = lio->oct_dev;
2867 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2870 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2872 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2877 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2880 struct lio *lio = GET_LIO(netdev);
2881 struct octeon_device *oct = lio->oct_dev;
2882 struct octnic_ctrl_pkt nctrl;
2885 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2886 netif_info(lio, drv, lio->netdev,
2887 "firmware does not support spoofchk\n");
2891 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2892 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2897 if (oct->sriov_info.vf_spoofchk[vfidx])
2901 if (!oct->sriov_info.vf_spoofchk[vfidx])
2905 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2906 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2907 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2908 nctrl.ncmd.s.param1 =
2909 vfidx + 1; /* vfidx is 0 based,
2910 * but vf_num (param1) is 1 based
2912 nctrl.ncmd.s.param2 = enable;
2913 nctrl.ncmd.s.more = 0;
2914 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2917 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2920 netif_info(lio, drv, lio->netdev,
2921 "Failed to set VF %d spoofchk %s\n", vfidx,
2922 enable ? "on" : "off");
2926 oct->sriov_info.vf_spoofchk[vfidx] = enable;
2927 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2928 enable ? "on" : "off");
2933 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2934 u16 vlan, u8 qos, __be16 vlan_proto)
2936 struct lio *lio = GET_LIO(netdev);
2937 struct octeon_device *oct = lio->oct_dev;
2938 struct octnic_ctrl_pkt nctrl;
2942 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2945 if (vlan_proto != htons(ETH_P_8021Q))
2946 return -EPROTONOSUPPORT;
2948 if (vlan >= VLAN_N_VID || qos > 7)
2952 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
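/* Compose the 802.1Q TCI: VLAN ID in the low twelve bits, the requested QoS
 * (priority) shifted into bits 13-15 by VLAN_PRIO_SHIFT.
 */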
2956 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2959 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2962 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2964 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2966 nctrl.ncmd.s.param1 = vlantci;
2967 nctrl.ncmd.s.param2 =
2968 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2969 nctrl.ncmd.s.more = 0;
2970 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2973 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2980 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2985 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2986 struct ifla_vf_info *ivi)
2988 struct lio *lio = GET_LIO(netdev);
2989 struct octeon_device *oct = lio->oct_dev;
2992 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2995 memset(ivi, 0, sizeof(struct ifla_vf_info));
2998 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2999 ether_addr_copy(&ivi->mac[0], macaddr);
3000 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3001 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3002 if (oct->sriov_info.trusted_vf.active &&
3003 oct->sriov_info.trusted_vf.id == vfidx)
3004 ivi->trusted = true;
3006 ivi->trusted = false;
3007 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3008 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
3009 ivi->max_tx_rate = lio->linfo.link.s.speed;
3010 ivi->min_tx_rate = 0;
3015 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3017 struct octeon_device *oct = lio->oct_dev;
3018 struct octeon_soft_command *sc;
3021 sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3025 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3027 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3028 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3029 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3032 init_completion(&sc->complete);
3033 sc->sc_status = OCTEON_REQUEST_PENDING;
3035 retval = octeon_send_soft_command(oct, sc);
3036 if (retval == IQ_SEND_FAILED) {
3037 octeon_free_soft_command(oct, sc);
3040 /* Wait for response or timeout */
3041 retval = wait_for_sc_completion_timeout(oct, sc, 0);
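/* Whether the command completed or timed out has been decided by this point;
 * marking caller_is_done signals that this caller no longer needs the soft
 * command, so its buffers can be reclaimed.
 */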
3045 WRITE_ONCE(sc->caller_is_done, true);
3051 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3054 struct lio *lio = GET_LIO(netdev);
3055 struct octeon_device *oct = lio->oct_dev;
3057 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3058 /* trusted vf is not supported by firmware older than 1.7.1 */
3062 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3063 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3070 if (oct->sriov_info.trusted_vf.active &&
3071 oct->sriov_info.trusted_vf.id == vfidx)
3074 if (oct->sriov_info.trusted_vf.active) {
3075 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3081 if (!oct->sriov_info.trusted_vf.active)
3085 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3087 oct->sriov_info.trusted_vf.id = vfidx;
3088 oct->sriov_info.trusted_vf.active = true;
3090 oct->sriov_info.trusted_vf.active = false;
3093 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3094 setting ? "" : "not ");
3096 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3103 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3106 struct lio *lio = GET_LIO(netdev);
3107 struct octeon_device *oct = lio->oct_dev;
3108 struct octnic_ctrl_pkt nctrl;
3111 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3114 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3117 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3118 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3119 nctrl.ncmd.s.param1 =
3120 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3121 nctrl.ncmd.s.param2 = linkstate;
3122 nctrl.ncmd.s.more = 0;
3123 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3126 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3129 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3137 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3139 struct lio_devlink_priv *priv;
3140 struct octeon_device *oct;
3142 priv = devlink_priv(devlink);
3145 *mode = oct->eswitch_mode;
3151 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3152 struct netlink_ext_ack *extack)
3154 struct lio_devlink_priv *priv;
3155 struct octeon_device *oct;
3158 priv = devlink_priv(devlink);
3161 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3164 if (oct->eswitch_mode == mode)
3168 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3169 oct->eswitch_mode = mode;
3170 ret = lio_vf_rep_create(oct);
3173 case DEVLINK_ESWITCH_MODE_LEGACY:
3174 lio_vf_rep_destroy(oct);
3175 oct->eswitch_mode = mode;
3185 static const struct devlink_ops liquidio_devlink_ops = {
3186 .eswitch_mode_get = liquidio_eswitch_mode_get,
3187 .eswitch_mode_set = liquidio_eswitch_mode_set,
3191 liquidio_get_port_parent_id(struct net_device *dev,
3192 struct netdev_phys_item_id *ppid)
3194 struct lio *lio = GET_LIO(dev);
3195 struct octeon_device *oct = lio->oct_dev;
3197 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3200 ppid->id_len = ETH_ALEN;
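/* Use the port MAC address (stored at byte offset 2 of hw_addr) as the
 * parent switch ID, presumably so every netdev on the same physical port
 * reports a matching ID.
 */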
3201 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3206 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3207 struct ifla_vf_stats *vf_stats)
3209 struct lio *lio = GET_LIO(netdev);
3210 struct octeon_device *oct = lio->oct_dev;
3211 struct oct_vf_stats stats;
3214 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3217 memset(&stats, 0, sizeof(struct oct_vf_stats));
3218 ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3220 vf_stats->rx_packets = stats.rx_packets;
3221 vf_stats->tx_packets = stats.tx_packets;
3222 vf_stats->rx_bytes = stats.rx_bytes;
3223 vf_stats->tx_bytes = stats.tx_bytes;
3224 vf_stats->broadcast = stats.broadcast;
3225 vf_stats->multicast = stats.multicast;
3231 static const struct net_device_ops lionetdevops = {
3232 .ndo_open = liquidio_open,
3233 .ndo_stop = liquidio_stop,
3234 .ndo_start_xmit = liquidio_xmit,
3235 .ndo_get_stats64 = liquidio_get_stats64,
3236 .ndo_set_mac_address = liquidio_set_mac,
3237 .ndo_set_rx_mode = liquidio_set_mcast_list,
3238 .ndo_tx_timeout = liquidio_tx_timeout,
3240 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3241 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
3242 .ndo_change_mtu = liquidio_change_mtu,
3243 .ndo_do_ioctl = liquidio_ioctl,
3244 .ndo_fix_features = liquidio_fix_features,
3245 .ndo_set_features = liquidio_set_features,
3246 .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
3247 .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
3248 .ndo_set_vf_mac = liquidio_set_vf_mac,
3249 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3250 .ndo_get_vf_config = liquidio_get_vf_config,
3251 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk,
3252 .ndo_set_vf_trust = liquidio_set_vf_trust,
3253 .ndo_set_vf_link_state = liquidio_set_vf_link_state,
3254 .ndo_get_vf_stats = liquidio_get_vf_stats,
3255 .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3258 /** \brief Entry point for the liquidio module
3260 static int __init liquidio_init(void)
3263 struct handshake *hs;
3265 init_completion(&first_stage);
3267 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3269 if (liquidio_init_pci())
3272 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3274 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3277 wait_for_completion(&hs->init);
3279 /* init handshake failed */
3280 dev_err(&hs->pci_dev->dev,
3281 "Failed to init device\n");
3282 liquidio_deinit_pci();
3288 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3291 wait_for_completion_timeout(&hs->started,
3292 msecs_to_jiffies(30000));
3293 if (!hs->started_ok) {
3294 /* starter handshake failed */
3295 dev_err(&hs->pci_dev->dev,
3296 "Firmware failed to start\n");
3297 liquidio_deinit_pci();
3306 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3308 struct octeon_device *oct = (struct octeon_device *)buf;
3309 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3311 union oct_link_status *ls;
3314 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3315 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3316 recv_pkt->buffer_size[0],
3317 recv_pkt->rh.r_nic_info.gmxport);
3321 gmxport = recv_pkt->rh.r_nic_info.gmxport;
3322 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3323 OCT_DROQ_INFO_SIZE);
3325 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3326 for (i = 0; i < oct->ifcount; i++) {
3327 if (oct->props[i].gmxport == gmxport) {
3328 update_link_status(oct->props[i].netdev, ls);
3334 for (i = 0; i < recv_pkt->buffer_count; i++)
3335 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3336 octeon_free_recv_info(recv_info);
3341 * \brief Setup network interfaces
3342 * @param octeon_dev octeon device
3344 * Called during init time for each device. It assumes the NIC
3345 * is already up and running. The link information for each
3346 * interface is passed in link_info.
3348 static int setup_nic_devices(struct octeon_device *octeon_dev)
3350 struct lio *lio = NULL;
3351 struct net_device *netdev;
3352 u8 mac[6], i, j, *fw_ver, *micro_ver;
3353 unsigned long micro;
3355 struct octeon_soft_command *sc;
3356 struct liquidio_if_cfg_resp *resp;
3357 struct octdev_props *props;
3358 int retval, num_iqueues, num_oqueues;
3359 int max_num_queues = 0;
3360 union oct_nic_if_cfg if_cfg;
3361 unsigned int base_queue;
3362 unsigned int gmx_port_id;
3363 u32 resp_size, data_size;
3365 struct lio_version *vdata;
3366 struct devlink *devlink;
3367 struct lio_devlink_priv *lio_devlink;
3369 /* This is to handle link status changes */
3370 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3372 lio_nic_info, octeon_dev);
3374 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3375 * They are handled directly.
3377 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3380 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3383 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3384 free_netsgbuf_with_resp);
3386 for (i = 0; i < octeon_dev->ifcount; i++) {
3387 resp_size = sizeof(struct liquidio_if_cfg_resp);
3388 data_size = sizeof(struct lio_version);
3389 sc = (struct octeon_soft_command *)
3390 octeon_alloc_soft_command(octeon_dev, data_size,
3392 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3393 vdata = (struct lio_version *)sc->virtdptr;
3395 *((u64 *)vdata) = 0;
3396 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3397 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3398 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
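/* The host's base driver version is sent (big-endian) along with the
 * interface-config request; the firmware's reply carries its own version
 * string, which is validated further below.
 */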
3400 if (OCTEON_CN23XX_PF(octeon_dev)) {
3401 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3402 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3403 base_queue = octeon_dev->sriov_info.pf_srn;
3405 gmx_port_id = octeon_dev->pf_num;
3406 ifidx_or_pfnum = octeon_dev->pf_num;
3408 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3409 octeon_get_conf(octeon_dev), i);
3410 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3411 octeon_get_conf(octeon_dev), i);
3412 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3413 octeon_get_conf(octeon_dev), i);
3414 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3415 octeon_get_conf(octeon_dev), i);
3419 dev_dbg(&octeon_dev->pci_dev->dev,
3420 "requesting config for interface %d, iqs %d, oqs %d\n",
3421 ifidx_or_pfnum, num_iqueues, num_oqueues);
3424 if_cfg.s.num_iqueues = num_iqueues;
3425 if_cfg.s.num_oqueues = num_oqueues;
3426 if_cfg.s.base_queue = base_queue;
3427 if_cfg.s.gmx_port_id = gmx_port_id;
3431 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3432 OPCODE_NIC_IF_CFG, 0,
3435 init_completion(&sc->complete);
3436 sc->sc_status = OCTEON_REQUEST_PENDING;
3438 retval = octeon_send_soft_command(octeon_dev, sc);
3439 if (retval == IQ_SEND_FAILED) {
3440 dev_err(&octeon_dev->pci_dev->dev,
3441 "iq/oq config failed status: %x\n",
3443 /* Soft instr is freed by driver in case of failure. */
3444 octeon_free_soft_command(octeon_dev, sc);
3448 /* Wait for the response to arrive or the request to time out. */
3451 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3455 retval = resp->status;
3457 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3458 WRITE_ONCE(sc->caller_is_done, true);
3459 goto setup_nic_dev_done;
3461 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3463 resp->cfg_info.liquidio_firmware_version);
3465 /* Verify f/w version (in case of 'auto' loading from flash) */
3466 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3467 if (memcmp(LIQUIDIO_BASE_VERSION,
3469 strlen(LIQUIDIO_BASE_VERSION))) {
3470 dev_err(&octeon_dev->pci_dev->dev,
3471 "Unmatched firmware version. Expected %s.x, got %s.\n",
3472 LIQUIDIO_BASE_VERSION, fw_ver);
3473 WRITE_ONCE(sc->caller_is_done, true);
3474 goto setup_nic_dev_done;
3475 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3477 dev_info(&octeon_dev->pci_dev->dev,
3478 "Using auto-loaded firmware version %s.\n",
3482 /* extract micro version field; point past '<maj>.<min>.' */
3483 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3484 if (kstrtoul(micro_ver, 10, &micro) != 0)
3486 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3487 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3488 octeon_dev->fw_info.ver.rev = micro;
3490 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3491 (sizeof(struct liquidio_if_cfg_info)) >> 3);
3493 num_iqueues = hweight64(resp->cfg_info.iqmask);
3494 num_oqueues = hweight64(resp->cfg_info.oqmask);
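/* The firmware answers with bitmasks of the input/output queues it actually
 * granted; the popcounts above become the usable Tx/Rx queue counts for this
 * interface.
 */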
3496 if (!(num_iqueues) || !(num_oqueues)) {
3497 dev_err(&octeon_dev->pci_dev->dev,
3498 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3499 resp->cfg_info.iqmask,
3500 resp->cfg_info.oqmask);
3501 WRITE_ONCE(sc->caller_is_done, true);
3502 goto setup_nic_dev_done;
3505 if (OCTEON_CN6XXX(octeon_dev)) {
3506 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3508 } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3509 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3513 dev_dbg(&octeon_dev->pci_dev->dev,
3514 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3515 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3516 num_iqueues, num_oqueues, max_num_queues);
3517 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3520 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3521 WRITE_ONCE(sc->caller_is_done, true);
3522 goto setup_nic_dev_done;
3525 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3527 /* Associate the routines that will handle different
3530 netdev->netdev_ops = &lionetdevops;
3532 retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3534 dev_err(&octeon_dev->pci_dev->dev,
3535 "setting real number rx failed\n");
3536 WRITE_ONCE(sc->caller_is_done, true);
3537 goto setup_nic_dev_free;
3540 retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3542 dev_err(&octeon_dev->pci_dev->dev,
3543 "setting real number tx failed\n");
3544 WRITE_ONCE(sc->caller_is_done, true);
3545 goto setup_nic_dev_free;
3548 lio = GET_LIO(netdev);
3550 memset(lio, 0, sizeof(struct lio));
3552 lio->ifidx = ifidx_or_pfnum;
3554 props = &octeon_dev->props[i];
3555 props->gmxport = resp->cfg_info.linfo.gmxport;
3556 props->netdev = netdev;
3558 lio->linfo.num_rxpciq = num_oqueues;
3559 lio->linfo.num_txpciq = num_iqueues;
3560 for (j = 0; j < num_oqueues; j++) {
3561 lio->linfo.rxpciq[j].u64 =
3562 resp->cfg_info.linfo.rxpciq[j].u64;
3564 for (j = 0; j < num_iqueues; j++) {
3565 lio->linfo.txpciq[j].u64 =
3566 resp->cfg_info.linfo.txpciq[j].u64;
3568 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3569 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3570 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3572 WRITE_ONCE(sc->caller_is_done, true);
3574 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3576 if (OCTEON_CN23XX_PF(octeon_dev) ||
3577 OCTEON_CN6XXX(octeon_dev)) {
3578 lio->dev_capability = NETIF_F_HIGHDMA
3581 | NETIF_F_SG | NETIF_F_RXCSUM
3583 | NETIF_F_TSO | NETIF_F_TSO6
3586 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3588 /* Copy of transmit encapsulation capabilities:
3589 * TSO, TSO6, Checksums for this device
3591 lio->enc_dev_capability = NETIF_F_IP_CSUM
3593 | NETIF_F_GSO_UDP_TUNNEL
3594 | NETIF_F_HW_CSUM | NETIF_F_SG
3596 | NETIF_F_TSO | NETIF_F_TSO6
3599 netdev->hw_enc_features = (lio->enc_dev_capability &
3602 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3604 netdev->vlan_features = lio->dev_capability;
3605 /* Add any unchangeable hw features */
3606 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3607 NETIF_F_HW_VLAN_CTAG_RX |
3608 NETIF_F_HW_VLAN_CTAG_TX;
3610 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3612 netdev->hw_features = lio->dev_capability;
3613 /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3614 netdev->hw_features = netdev->hw_features &
3615 ~NETIF_F_HW_VLAN_CTAG_RX;
3617 /* MTU range: 68 - 16000 */
3618 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3619 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3621 /* Point to the properties for the octeon device to which this
3622 * interface belongs.
3624 lio->oct_dev = octeon_dev;
3625 lio->octprops = props;
3626 lio->netdev = netdev;
3628 dev_dbg(&octeon_dev->pci_dev->dev,
3629 "if%d gmx: %d hw_addr: 0x%llx\n", i,
3630 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3632 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3635 eth_random_addr(vfmac);
3636 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3637 dev_err(&octeon_dev->pci_dev->dev,
3638 "Error setting VF%d MAC address\n",
3640 goto setup_nic_dev_free;
3644 /* 64-bit swap required on LE machines */
3645 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3646 for (j = 0; j < 6; j++)
3647 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3649 /* Copy MAC Address to OS network device structure */
3651 ether_addr_copy(netdev->dev_addr, mac);
3653 /* By default all interfaces on a single Octeon use the same Tx and Rx queues. */
3656 lio->txq = lio->linfo.txpciq[0].s.q_no;
3657 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3658 if (liquidio_setup_io_queues(octeon_dev, i,
3659 lio->linfo.num_txpciq,
3660 lio->linfo.num_rxpciq)) {
3661 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3662 goto setup_nic_dev_free;
3665 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3667 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3668 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3670 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3671 dev_err(&octeon_dev->pci_dev->dev,
3672 "Gather list allocation failed\n");
3673 goto setup_nic_dev_free;
3676 /* Register ethtool support */
3677 liquidio_set_ethtool_ops(netdev);
3678 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3679 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3681 octeon_dev->priv_flags = 0x0;
3683 if (netdev->features & NETIF_F_LRO)
3684 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3685 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3687 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3688 OCTNET_CMD_VLAN_FILTER_ENABLE);
3690 if ((debug != -1) && (debug & NETIF_MSG_HW))
3691 liquidio_set_feature(netdev,
3692 OCTNET_CMD_VERBOSE_ENABLE, 0);
3694 if (setup_link_status_change_wq(netdev))
3695 goto setup_nic_dev_free;
3697 if ((octeon_dev->fw_info.app_cap_flags &
3698 LIQUIDIO_TIME_SYNC_CAP) &&
3699 setup_sync_octeon_time_wq(netdev))
3700 goto setup_nic_dev_free;
3702 if (setup_rx_oom_poll_fn(netdev))
3703 goto setup_nic_dev_free;
3705 /* Register the network device with the OS */
3706 if (register_netdev(netdev)) {
3707 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3708 goto setup_nic_dev_free;
3711 dev_dbg(&octeon_dev->pci_dev->dev,
3712 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3713 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3714 netif_carrier_off(netdev);
3715 lio->link_changes++;
3717 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3719 /* Send command to firmware to enable Rx checksum offload
3720 * by default when the LiquidIO driver sets up this device.
3723 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3724 OCTNET_CMD_RXCSUM_ENABLE);
3725 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3726 OCTNET_CMD_TXCSUM_ENABLE);
3728 dev_dbg(&octeon_dev->pci_dev->dev,
3729 "NIC ifidx:%d Setup successful\n", i);
3731 if (octeon_dev->subsystem_id ==
3732 OCTEON_CN2350_25GB_SUBSYS_ID ||
3733 octeon_dev->subsystem_id ==
3734 OCTEON_CN2360_25GB_SUBSYS_ID) {
3735 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3736 octeon_dev->fw_info.ver.min,
3737 octeon_dev->fw_info.ver.rev);
3739 /* speed control unsupported in f/w older than 1.7.2 */
3740 if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3741 dev_info(&octeon_dev->pci_dev->dev,
3742 "speed setting not supported by f/w.");
3743 octeon_dev->speed_setting = 25;
3744 octeon_dev->no_speed_setting = 1;
3746 liquidio_get_speed(lio);
3749 if (octeon_dev->speed_setting == 0) {
3750 octeon_dev->speed_setting = 25;
3751 octeon_dev->no_speed_setting = 1;
3754 octeon_dev->no_speed_setting = 1;
3755 octeon_dev->speed_setting = 10;
3757 octeon_dev->speed_boot = octeon_dev->speed_setting;
3759 /* don't read FEC setting if unsupported by f/w (see above) */
3760 if (octeon_dev->speed_boot == 25 &&
3761 !octeon_dev->no_speed_setting) {
3762 liquidio_get_fec(lio);
3763 octeon_dev->props[lio->ifidx].fec_boot =
3764 octeon_dev->props[lio->ifidx].fec;
3768 devlink = devlink_alloc(&liquidio_devlink_ops,
3769 sizeof(struct lio_devlink_priv));
3771 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3772 goto setup_nic_dev_free;
3775 lio_devlink = devlink_priv(devlink);
3776 lio_devlink->oct = octeon_dev;
3778 if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3779 devlink_free(devlink);
3780 dev_err(&octeon_dev->pci_dev->dev,
3781 "devlink registration failed\n");
3782 goto setup_nic_dev_free;
3785 octeon_dev->devlink = devlink;
3786 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3793 dev_err(&octeon_dev->pci_dev->dev,
3794 "NIC ifidx:%d Setup failed\n", i);
3795 liquidio_destroy_nic_device(octeon_dev, i);
3803 #ifdef CONFIG_PCI_IOV
3804 static int octeon_enable_sriov(struct octeon_device *oct)
3806 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3807 struct pci_dev *vfdev;
3811 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3812 err = pci_enable_sriov(oct->pci_dev,
3813 oct->sriov_info.num_vfs_alloced);
3815 dev_err(&oct->pci_dev->dev,
3816 "OCTEON: Failed to enable PCI sriov: %d\n",
3818 oct->sriov_info.num_vfs_alloced = 0;
3821 oct->sriov_info.sriov_enabled = 1;
3823 /* init lookup table that maps DPI ring number to VF pci_dev struct */
3827 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3828 OCTEON_CN23XX_VF_VID, NULL);
3830 if (vfdev->is_virtfn &&
3831 (vfdev->physfn == oct->pci_dev)) {
3832 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3834 u += oct->sriov_info.rings_per_vf;
3836 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3837 OCTEON_CN23XX_VF_VID, vfdev);
3841 return num_vfs_alloced;
3844 static int lio_pci_sriov_disable(struct octeon_device *oct)
3848 if (pci_vfs_assigned(oct->pci_dev)) {
3849 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3853 pci_disable_sriov(oct->pci_dev);
3856 while (u < MAX_POSSIBLE_VFS) {
3857 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3858 u += oct->sriov_info.rings_per_vf;
3861 oct->sriov_info.num_vfs_alloced = 0;
3862 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3868 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3870 struct octeon_device *oct = pci_get_drvdata(dev);
3873 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3874 (oct->sriov_info.sriov_enabled)) {
3875 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3876 oct->pf_num, num_vfs);
3881 lio_vf_rep_destroy(oct);
3882 ret = lio_pci_sriov_disable(oct);
3883 } else if (num_vfs > oct->sriov_info.max_vfs) {
3884 dev_err(&oct->pci_dev->dev,
3885 "OCTEON: Max allowed VFs:%d user requested:%d",
3886 oct->sriov_info.max_vfs, num_vfs);
3889 oct->sriov_info.num_vfs_alloced = num_vfs;
3890 ret = octeon_enable_sriov(oct);
3891 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3892 oct->pf_num, num_vfs);
3893 ret = lio_vf_rep_create(oct);
3895 dev_info(&oct->pci_dev->dev,
3896 "vf representor create failed");
3904 * \brief initialize the NIC
3905 * @param oct octeon device
3907 * This initialization routine is called once the Octeon device application is up and running.
3910 static int liquidio_init_nic_module(struct octeon_device *oct)
3913 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3915 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3917 /* only default iq and oq were initialized
3918 * initialize the rest as well
3920 /* run port_config command for each port */
3921 oct->ifcount = num_nic_ports;
3923 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3925 for (i = 0; i < MAX_OCTEON_LINKS; i++)
3926 oct->props[i].gmxport = -1;
3928 retval = setup_nic_devices(oct);
3930 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3931 goto octnet_init_failure;
3934 /* Call vf_rep_modinit if the firmware is switchdev capable
3935 * and do it from the first liquidio function probed.
3937 if (!oct->octeon_id &&
3938 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3939 retval = lio_vf_rep_modinit();
3941 liquidio_stop_nic_module(oct);
3942 goto octnet_init_failure;
3946 liquidio_ptp_init(oct);
3948 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3952 octnet_init_failure:
3960 * \brief starter callback that invokes the remaining initialization work after
3961 * the NIC is up and running.
3962 * @param work pointer to the work_struct
3964 static void nic_starter(struct work_struct *work)
3966 struct octeon_device *oct;
3967 struct cavium_wk *wk = (struct cavium_wk *)work;
3969 oct = (struct octeon_device *)wk->ctxptr;
3971 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3974 /* If the status of the device is CORE_OK, the core
3975 * application has reported its application type. Call
3976 * any registered handlers now and move to the RUNNING
3979 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3980 schedule_delayed_work(&oct->nic_poll_work.work,
3981 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3985 atomic_set(&oct->status, OCT_DEV_RUNNING);
3987 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3988 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3990 if (liquidio_init_nic_module(oct))
3991 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3993 handshake[oct->octeon_id].started_ok = 1;
3995 dev_err(&oct->pci_dev->dev,
3996 "Unexpected application running on NIC (%d). Check firmware.\n",
4000 complete(&handshake[oct->octeon_id].started);
4004 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4006 struct octeon_device *oct = (struct octeon_device *)buf;
4007 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4008 int i, notice, vf_idx;
4012 notice = recv_pkt->rh.r.ossp;
4013 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4015 /* the first 64-bit word of data is the vf_num */
4017 octeon_swap_8B_data(&vf_num, 1);
4018 vf_idx = (int)vf_num - 1;
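/* The firmware reports a 1-based VF number; convert it to the driver's
 * 0-based index before indexing the per-VF bookkeeping below.
 */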
4020 cores_crashed = READ_ONCE(oct->cores_crashed);
4022 if (notice == VF_DRV_LOADED) {
4023 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4024 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4025 dev_info(&oct->pci_dev->dev,
4026 "driver for VF%d was loaded\n", vf_idx);
4028 try_module_get(THIS_MODULE);
4030 } else if (notice == VF_DRV_REMOVED) {
4031 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4032 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4033 dev_info(&oct->pci_dev->dev,
4034 "driver for VF%d was removed\n", vf_idx);
4036 module_put(THIS_MODULE);
4038 } else if (notice == VF_DRV_MACADDR_CHANGED) {
4039 u8 *b = (u8 *)&data[1];
4041 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4042 dev_info(&oct->pci_dev->dev,
4043 "VF driver changed VF%d's MAC address to %pM\n",
4047 for (i = 0; i < recv_pkt->buffer_count; i++)
4048 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4049 octeon_free_recv_info(recv_info);
4055 * \brief Device initialization for each Octeon device that is probed
4056 * @param octeon_dev octeon device
4058 static int octeon_device_init(struct octeon_device *octeon_dev)
4061 char bootcmd[] = "\n";
4062 char *dbg_enb = NULL;
4063 enum lio_fw_state fw_state;
4064 struct octeon_device_priv *oct_priv =
4065 (struct octeon_device_priv *)octeon_dev->priv;
4066 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4068 /* Enable access to the octeon device and make its DMA capability known to the OS. */
4071 if (octeon_pci_os_setup(octeon_dev))
4074 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4076 /* Identify the Octeon type and map the BAR address space. */
4077 if (octeon_chip_specific_setup(octeon_dev)) {
4078 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4082 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4084 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4085 * since that is what is required for the reference to be removed
4086 * during de-initialization (see 'octeon_destroy_resources').
4088 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4089 PCI_SLOT(octeon_dev->pci_dev->devfn),
4090 PCI_FUNC(octeon_dev->pci_dev->devfn),
4093 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4095 /* CN23XX supports preloaded firmware if the following is true:
4097 * The adapter indicates that firmware is currently running AND
4098 * 'fw_type' is 'auto'.
4100 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4102 if (OCTEON_CN23XX_PF(octeon_dev) &&
4103 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4104 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4105 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4108 /* If loading firmware, only first device of adapter needs to do so. */
4109 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4110 FW_NEEDS_TO_BE_LOADED,
4111 FW_IS_BEING_LOADED);
4113 /* Here, [local variable] 'fw_state' is set to one of:
4115 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
4116 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4117 * firmware to the adapter.
4118 * FW_IS_BEING_LOADED: The driver's second instance will not load
4119 * firmware to the adapter.
4122 /* Prior to f/w load, perform a soft reset of the Octeon device;
4123 * if error resetting, return w/error.
4125 if (fw_state == FW_NEEDS_TO_BE_LOADED)
4126 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4129 /* Initialize the dispatch mechanism used to push packets arriving on
4130 * Octeon Output queues.
4132 if (octeon_init_dispatch_list(octeon_dev))
4135 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4136 OPCODE_NIC_CORE_DRV_ACTIVE,
4137 octeon_core_drv_init,
4140 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4141 OPCODE_NIC_VF_DRV_NOTICE,
4142 octeon_recv_vf_drv_notice, octeon_dev);
4143 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4144 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4145 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4146 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4148 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4150 if (octeon_set_io_queues_off(octeon_dev)) {
4151 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4155 if (OCTEON_CN23XX_PF(octeon_dev)) {
4156 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4158 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4163 /* Initialize soft command buffer pool
4165 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4166 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4169 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4171 /* Setup the data structures that manage this Octeon's Input queues. */
4172 if (octeon_setup_instr_queues(octeon_dev)) {
4173 dev_err(&octeon_dev->pci_dev->dev,
4174 "instruction queue initialization failed\n");
4177 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4179 /* Initialize lists to manage the requests of different types that
4180 * arrive from user & kernel applications for this octeon device.
4182 if (octeon_setup_response_list(octeon_dev)) {
4183 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4186 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4188 if (octeon_setup_output_queues(octeon_dev)) {
4189 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4193 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4195 if (OCTEON_CN23XX_PF(octeon_dev)) {
4196 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4197 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4200 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4202 if (octeon_allocate_ioq_vector
4204 octeon_dev->sriov_info.num_pf_rings)) {
4205 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4208 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4211 /* The input and output queue registers were set up earlier (the
4212 * queues were not enabled). Any additional registers
4213 * that need to be programmed should be done now.
4215 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4217 dev_err(&octeon_dev->pci_dev->dev,
4218 "Failed to configure device registers\n");
4223 /* Initialize the tasklet that handles output queue packet processing. */
4224 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4225 tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4226 (unsigned long)octeon_dev);
4228 /* Setup the interrupt handler and record the INT SUM register address
4230 if (octeon_setup_interrupt(octeon_dev,
4231 octeon_dev->sriov_info.num_pf_rings))
4234 /* Enable Octeon device interrupts */
4235 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4237 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4239 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4240 * the output queue is enabled.
4241 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4242 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4243 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4244 * before any credits have been issued, causing the ring to be reset
4245 * (and the f/w appear to never have started).
4247 for (j = 0; j < octeon_dev->num_oqs; j++)
4248 writel(octeon_dev->droq[j]->max_count,
4249 octeon_dev->droq[j]->pkts_credit_reg);
4251 /* Enable the input and output queues for this Octeon device */
4252 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4254 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4258 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4260 if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4261 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4263 dev_info(&octeon_dev->pci_dev->dev,
4264 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4267 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4269 /* Wait for the octeon to initialize DDR after the soft-reset. */
4270 while (!ddr_timeout) {
4271 set_current_state(TASK_INTERRUPTIBLE);
4272 if (schedule_timeout(HZ / 10)) {
4273 /* user probably pressed Control-C */
4277 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4279 dev_err(&octeon_dev->pci_dev->dev,
4280 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4285 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4286 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4290 /* Divert uboot to take commands from host instead. */
4291 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4293 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4294 ret = octeon_init_consoles(octeon_dev);
4296 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4299 /* If console debug enabled, specify empty string to use default
4300 * enablement ELSE specify NULL string for 'disabled'.
4302 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4303 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4305 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4307 } else if (octeon_console_debug_enabled(0)) {
4308 /* If console was added AND we're logging console output
4309 * then set our console print function.
4311 octeon_dev->console[0].print = octeon_dbg_console_print;
4314 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4316 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4317 ret = load_firmware(octeon_dev);
4319 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4323 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4326 handshake[octeon_dev->octeon_id].init_ok = 1;
4327 complete(&handshake[octeon_dev->octeon_id].init);
4329 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4335 * \brief Debug console print function
4336 * @param octeon_dev octeon device
4337 * @param console_num console number
4338 * @param prefix first portion of line to display
4339 * @param suffix second portion of line to display
4341 * The OCTEON debug console outputs entire lines (excluding '\n').
4342 * Normally, the line will be passed in the 'prefix' parameter.
4343 * However, due to buffering, it is possible for a line to be split into two
4344 * parts, in which case they will be passed as the 'prefix' parameter and
4345 * 'suffix' parameter.
4347 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4348 char *prefix, char *suffix)
4350 if (prefix && suffix)
4351 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4354 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4356 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4362 * \brief Exits the module
4364 static void __exit liquidio_exit(void)
4366 liquidio_deinit_pci();
4368 pr_info("LiquidIO network module is now unloaded\n");
4371 module_init(liquidio_init);
4372 module_exit(liquidio_exit);