/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
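
/* Illustration (hypothetical version numbers): with FW_MAJOR_VERSION 8,
 * FW_MINOR_VERSION 10, FW_REVISION_VERSION 5 and FW_ENGINEERING_VERSION 0,
 * QED_FW_FILE_NAME expands to "qed/qed_init_values_zipped-8.10.5.0.bin",
 * which request_firmware() looks up under /lib/firmware.
 */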

static int __init qed_init(void)
{
	pr_notice("qed_init called\n");
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}
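
/* Note: the DMA API tracks the streaming mask (dma_set_mask) and the
 * coherent-allocation mask (dma_set_coherent_mask) separately, which is
 * why both are requested above; only the streaming mask is retried at
 * 32 bits when the 64-bit request is refused.
 */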

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);

	if (cdev->regview)
		iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff
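
/* A PCI config-space read returns all ones when the device fails to
 * respond, so a revision ID of 0xff is treated as an inaccessible
 * (errored) device rather than a real revision.
 */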

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev,
			struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}

	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (cdev->pci_params.pm_cap == 0)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	if (IS_PF(cdev)) {
		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
		cdev->db_size = pci_resource_len(cdev->pdev, 2);
		cdev->doorbells = ioremap_wc(cdev->db_phys_addr,
					     cdev->db_size);
		if (!cdev->doorbells) {
			DP_NOTICE(cdev, "Cannot map doorbell space\n");
			return -ENOMEM;
		}
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = cdev->mf_mode;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev,
			       pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
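		/* e.g. (illustrative) with 2 hwfns and rc == 5, cnt rounds
		 * down to 4 so the vectors split evenly between the hwfns.
		 */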
364 "Trying to enable MSI-X with less vectors (%d out of %d)\n",
365 cnt, int_params->in.num_vectors);
366 rc = pci_enable_msix_exact(cdev->pdev,
367 int_params->msix_table, cnt);

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		rc = pci_enable_msi(cdev->pdev);
		if (!rc) {
			int_params->out.int_mode = QED_INT_MODE_MSI;
			goto out;
		}

		DP_NOTICE(cdev, "Failed to enable MSI\n");
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
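
/* Fastpath status block indices are interleaved across the hwfns of a
 * CMT device: global index N belongs to hwfn (N % num_hwfns) at relative
 * slot (N / num_hwfns). For example (illustrative), with 2 hwfns, index 5
 * maps to hwfn 1, relative slot 2.
 */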
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}
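
		/* In the SISR status word read above, bit 0 flags the
		 * slowpath interrupt and bits 1..63 flag the per-protocol
		 * fastpath handlers, hence the (0x2ULL << j) test below.
		 */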
		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	int rc = 0;
	u8 id;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
		if (!rc)
			DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
				   "Requested slowpath MSI-X\n");
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	return rc;
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	return rc;
}

static int qed_nic_reset(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_reset(cdev);
	if (rc)
		return rc;

	qed_resc_free(cdev);

	return 0;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc;

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
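		/* 63 because each hwfn's IGU status word carries one slowpath
		 * bit plus up to 63 fastpath bits (see qed_single_int()).
		 */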
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int rc;
	int i;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));

	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
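	/* e.g. (illustrative) a 2-hwfn CMT device needs at least 4 vectors:
	 * one slowpath and one fastpath vector for each hwfn.
	 */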

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
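
/* Note: the value returned above is the inflated size in 32-bit dwords
 * (total_out / 4), not in bytes.
 */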

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_tunn_start_params tunn_info;
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}
	}

	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc) {
			DP_NOTICE(cdev, "Failed to allocate stream memory\n");
			goto err2;
		}

		data = cdev->firmware->data;
	}

	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
			       1 << QED_MODE_L2GRE_TUNN |
			       1 << QED_MODE_IPGRE_TUNN |
			       1 << QED_MODE_L2GENEVE_TUNN |
			       1 << QED_MODE_IPGENEVE_TUNN;

	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;

	/* Start the slowpath */
	rc = qed_hw_init(cdev, &tunn_info, true,
			 cdev->int_params.out.int_mode,
			 true, data);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
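		/* e.g. (illustrative) driver version 8.7.1.0 packs to
		 * drv_version.version == 0x08070100.
		 */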
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	qed_iov_wq_stop(cdev, false);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	if (IS_PF(cdev)) {
		qed_free_stream_mem(cdev);
		qed_sriov_disable(cdev, true);

		qed_nic_stop(cdev);
		qed_slowpath_irq_free(cdev);
	}

	qed_disable_msix(cdev);
	qed_nic_reset(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;
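
	/* e.g. (illustrative) with 2 hwfns and an L2 queue, sb_id 5 lands
	 * on hwfn 1 as relative SB 2; non-L2 SBs always use engine 0.
	 */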
	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			     sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev,
			struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	if (IS_VF(cdev))
		return 0;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
		    (params->adv_speeds & SUPPORTED_1000baseT_Full))
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
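		/* ethtool had no SUPPORTED_* bit for 50G/100G speeds at this
		 * point, so the two checks below are always-false (& 0)
		 * placeholders.
		 */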
		if (params->adv_speeds & 0)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & 0)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = PMM_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = PMM_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = PMM_LOOPBACK_NONE;
			break;
		}
	}

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (IS_PF(hwfn->cdev)) {
		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
		memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
		       sizeof(link_caps));
	} else {
		memset(&params, 0, sizeof(params));
		memset(&link, 0, sizeof(link));
		memset(&link_caps, 0, sizeof(link_caps));
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = SUPPORTED_FIBRE;
	if (params.speed.autoneg)
		if_link->supported_caps |= SUPPORTED_Autoneg;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= SUPPORTED_Asym_Pause;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= SUPPORTED_Pause;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
					    SUPPORTED_1000baseT_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= 0;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
		if_link->advertised_caps |= 0;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= SUPPORTED_1000baseT_Half |
					   SUPPORTED_1000baseT_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= 0;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
		if_link->supported_caps |= 0;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= SUPPORTED_1000baseT_Half;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= SUPPORTED_1000baseT_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= 0;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= 0;

	if (link.an_complete)
		if_link->lp_caps |= SUPPORTED_Autoneg;

	if (link.partner_adv_pause)
		if_link->lp_caps |= SUPPORTED_Pause;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= SUPPORTED_Asym_Pause;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	qed_fill_link(&cdev->hwfns[0], if_link);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.set_led = &qed_set_led,
};