/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <net/devlink.h>
#include "mlx5_core.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/devcom.h"
#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
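/* Illustration (not part of the original source): both knobs above are plain
 * module parameters, e.g.
 *
 *	modprobe mlx5_core prof_sel=1 debug_mask=3
 *
 * debug_mask (mode 0644) can also be changed at runtime through
 * /sys/module/mlx5_core/parameters/debug_mask, while prof_sel (mode 0444) is
 * fixed once the module is loaded.
 */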
static u32 sw_owner_id[4];

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
static struct mlx5_profile profile[] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
#define FW_INIT_TIMEOUT_MILI		2000
#define FW_INIT_WAIT_MS			2
#define FW_PRE_INIT_TIMEOUT_MILI	10000
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
					      driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
	u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
	int remaining_size = driver_ver_sz;
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	strncpy(string, "Linux", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, DRIVER_NAME, remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, DRIVER_VERSION, remaining_size);

	/* Send the command */
	MLX5_SET(set_driver_version_in, in, opcode,
		 MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return 0;
}
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}
static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
struct mlx5_reg_host_endianness {
	u8	he;
	u8	rsvd[15];
};
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};
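/* CAP_MASK(pos, size) builds a run of 'size' one-bits starting at bit 'pos':
 * for example CAP_MASK(16, 2) == ((1 << 2) - 1) << 16 == 0x30000.
 */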
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;
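	/* The op_mod built above packs the capability type into the upper
	 * bits and the current/maximum selector (HCA_CAP_OPMOD_GET_CUR or
	 * HCA_CAP_OPMOD_GET_MAX, see the switch below) into bit 0.
	 */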
	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}
	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->caps.hca_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->caps.hca_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;
	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_hca_cap;
	void *set_ctx;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianness_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);

	kfree(set_ctx);
	return err;
}
static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
{
	void *set_hca_cap;
	void *set_ctx;
	int set_sz;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
	    !MLX5_CAP_GEN(dev, pg))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
	if (err)
		return err;

	if (!(MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive) ||
	      MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive) ||
	      MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive)))
		return 0;

	set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
	       MLX5_ST_SZ_BYTES(odp_cap));

	/* set ODP SRQ support for RC/UD and XRC transports */
	MLX5_SET(odp_cap, set_hca_cap, ud_odp_caps.srq_receive,
		 MLX5_CAP_ODP_MAX(dev, ud_odp_caps.srq_receive));

	MLX5_SET(odp_cap, set_hca_cap, rc_odp_caps.srq_receive,
		 MLX5_CAP_ODP_MAX(dev, rc_odp_caps.srq_receive));

	MLX5_SET(odp_cap, set_hca_cap, xrc_odp_caps.srq_receive,
		 MLX5_CAP_ODP_MAX(dev, xrc_odp_caps.srq_receive));

	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ODP);

	kfree(set_ctx);
	return err;
}
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int err;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       profile[prof_sel].log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4k.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
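	/* Note on the arithmetic above: log_uar_page_sz is expressed relative
	 * to a 4KB page, so PAGE_SHIFT - 12 yields 0 on 4KB-page systems and
	 * 4 when PAGE_SIZE is 64KB.
	 */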
	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() >= 128 ? 1 : 0);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 num_vhca_ports,
			 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

	err = set_caps(dev, set_ctx, set_sz,
		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);

query_ex:
	kfree(set_ctx);
	return err;
}
static int set_hca_cap(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto out;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
		goto out;
	}

	err = handle_hca_cap_odp(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
		goto out;
	}

out:
	return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianness he_in;
	struct mlx5_reg_host_endianness he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
	int ret = 0;

	/* Disable local_lb by default */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		ret = mlx5_nic_vport_update_local_lb(dev, false);

	return ret;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
			     struct ptp_system_timestamp *sts)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(&dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(&dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* the low word wrapped between reads, re-sample it */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(&dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	return (u64)timer_l | (u64)timer_h1 << 32;
}
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]   = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
			    query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]   = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
				    set_out, sizeof(set_out));
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -EOPNOTSUPP;
}
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct mlx5_priv *priv = &dev->priv;
	int err = 0;

	priv->pci_dev_data = id->driver_data;

	pci_set_drvdata(dev->pdev, dev);

	priv->numa_node = dev_to_node(&dev->pdev->dev);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);

	return err;
}
static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	dev->priv.devcom = mlx5_devcom_register_device(dev);
	if (IS_ERR(dev->priv.devcom))
		dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n",
			dev->priv.devcom);

	err = mlx5_query_board_id(dev);
	if (err) {
		dev_err(&pdev->dev, "query board id failed\n");
		goto err_devcom;
	}

	err = mlx5_eq_table_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto err_devcom;
	}

	err = mlx5_events_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize events\n");
		goto err_eq_cleanup;
	}

	err = mlx5_cq_debugfs_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
		goto err_events_cleanup;
	}

	mlx5_init_qp_table(dev);

	mlx5_init_mkey_table(dev);

	mlx5_init_reserved_gids(dev);

	mlx5_init_clock(dev);

	dev->vxlan = mlx5_vxlan_create(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init l2 table %d\n", err);
		goto err_rl_cleanup;
	}

	err = mlx5_eswitch_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
		goto err_mpfs_cleanup;
	}

	err = mlx5_sriov_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
		goto err_eswitch_cleanup;
	}

	err = mlx5_fpga_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
		goto err_sriov_cleanup;
	}

	dev->tracer = mlx5_fw_tracer_create(dev);

	return 0;

err_sriov_cleanup:
	mlx5_sriov_cleanup(dev);
err_eswitch_cleanup:
	mlx5_eswitch_cleanup(dev->priv.eswitch);
err_mpfs_cleanup:
	mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
	mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cq_debugfs_cleanup(dev);
err_events_cleanup:
	mlx5_events_cleanup(dev);
err_eq_cleanup:
	mlx5_eq_table_cleanup(dev);
err_devcom:
	mlx5_devcom_unregister_device(dev->priv.devcom);

	return err;
}
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_fpga_cleanup(dev);
	mlx5_sriov_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
	mlx5_devcom_unregister_device(dev->priv.devcom);
}
static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	/* Only PFs hold the relevant PCIe information for this query */
	if (mlx5_core_is_pf(dev))
		pcie_print_link_status(dev->pdev);

	/* wait for firmware to accept initialization segments configurations
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
	if (err) {
		dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
			FW_PRE_INIT_TIMEOUT_MILI);
		return err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		return err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
			FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto stop_health;
	}

	return 0;

stop_health:
	mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

	return err;
}
static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_stop_health_poll(dev, boot);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_cleanup(dev);

	return 0;
}
static int mlx5_load(struct mlx5_core_dev *dev)
	struct pci_dev *pdev = dev->pdev;

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);

	mlx5_events_start(dev);
	mlx5_pagealloc_start(dev);

	err = mlx5_eq_table_create(dev);
		dev_err(&pdev->dev, "Failed to create EQs\n");

	err = mlx5_fw_tracer_init(dev->tracer);
		dev_err(&pdev->dev, "Failed to init FW tracer\n");

	err = mlx5_fpga_device_start(dev);
		dev_err(&pdev->dev, "fpga device start failed %d\n", err);
		goto err_fpga_start;

	err = mlx5_accel_ipsec_init(dev);
		dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
		goto err_ipsec_start;

	err = mlx5_accel_tls_init(dev);
		dev_err(&pdev->dev, "TLS device start failed %d\n", err);

	err = mlx5_init_fs(dev);
		dev_err(&pdev->dev, "Failed to init flow steering\n");

	err = mlx5_core_set_hca_defaults(dev);
		dev_err(&pdev->dev, "Failed to set hca defaults\n");

	err = mlx5_sriov_attach(dev);
		dev_err(&pdev->dev, "sriov init failed %d\n", err);

	err = mlx5_ec_init(dev);
		dev_err(&pdev->dev, "Failed to init embedded CPU\n");

	mlx5_sriov_detach(dev);
	mlx5_cleanup_fs(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
static void mlx5_unload(struct mlx5_core_dev *dev)
{
	mlx5_ec_cleanup(dev);
	mlx5_sriov_detach(dev);
	mlx5_cleanup_fs(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
}
static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
	struct pci_dev *pdev = dev->pdev;

	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");

	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, boot);

		err = mlx5_init_once(dev);
			dev_err(&pdev->dev, "sw objs init failed\n");
			goto function_teardown;

	err = mlx5_load(dev);

	if (mlx5_device_registered(dev)) {
		mlx5_attach_device(dev);
		err = mlx5_register_device(dev);
			dev_err(&pdev->dev, "register device failed %d\n", err);

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	mutex_unlock(&dev->intf_state_mutex);

	mlx5_cleanup_once(dev);
	mlx5_function_teardown(dev, boot);
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);
static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
			 __func__);
			mlx5_cleanup_once(dev);

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	if (mlx5_device_registered(dev))
		mlx5_detach_device(dev);

		mlx5_cleanup_once(dev);

	mlx5_function_teardown(dev, cleanup);

	mutex_unlock(&dev->intf_state_mutex);
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
	.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
	.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
#endif
};
static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char *name)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	strncpy(priv->name, name, MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	dev->profile = &profile[profile_idx];

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mutex_init(&priv->alloc_mutex);
	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->dbg_root = debugfs_create_dir(name, mlx5_debugfs_root);
	if (!priv->dbg_root) {
		pr_err("mlx5_core: %s error, Cannot create debugfs dir, aborting\n", name);
		return -ENOMEM;
	}

	err = mlx5_health_init(dev);
	if (err)
		goto err_health_init;

	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	return 0;

err_pagealloc_init:
	mlx5_health_cleanup(dev);
err_health_init:
	debugfs_remove(dev->priv.dbg_root);

	return err;
}
static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	debugfs_remove_recursive(dev->priv.dbg_root);
}
#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	struct mlx5_core_dev *dev;
	struct devlink *devlink;

	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
		dev_err(&pdev->dev, "kzalloc failed\n");

	dev = devlink_priv(devlink);

	err = mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev));

	err = mlx5_pci_init(dev, pdev, id);
		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);

	err = mlx5_load_one(dev, true);
		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);

	request_module_nowait(MLX5_IB_MOD);

	err = devlink_register(devlink, &pdev->dev);

	pci_save_state(pdev);

	mlx5_unload_one(dev, true);
	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	devlink_free(devlink);
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	devlink_unregister(devlink);
	mlx5_unregister_device(dev);

	if (mlx5_unload_one(dev, true)) {
		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
		mlx5_health_flush(dev);
		return;
	}

	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, false);
	/* In case of kernel call drain the health wq */
	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (wait_vital(pdev)) {
		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev, false);
	if (err)
		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n",
			__func__, err);
	else
		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}
static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown = false, force_teardown = false;
	int ret;

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	ret = mlx5_cmd_fast_teardown_hca(dev);
	if (!ret)
		goto succeed;

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (!ret)
		goto succeed;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
	mlx5_start_health_poll(dev);
	return ret;

succeed:
	mlx5_enter_error_state(dev, true);

	/* Some platforms require freeing the IRQs in the shutdown
	 * flow. If they aren't freed they can't be allocated after
	 * kexec. There is no need to cleanup the mlx5_core software
	 * stack in such a case.
	 */
	mlx5_core_eq_free_irqs(dev);

	return 0;
}
static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	dev_info(&pdev->dev, "Shutdown was called\n");
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev, false);
	mlx5_pci_disable_device(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 0x101b) },			/* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}
static struct pci_driver mlx5_core_driver = {
	.name		= DRIVER_NAME,
	.id_table	= mlx5_core_pci_table,
	.probe		= init_one,
	.remove		= remove_one,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure = mlx5_core_sriov_configure,
};
static void mlx5_core_verify_params(void)
{
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
			prof_sel,
			ARRAY_SIZE(profile) - 1,
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
}
static int __init init(void)
{
	int err;

	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

	mlx5_core_verify_params();
	mlx5_fpga_ipsec_build_fs_cmds();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_init();
#endif

	return 0;

err_debug:
	mlx5_unregister_debugfs();
	return err;
}
static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);