/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <net/devlink.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "lib/mpfs.h"
#include "eswitch.h"
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "accel/ipsec.h"
#include "lib/clock.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
#define MLX5_DEFAULT_PROF	2
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
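
/* Example (values are illustrative only): load the driver with resource
 * profile 1 and both command-dump debug bits set:
 *
 *	modprobe mlx5_core prof_sel=1 debug_mask=3
 */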
enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
static struct mlx5_profile profile[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 18,
	},
};
#define FW_INIT_TIMEOUT_MILI		2000
#define FW_INIT_WAIT_MS			2
#define FW_PRE_INIT_TIMEOUT_MILI	10000
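
/* Poll (in FW_INIT_WAIT_MS steps) until firmware clears the initializing
 * bit in the initialization segment, or until max_wait_mili milliseconds
 * have elapsed, in which case -EBUSY is returned.
 */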
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
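
/* Report a "Linux,<driver>,<version>" string to firmware via
 * SET_DRIVER_VERSION when the driver_version capability is advertised.
 * The command's return value is deliberately ignored; this is purely
 * informational.
 */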
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
					      driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
	u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
	int remaining_size = driver_ver_sz;
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	strncpy(string, "Linux", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, DRIVER_NAME, remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, DRIVER_VERSION, remaining_size);

	/* Send the command */
	MLX5_SET(set_driver_version_in, in, opcode,
		 MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
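
/* Prefer 64-bit DMA masks for both streaming and coherent mappings,
 * falling back to 32-bit masks when the platform cannot satisfy them,
 * and cap the DMA segment size at 2GB.
 */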
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}
static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}
static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
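
/* Request one completion vector per port per online CPU on top of the
 * MLX5_EQ_VEC_COMP_BASE control vectors, clamped to the number of EQs
 * the device exposes; fail if not even one completion vector can be
 * granted.
 */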
static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int err;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
	if (!priv->irq_info)
		return -ENOMEM;

	nvec = pci_alloc_irq_vectors(dev->pdev,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		err = nvec;
		goto err_free_irq_info;
	}

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;

err_free_irq_info:
	kfree(priv->irq_info);
	return err;
}
static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_free_irq_vectors(dev->pdev);
	kfree(priv->irq_info);
}
struct mlx5_reg_host_endianness {
	u8	he;
	u8	rsvd[15];
};
#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};
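
/* Translate a pkey table size in entries (a power of two between 128
 * and 4096) into the 0..5 encoding the firmware expects; unknown sizes
 * warn and fall back to the 128-entry encoding.
 */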
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->caps.hca_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->caps.hca_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}
query_ex:
	kfree(out);
	return err;
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;
	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
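
/* When the device advertises 8-byte atomic requests in host endianness,
 * flip the requestor from the big-endian default via SET_HCA_CAP so
 * atomic operands are handled in the host's native byte order.
 */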
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_hca_cap;
	void *set_ctx;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianness_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);

	kfree(set_ctx);
	return err;
}
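
/* Copy the current general HCA capabilities and write back an adjusted
 * set: a 128-entry pkey table, the selected profile's log_max_qp
 * (clamped to the device maximum), cmdif checksum disabled, 4K UARs
 * when supported on systems with pages larger than 4K, and the 128-byte
 * cache line attribute when it matches the host cache line size.
 */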
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       profile[prof_sel].log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4k.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() == 128 ? 1 : 0);

	err = set_caps(dev, set_ctx, set_sz,
		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);

query_ex:
	kfree(set_ctx);
	return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianness he_in;
	struct mlx5_reg_host_endianness he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
	int ret = 0;

	/* Disable local_lb by default */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		ret = mlx5_nic_vport_update_local_lb(dev, false);

	return ret;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
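
/* Read the 64-bit free-running internal timer as two 32-bit halves,
 * rereading the high word to detect a low-word wrap between the reads;
 * on a wrap, the low word is sampled again so the halves are coherent.
 */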
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(&dev->iseg->internal_timer_h);
	timer_l = ioread32be(&dev->iseg->internal_timer_l);
	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) /* wrap around */
		timer_l = ioread32be(&dev->iseg->internal_timer_l);

	return (u64)timer_l | (u64)timer_h1 << 32;
}
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv = &mdev->priv;
	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);

	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
			priv->irq_info[i].mask);

	if (IS_ENABLED(CONFIG_SMP) &&
	    irq_set_affinity_hint(irq, priv->irq_info[i].mask))
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);

	return 0;
}
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv = &mdev->priv;
	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);

	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(priv->irq_info[i].mask);
}
static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
{
	int err;
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
		err = mlx5_irq_set_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		mlx5_irq_clear_affinity_hint(mdev, i);

	return err;
}
static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
{
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
		mlx5_irq_clear_affinity_hint(mdev, i);
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list)
		if (eq->eqn == eqn) {
			spin_unlock(&table->lock);
			return eq;
		}

	spin_unlock(&table->lock);

	return ERR_PTR(-ENOENT);
}
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

#ifdef CONFIG_RFS_ACCEL
	if (dev->rmap) {
		free_irq_cpu_rmap(dev->rmap);
		dev->rmap = NULL;
	}
#endif
	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL
	dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
	if (!dev->rmap)
		return -ENOMEM;
#endif
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

#ifdef CONFIG_RFS_ACCEL
		irq_cpu_rmap_add(dev->rmap, pci_irq_vector(dev->pdev,
				 MLX5_EQ_VEC_COMP_BASE + i));
#endif
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, MLX5_EQ_TYPE_COMP);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}
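
/* Negotiate the ISSI (interface step sequence ID) with firmware: prefer
 * ISSI 1 when the firmware advertises it, accept ISSI 0 otherwise, and
 * treat firmware that does not recognize the query at all as ISSI 0.
 */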
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
			    query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
				    set_out, sizeof(set_out));
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -EOPNOTSUPP;
}
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	mutex_init(&priv->alloc_mutex);

	priv->numa_node = dev_to_node(&dev->pdev->dev);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);

err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}
static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
	debugfs_remove(priv->dbg_root);
}
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	err = mlx5_query_board_id(dev);
	if (err) {
		dev_err(&pdev->dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto out;
	}

	err = mlx5_init_cq_table(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);

	mlx5_init_srq_table(dev);

	mlx5_init_mkey_table(dev);

	mlx5_init_reserved_gids(dev);

	mlx5_init_clock(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init l2 table %d\n", err);
		goto err_rl_cleanup;
	}

	err = mlx5_eswitch_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
		goto err_mpfs_cleanup;
	}

	err = mlx5_sriov_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
		goto err_eswitch_cleanup;
	}

	err = mlx5_fpga_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
		goto err_sriov_cleanup;
	}

	return 0;

err_sriov_cleanup:
	mlx5_sriov_cleanup(dev);
err_eswitch_cleanup:
	mlx5_eswitch_cleanup(dev->priv.eswitch);
err_mpfs_cleanup:
	mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
	mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_fpga_cleanup(dev);
	mlx5_sriov_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}
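
/* Bring the device to a fully operational state: command interface,
 * firmware handshake, HCA capabilities and pages, EQs, UARs, flow
 * steering, SR-IOV, FPGA/IPsec offloads, and finally interface
 * registration (or re-attach on recovery). Runs under intf_state_mutex;
 * the error path unwinds every step in reverse order.
 */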
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			 bool boot)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
			 __func__);
		goto out;
	}

	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	/* On load, remove any previous indication of internal error; device
	 * is up.
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	/* wait for firmware to accept initialization segment configurations
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
	if (err) {
		dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
			FW_PRE_INIT_TIMEOUT_MILI);
		goto out_err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
			FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto err_stop_poll;
	}

	if (boot) {
		err = mlx5_init_once(dev, priv);
		if (err) {
			dev_err(&pdev->dev, "sw objs init failed\n");
			goto err_stop_poll;
		}
	}

	err = mlx5_alloc_irq_vectors(dev);
	if (err) {
		dev_err(&pdev->dev, "alloc irq vectors failed\n");
		goto err_cleanup_once;
	}

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);
		goto err_disable_msix;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_put_uars;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	err = mlx5_irq_set_affinity_hints(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
		goto err_affinity_hints;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init flow steering\n");
		goto err_fs;
	}

	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to set hca defaults\n");
		goto err_fs;
	}

	err = mlx5_sriov_attach(dev);
	if (err) {
		dev_err(&pdev->dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		dev_err(&pdev->dev, "fpga device start failed %d\n", err);
		goto err_fpga_start;
	}
	err = mlx5_accel_ipsec_init(dev);
	if (err) {
		dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
		goto err_ipsec_start;
	}

	if (mlx5_device_registered(dev)) {
		mlx5_attach_device(dev);
	} else {
		err = mlx5_register_device(dev);
		if (err) {
			dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
			goto err_reg_dev;
		}
	}

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
	mutex_unlock(&dev->intf_state_mutex);

	return 0;

err_reg_dev:
	mlx5_accel_ipsec_cleanup(dev);
err_ipsec_start:
	mlx5_fpga_device_stop(dev);

err_fpga_start:
	mlx5_sriov_detach(dev);

err_sriov:
	mlx5_cleanup_fs(dev);

err_fs:
	mlx5_irq_clear_affinity_hints(dev);

err_affinity_hints:
	free_comp_eqs(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_put_uars:
	mlx5_put_uars_page(dev, priv->uar);

err_disable_msix:
	mlx5_free_irq_vectors(dev);

err_cleanup_once:
	if (boot)
		mlx5_cleanup_once(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev, 0);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			   bool cleanup)
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_recovery(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
			 __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	if (mlx5_device_registered(dev))
		mlx5_detach_device(dev);

	mlx5_accel_ipsec_cleanup(dev);
	mlx5_fpga_device_stop(dev);

	mlx5_sriov_detach(dev);
	mlx5_cleanup_fs(dev);
	mlx5_irq_clear_affinity_hints(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_put_uars_page(dev, priv->uar);
	mlx5_free_irq_vectors(dev);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_cleanup(dev);

out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
	.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
	.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
#endif
};
#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	struct mlx5_priv *priv;
	int err;

	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
	if (!devlink) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	priv = &dev->priv;
	priv->pci_dev_data = id->driver_data;

	pci_set_drvdata(pdev, dev);

	dev->pdev = pdev;
	dev->event = mlx5_core_event;
	dev->profile = &profile[prof_sel];

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);

	INIT_LIST_HEAD(&priv->waiting_events_list);
	priv->is_accum_events = false;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	err = init_srcu_struct(&priv->pfault_srcu);
	if (err) {
		dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
			err);
		goto clean_dev;
	}
#endif
	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	err = mlx5_pci_init(dev, priv);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
		goto clean_srcu;
	}

	err = mlx5_health_init(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
		goto clean_health;
	}

	request_module_nowait(MLX5_IB_MOD);

	err = devlink_register(devlink, &pdev->dev);
	if (err)
		goto clean_load;

	pci_save_state(pdev);
	return 0;

clean_load:
	mlx5_unload_one(dev, priv, true);
clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_srcu:
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	cleanup_srcu_struct(&priv->pfault_srcu);
clean_dev:
#endif
	devlink_free(devlink);

	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_priv *priv = &dev->priv;

	devlink_unregister(devlink);
	mlx5_unregister_device(dev);

	if (mlx5_unload_one(dev, priv, true)) {
		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_pci_close(dev, priv);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	cleanup_srcu_struct(&priv->pfault_srcu);
#endif
	devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev, false);
	mlx5_unload_one(dev, priv, false);
	/* In case of kernel call drain the health wq */
	if (state) {
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (wait_vital(pdev)) {
		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n",
			__func__, err);
	else
		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}
static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
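
/* Shutdown-time fast path: tear the HCA down with a single
 * FORCE_TEARDOWN_HCA command instead of a full unload. Only attempted
 * when firmware advertises force_teardown and the device is not already
 * in an internal error state; on success the device is moved to the
 * error state so no further commands are issued to it.
 */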
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	int ret;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev);

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (ret) {
		mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
		mlx5_start_health_poll(dev);
		return ret;
	}

	mlx5_enter_error_state(dev, true);

	return 0;
}
static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev_info(&pdev->dev, "Shutdown was called\n");
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 0x101b) },			/* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}
static struct pci_driver mlx5_core_driver = {
	.name           = DRIVER_NAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = init_one,
	.remove         = remove_one,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure,
};
static void mlx5_core_verify_params(void)
{
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
			prof_sel,
			ARRAY_SIZE(profile) - 1,
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
}
static int __init init(void)
{
	int err;

	mlx5_core_verify_params();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_init();
#endif

	return 0;

err_debug:
	mlx5_unregister_debugfs();
	return err;
}
static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);