2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/init.h>
39 #include <linux/errno.h>
40 #include <linux/pci.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/slab.h>
43 #include <linux/io-mapping.h>
44 #include <linux/delay.h>
45 #include <linux/kmod.h>
46 #include <linux/etherdevice.h>
47 #include <net/devlink.h>
49 #include <uapi/rdma/mlx4-abi.h>
50 #include <linux/mlx4/device.h>
51 #include <linux/mlx4/doorbell.h>
57 MODULE_AUTHOR("Roland Dreier");
58 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
59 MODULE_LICENSE("Dual BSD/GPL");
60 MODULE_VERSION(DRV_VERSION);
62 struct workqueue_struct *mlx4_wq;
64 #ifdef CONFIG_MLX4_DEBUG
66 int mlx4_debug_level; /* 0 by default */
67 module_param_named(debug_level, mlx4_debug_level, int, 0644);
68 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
70 #endif /* CONFIG_MLX4_DEBUG */
75 module_param(msi_x, int, 0444);
76 MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x");
78 #else /* CONFIG_PCI_MSI */
82 #endif /* CONFIG_PCI_MSI */
84 static uint8_t num_vfs[3] = {0, 0, 0};
85 static int num_vfs_argc;
86 module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
87 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
88 "num_vfs=port1,port2,port1+2");
90 static uint8_t probe_vf[3] = {0, 0, 0};
91 static int probe_vfs_argc;
92 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
93 MODULE_PARM_DESC(probe_vf, "number of VFs to probe by the PF driver (num_vfs > 0)\n"
94 "probe_vf=port1,port2,port1+2");
96 static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
97 module_param_named(log_num_mgm_entry_size,
98 mlx4_log_num_mgm_entry_size, int, 0444);
99 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the number"
100 " of QPs per MCG; for example,"
101 " 10 gives 248. Range: 7 <="
102 " log_num_mgm_entry_size <= 12."
103 " To activate device managed"
104 " flow steering when available, set to -1");
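/* The 248 figure above follows from the MGM entry layout used by
 * choose_log_fs_mgm_entry_size() below: qp_per_mcg = 4 * ((2^n)/16 - 2),
 * so n = 10 gives 4 * (1024/16 - 2) = 248. */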
106 static bool enable_64b_cqe_eqe = true;
107 module_param(enable_64b_cqe_eqe, bool, 0444);
108 MODULE_PARM_DESC(enable_64b_cqe_eqe,
109 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
111 static bool enable_4k_uar;
112 module_param(enable_4k_uar, bool, 0444);
113 MODULE_PARM_DESC(enable_4k_uar,
114 "Enable using 4K UAR. Should not be enabled if there are VFs which do not support 4K UARs (default: false)");
116 #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \
117 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
118 MLX4_FUNC_CAP_DMFS_A0_STATIC)
120 #define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV)
122 static char mlx4_version[] =
123 DRV_NAME ": Mellanox ConnectX core driver v"
126 static const struct mlx4_profile default_profile = {
129 .rdmarc_per_qp = 1 << 4,
133 .num_mtt = 1 << 20, /* It is really the number of MTT segments */
136 static const struct mlx4_profile low_mem_profile = {
139 .rdmarc_per_qp = 1 << 4,
146 static int log_num_mac = 7;
147 module_param_named(log_num_mac, log_num_mac, int, 0444);
148 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
150 static int log_num_vlan;
151 module_param_named(log_num_vlan, log_num_vlan, int, 0444);
152 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
153 /* Log2 max number of VLANs per ETH port (0-7) */
154 #define MLX4_LOG_NUM_VLANS 7
155 #define MLX4_MIN_LOG_NUM_VLANS 0
156 #define MLX4_MIN_LOG_NUM_MAC 1
158 static bool use_prio;
159 module_param_named(use_prio, use_prio, bool, 0444);
160 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
162 int log_mtts_per_seg = ilog2(1);
163 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
164 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment "
165 "(0-7) (default: 0)");
167 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
168 static int arr_argc = 2;
169 module_param_array(port_type_array, int, &arr_argc, 0444);
170 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
171 "1 for IB, 2 for Ethernet");
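/* Illustrative invocation combining the parameters above (the values
 * are examples only, not recommendations):
 *
 *   modprobe mlx4_core num_vfs=4 probe_vf=1 port_type_array=2,2 \
 *            log_num_mgm_entry_size=-1 msi_x=1
 */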
173 struct mlx4_port_config {
174 struct list_head list;
175 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
176 struct pci_dev *pdev;
179 static atomic_t pf_loading = ATOMIC_INIT(0);
181 static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
182 struct devlink_param_gset_ctx *ctx)
184 ctx->val.vbool = !!mlx4_internal_err_reset;
188 static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
189 struct devlink_param_gset_ctx *ctx)
191 mlx4_internal_err_reset = ctx->val.vbool;
195 static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
196 struct devlink_param_gset_ctx *ctx)
198 struct mlx4_priv *priv = devlink_priv(devlink);
199 struct mlx4_dev *dev = &priv->dev;
201 ctx->val.vbool = dev->persist->crdump.snapshot_enable;
205 static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
206 struct devlink_param_gset_ctx *ctx)
208 struct mlx4_priv *priv = devlink_priv(devlink);
209 struct mlx4_dev *dev = &priv->dev;
211 dev->persist->crdump.snapshot_enable = ctx->val.vbool;
216 mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
217 union devlink_param_value val,
218 struct netlink_ext_ack *extack)
220 u32 value = val.vu32;
222 if (value < 1 || value > 128)
225 if (!is_power_of_2(value)) {
226 NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2");
233 enum mlx4_devlink_param_id {
234 MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
235 MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
236 MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
239 static const struct devlink_param mlx4_devlink_params[] = {
240 DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
241 BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
242 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
243 mlx4_devlink_ierr_reset_get,
244 mlx4_devlink_ierr_reset_set, NULL),
245 DEVLINK_PARAM_GENERIC(MAX_MACS,
246 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
247 NULL, NULL, mlx4_devlink_max_macs_validate),
248 DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
249 BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
250 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
251 mlx4_devlink_crdump_snapshot_get,
252 mlx4_devlink_crdump_snapshot_set, NULL),
253 DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
254 "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
255 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
257 DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
258 "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
259 BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
263 static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
265 union devlink_param_value value;
267 value.vbool = !!mlx4_internal_err_reset;
268 devlink_param_driverinit_value_set(devlink,
269 DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
272 value.vu32 = 1UL << log_num_mac;
273 devlink_param_driverinit_value_set(devlink,
274 DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
277 value.vbool = enable_64b_cqe_eqe;
278 devlink_param_driverinit_value_set(devlink,
279 MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
282 value.vbool = enable_4k_uar;
283 devlink_param_driverinit_value_set(devlink,
284 MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
288 devlink_param_driverinit_value_set(devlink,
289 DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
293 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
294 struct mlx4_dev_cap *dev_cap)
296 /* reserved_uars is counted in units of the system page size.
297 * Therefore an adjustment is applied when the UAR page size is
298 * smaller than the system page size
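 * For example, with 4K UARs on a 64K-page kernel, each system page
 * holds 1 << (16 - 12) = 16 UAR pages, so the FW-reported count is
 * divided by 16 (but never drops below mlx4_get_num_reserved_uar()).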
300 dev->caps.reserved_uars =
302 mlx4_get_num_reserved_uar(dev),
303 dev_cap->reserved_uars /
304 (1 << (PAGE_SHIFT - dev->uar_page_shift)));
307 int mlx4_check_port_params(struct mlx4_dev *dev,
308 enum mlx4_port_type *port_type)
312 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
313 for (i = 0; i < dev->caps.num_ports - 1; i++) {
314 if (port_type[i] != port_type[i + 1]) {
315 mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
321 for (i = 0; i < dev->caps.num_ports; i++) {
322 if (!(port_type[i] & dev->caps.supported_type[i+1])) {
323 mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
331 static void mlx4_set_port_mask(struct mlx4_dev *dev)
335 for (i = 1; i <= dev->caps.num_ports; ++i)
336 dev->caps.port_mask[i] = dev->caps.port_type[i];
340 MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
343 static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
346 struct mlx4_func func;
348 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
349 err = mlx4_QUERY_FUNC(dev, &func, 0);
351 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
354 dev_cap->max_eqs = func.max_eq;
355 dev_cap->reserved_eqs = func.rsvd_eqs;
356 dev_cap->reserved_uars = func.rsvd_uars;
357 err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
362 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
364 struct mlx4_caps *dev_cap = &dev->caps;
366 /* FW does not support it, or it was cancelled by the user */
367 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
368 !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
371 /* Must have 64B CQE/EQE enabled by FW to use the bigger stride.
372 * When FW has NCSI it may decide not to report 64B CQE/EQEs
374 if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
375 !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
376 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
377 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
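/* The stride formats report only 32B of real data per CQE/EQE (see the
 * comment below); the larger stride is presumably what keeps consecutive
 * entries in separate cache lines, which is why only the 128B/256B
 * cache-line sizes checked next enable it. */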
381 if (cache_line_size() == 128 || cache_line_size() == 256) {
382 mlx4_dbg(dev, "Enabling CQE stride, cache line supported\n");
383 /* Changing the real data inside CQE size to 32B */
384 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
385 dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
387 if (mlx4_is_master(dev))
388 dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
390 if (cache_line_size() != 32 && cache_line_size() != 64)
391 mlx4_dbg(dev, "Disabling CQE stride, cache line size unsupported\n");
392 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
393 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
397 static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
398 struct mlx4_port_cap *port_cap)
400 dev->caps.vl_cap[port] = port_cap->max_vl;
401 dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
402 dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
403 dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
404 /* set gid and pkey table operating lengths by default
405 * to non-sriov values
407 dev->caps.gid_table_len[port] = port_cap->max_gids;
408 dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
409 dev->caps.port_width_cap[port] = port_cap->max_port_width;
410 dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
411 dev->caps.max_tc_eth = port_cap->max_tc_eth;
412 dev->caps.def_mac[port] = port_cap->def_mac;
413 dev->caps.supported_type[port] = port_cap->supported_port_types;
414 dev->caps.suggested_type[port] = port_cap->suggested_type;
415 dev->caps.default_sense[port] = port_cap->default_sense;
416 dev->caps.trans_type[port] = port_cap->trans_type;
417 dev->caps.vendor_oui[port] = port_cap->vendor_oui;
418 dev->caps.wavelength[port] = port_cap->wavelength;
419 dev->caps.trans_code[port] = port_cap->trans_code;
424 static int mlx4_dev_port(struct mlx4_dev *dev, int port,
425 struct mlx4_port_cap *port_cap)
429 err = mlx4_QUERY_PORT(dev, port, port_cap);
432 mlx4_err(dev, "QUERY_PORT command failed.\n");
437 static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
439 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
442 if (mlx4_is_mfunc(dev)) {
443 mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
444 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
448 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
450 "Keep FCS is not supported - Disabling Ignore FCS");
451 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
456 #define MLX4_A0_STEERING_TABLE_SIZE 256
457 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
462 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
464 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
467 mlx4_dev_cap_dump(dev, dev_cap);
469 if (dev_cap->min_page_sz > PAGE_SIZE) {
470 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
471 dev_cap->min_page_sz, PAGE_SIZE);
474 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
475 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
476 dev_cap->num_ports, MLX4_MAX_PORTS);
480 if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
481 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
484 pci_resource_len(dev->persist->pdev, 2));
488 dev->caps.num_ports = dev_cap->num_ports;
489 dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
490 dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
491 dev->caps.num_sys_eqs :
493 for (i = 1; i <= dev->caps.num_ports; ++i) {
494 err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
496 mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
501 dev->caps.uar_page_size = PAGE_SIZE;
502 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
503 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
504 dev->caps.bf_reg_size = dev_cap->bf_reg_size;
505 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
506 dev->caps.max_sq_sg = dev_cap->max_sq_sg;
507 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
508 dev->caps.max_wqes = dev_cap->max_qp_sz;
509 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
510 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
511 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
512 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
513 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
514 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
516 * Subtract 1 from the limit because we need to allocate a
517 * spare CQE to enable resizing the CQ.
519 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
520 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
521 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
522 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
523 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
525 dev->caps.reserved_pds = dev_cap->reserved_pds;
526 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
527 dev_cap->reserved_xrcds : 0;
528 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
529 dev_cap->max_xrcds : 0;
530 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
532 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
533 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
534 dev->caps.flags = dev_cap->flags;
535 dev->caps.flags2 = dev_cap->flags2;
536 dev->caps.bmme_flags = dev_cap->bmme_flags;
537 dev->caps.reserved_lkey = dev_cap->reserved_lkey;
538 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
539 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
540 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
541 dev->caps.wol_port[1] = dev_cap->wol_port[1];
542 dev->caps.wol_port[2] = dev_cap->wol_port[2];
543 dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;
545 /* Save uar page shift */
546 if (!mlx4_is_slave(dev)) {
547 /* Virtual PCI function needs to determine UAR page size from
548 * firmware. Only master PCI function can set the uar page size
550 if (enable_4k_uar || !dev->persist->num_vfs)
551 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
553 dev->uar_page_shift = PAGE_SHIFT;
555 mlx4_set_num_reserved_uars(dev, dev_cap);
558 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
559 struct mlx4_init_hca_param hca_param;
561 memset(&hca_param, 0, sizeof(hca_param));
562 err = mlx4_QUERY_HCA(dev, &hca_param);
563 /* Turn off PHV_EN flag in case phv_check_en is set.
564 * phv_check_en is a HW check that parses the packet and verifies
565 * that the phv bit was reported correctly in the WQE. To allow QinQ,
566 * the PHV_EN flag should be set and phv_check_en must be cleared;
567 * otherwise QinQ packets will be dropped by the HW.
569 if (err || hca_param.phv_check_en)
570 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
573 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
574 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
575 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
576 /* Don't do sense port on multifunction devices (for now at least) */
577 if (mlx4_is_mfunc(dev))
578 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
580 if (mlx4_low_memory_profile()) {
581 dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
582 dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
584 dev->caps.log_num_macs = log_num_mac;
585 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
588 for (i = 1; i <= dev->caps.num_ports; ++i) {
589 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
590 if (dev->caps.supported_type[i]) {
591 /* if only ETH is supported - assign ETH */
592 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
593 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
594 /* if only IB is supported, assign IB */
595 else if (dev->caps.supported_type[i] ==
597 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
599 /* if IB and ETH are supported, we set the port
600 * type according to user selection of port type;
601 * if user selected none, take the FW hint */
602 if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
603 dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
604 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
606 dev->caps.port_type[i] = port_type_array[i - 1];
610 * Link sensing is allowed on the port if 3 conditions are true:
611 * 1. Both protocols are supported on the port.
612 * 2. Different types are supported on the port
613 * 3. FW declared that it supports link sensing
615 mlx4_priv(dev)->sense.sense_allowed[i] =
616 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
617 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
618 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
621 * If the "default_sense" bit is set, we move the port to "AUTO" mode
622 * and perform the SENSE_PORT FW command to try to set the correct
623 * port type from the beginning
625 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
626 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
627 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
628 mlx4_SENSE_PORT(dev, i, &sensed_port);
629 if (sensed_port != MLX4_PORT_TYPE_NONE)
630 dev->caps.port_type[i] = sensed_port;
632 dev->caps.possible_type[i] = dev->caps.port_type[i];
635 if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
636 dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
637 mlx4_warn(dev, "Requested number of MACs is too large for port %d, reducing to %d\n",
638 i, 1 << dev->caps.log_num_macs);
640 if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
641 dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
642 mlx4_warn(dev, "Requested number of VLANs is too large for port %d, reducing to %d\n",
643 i, 1 << dev->caps.log_num_vlans);
647 if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
648 (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
649 (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
651 "Granular QoS per VF not supported with IB/Eth configuration\n");
652 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
655 dev->caps.max_counters = dev_cap->max_counters;
657 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
658 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
659 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
660 (1 << dev->caps.log_num_macs) *
661 (1 << dev->caps.log_num_vlans) *
663 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
665 if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
666 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
667 dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
669 dev->caps.dmfs_high_rate_qpn_base =
670 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
672 if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
673 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
674 dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
675 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
676 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
678 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
679 dev->caps.dmfs_high_rate_qpn_base =
680 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
681 dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
684 dev->caps.rl_caps = dev_cap->rl_caps;
686 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
687 dev->caps.dmfs_high_rate_qpn_range;
689 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
690 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
691 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
692 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
694 dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
696 if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
698 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
699 mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
700 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
701 dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
704 if (dev_cap->flags2 &
705 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
706 MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
707 mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
708 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
709 dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
713 if ((dev->caps.flags &
714 (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
716 dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
718 if (!mlx4_is_slave(dev)) {
719 mlx4_enable_cqe_eqe_stride(dev);
720 dev->caps.alloc_res_qp_mask =
721 (dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
724 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
725 dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
726 mlx4_warn(dev, "Old device ETS support detected\n");
727 mlx4_warn(dev, "Consider upgrading device FW.\n");
728 dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
732 dev->caps.alloc_res_qp_mask = 0;
735 mlx4_enable_ignore_fcs(dev);
740 /* Check whether there are live VFs; return their count */
741 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
743 struct mlx4_priv *priv = mlx4_priv(dev);
744 struct mlx4_slave_state *s_state;
748 for (i = 1 /* the PPF is 0 */; i < dev->num_slaves; ++i) {
749 s_state = &priv->mfunc.master.slave_state[i];
750 if (s_state->active && s_state->last_cmd !=
751 MLX4_COMM_CMD_RESET) {
752 mlx4_warn(dev, "%s: slave: %d is still active\n",
760 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
762 u32 qk = MLX4_RESERVED_QKEY_BASE;
764 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
765 qpn < dev->phys_caps.base_proxy_sqpn)
768 if (qpn >= dev->phys_caps.base_tunnel_sqpn)
770 qk += qpn - dev->phys_caps.base_tunnel_sqpn;
772 qk += qpn - dev->phys_caps.base_proxy_sqpn;
776 EXPORT_SYMBOL(mlx4_get_parav_qkey);
778 void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
780 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
782 if (!mlx4_is_master(dev))
785 priv->virt2phys_pkey[slave][port - 1][i] = val;
787 EXPORT_SYMBOL(mlx4_sync_pkey_table);
789 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
791 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
793 if (!mlx4_is_master(dev))
796 priv->slave_node_guids[slave] = guid;
798 EXPORT_SYMBOL(mlx4_put_slave_node_guid);
800 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
802 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
804 if (!mlx4_is_master(dev))
807 return priv->slave_node_guids[slave];
809 EXPORT_SYMBOL(mlx4_get_slave_node_guid);
811 int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
813 struct mlx4_priv *priv = mlx4_priv(dev);
814 struct mlx4_slave_state *s_slave;
816 if (!mlx4_is_master(dev))
819 s_slave = &priv->mfunc.master.slave_state[slave];
820 return !!s_slave->active;
822 EXPORT_SYMBOL(mlx4_is_slave_active);
824 void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
825 struct _rule_hw *eth_header)
827 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
828 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
829 struct mlx4_net_trans_rule_hw_eth *eth =
830 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
831 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
832 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
833 next_rule->rsvd == 0;
836 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
839 EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
841 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
842 struct mlx4_dev_cap *dev_cap,
843 struct mlx4_init_hca_param *hca_param)
845 dev->caps.steering_mode = hca_param->steering_mode;
846 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
847 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
848 dev->caps.fs_log_max_ucast_qp_range_size =
849 dev_cap->fs_log_max_ucast_qp_range_size;
851 dev->caps.num_qp_per_mgm =
852 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
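/* e.g. log_mc_entry_sz = 10 -> 4 * (1024/16 - 2) = 248 QPs per MCG */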
854 mlx4_dbg(dev, "Steering mode is: %s\n",
855 mlx4_steering_mode_str(dev->caps.steering_mode));
858 static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev)
860 kfree(dev->caps.spec_qps);
861 dev->caps.spec_qps = NULL;
864 static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev)
866 struct mlx4_func_cap *func_cap = NULL;
867 struct mlx4_caps *caps = &dev->caps;
870 func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
871 caps->spec_qps = kcalloc(caps->num_ports, sizeof(*caps->spec_qps), GFP_KERNEL);
873 if (!func_cap || !caps->spec_qps) {
874 mlx4_err(dev, "Failed to allocate memory for special qps cap\n");
879 for (i = 1; i <= caps->num_ports; ++i) {
880 err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap);
882 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
886 caps->spec_qps[i - 1] = func_cap->spec_qps;
887 caps->port_mask[i] = caps->port_type[i];
888 caps->phys_port_id[i] = func_cap->phys_port_id;
889 err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
890 &caps->gid_table_len[i],
891 &caps->pkey_table_len[i]);
893 mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n",
901 mlx4_slave_destroy_special_qp_cap(dev);
906 static int mlx4_slave_cap(struct mlx4_dev *dev)
910 struct mlx4_dev_cap *dev_cap = NULL;
911 struct mlx4_func_cap *func_cap = NULL;
912 struct mlx4_init_hca_param *hca_param = NULL;
914 hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL);
915 func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
916 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
917 if (!hca_param || !func_cap || !dev_cap) {
918 mlx4_err(dev, "Failed to allocate memory for slave_cap\n");
923 err = mlx4_QUERY_HCA(dev, hca_param);
925 mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
929 /* Fail if the HCA has an unknown global capability;
930 * at this time global_caps should always be zero
932 if (hca_param->global_caps) {
933 mlx4_err(dev, "Unknown hca global capabilities\n");
938 dev->caps.hca_core_clock = hca_param->hca_core_clock;
940 dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp;
941 err = mlx4_dev_cap(dev, dev_cap);
943 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
947 err = mlx4_QUERY_FW(dev);
949 mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
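/* caps.page_size_cap was stored as ~(min_page_sz - 1) in mlx4_dev_cap(),
 * so the two's complement below (~cap + 1) recovers the HCA's minimum
 * page size. */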
951 page_size = ~dev->caps.page_size_cap + 1;
952 mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
953 if (page_size > PAGE_SIZE) {
954 mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
955 page_size, PAGE_SIZE);
960 /* Set uar_page_shift for VF */
961 dev->uar_page_shift = hca_param->uar_page_sz + 12;
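/* hca_param->uar_page_sz is encoded as log2(page size) - 12, so a
 * value of 0 means 4K UAR pages (hence the "+ 12" above). */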
963 /* Make sure the master uar page size is valid */
964 if (dev->uar_page_shift > PAGE_SHIFT) {
966 "Invalid configuration: uar page size is larger than system page size\n");
971 /* Set reserved_uars based on the uar_page_shift */
972 mlx4_set_num_reserved_uars(dev, dev_cap);
974 /* Although the uar page size in FW differs from the system page size,
975 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
976 * still work under the assumption that uar page size == system page size
978 dev->caps.uar_page_size = PAGE_SIZE;
980 err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap);
982 mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
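/* (flags | mask) == mask only when flags has no bit outside mask:
 * reject any PF context behaviour bit this driver does not know about. */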
987 if ((func_cap->pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
988 PF_CONTEXT_BEHAVIOUR_MASK) {
989 mlx4_err(dev, "Unknown pf context behaviour %x, known flags %x\n",
990 func_cap->pf_context_behaviour,
991 PF_CONTEXT_BEHAVIOUR_MASK);
996 dev->caps.num_ports = func_cap->num_ports;
997 dev->quotas.qp = func_cap->qp_quota;
998 dev->quotas.srq = func_cap->srq_quota;
999 dev->quotas.cq = func_cap->cq_quota;
1000 dev->quotas.mpt = func_cap->mpt_quota;
1001 dev->quotas.mtt = func_cap->mtt_quota;
1002 dev->caps.num_qps = 1 << hca_param->log_num_qps;
1003 dev->caps.num_srqs = 1 << hca_param->log_num_srqs;
1004 dev->caps.num_cqs = 1 << hca_param->log_num_cqs;
1005 dev->caps.num_mpts = 1 << hca_param->log_mpt_sz;
1006 dev->caps.num_eqs = func_cap->max_eq;
1007 dev->caps.reserved_eqs = func_cap->reserved_eq;
1008 dev->caps.reserved_lkey = func_cap->reserved_lkey;
1009 dev->caps.num_pds = MLX4_NUM_PDS;
1010 dev->caps.num_mgms = 0;
1011 dev->caps.num_amgms = 0;
1013 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
1014 mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
1015 dev->caps.num_ports, MLX4_MAX_PORTS);
1020 mlx4_replace_zero_macs(dev);
1022 err = mlx4_slave_special_qp_cap(dev);
1024 mlx4_err(dev, "Setting special QP caps failed, aborting\n");
1028 if (dev->caps.uar_page_size * (dev->caps.num_uars -
1029 dev->caps.reserved_uars) >
1030 pci_resource_len(dev->persist->pdev,
1032 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
1033 dev->caps.uar_page_size * dev->caps.num_uars,
1034 (unsigned long long)
1035 pci_resource_len(dev->persist->pdev, 2));
1040 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
1041 dev->caps.eqe_size = 64;
1042 dev->caps.eqe_factor = 1;
1044 dev->caps.eqe_size = 32;
1045 dev->caps.eqe_factor = 0;
1048 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
1049 dev->caps.cqe_size = 64;
1050 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1052 dev->caps.cqe_size = 32;
1055 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
1056 dev->caps.eqe_size = hca_param->eqe_size;
1057 dev->caps.eqe_factor = 0;
1060 if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
1061 dev->caps.cqe_size = hca_param->cqe_size;
1062 /* Userspace still needs to know when CQE > 32B */
1063 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1066 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
1067 mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
1069 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
1070 mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n");
1072 slave_adjust_steering_mode(dev, dev_cap, hca_param);
1073 mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
1074 hca_param->rss_ip_frags ? "on" : "off");
1076 if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
1077 dev->caps.bf_reg_size)
1078 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
1080 if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
1081 dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
1085 mlx4_slave_destroy_special_qp_cap(dev);
1093 static void mlx4_request_modules(struct mlx4_dev *dev)
1096 int has_ib_port = false;
1097 int has_eth_port = false;
1098 #define EN_DRV_NAME "mlx4_en"
1099 #define IB_DRV_NAME "mlx4_ib"
1101 for (port = 1; port <= dev->caps.num_ports; port++) {
1102 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
1104 else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
1105 has_eth_port = true;
1109 request_module_nowait(EN_DRV_NAME);
1110 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
1111 request_module_nowait(IB_DRV_NAME);
1115 * Change the port configuration of the device.
1116 * Every user of this function must hold the port mutex.
1118 int mlx4_change_port_types(struct mlx4_dev *dev,
1119 enum mlx4_port_type *port_types)
1125 for (port = 0; port < dev->caps.num_ports; port++) {
1126 /* Change the port type only if the new type is different
1127 * from the current, and not set to Auto */
1128 if (port_types[port] != dev->caps.port_type[port + 1])
1132 mlx4_unregister_device(dev);
1133 for (port = 1; port <= dev->caps.num_ports; port++) {
1134 mlx4_CLOSE_PORT(dev, port);
1135 dev->caps.port_type[port] = port_types[port - 1];
1136 err = mlx4_SET_PORT(dev, port, -1);
1138 mlx4_err(dev, "Failed to set port %d, aborting\n",
1143 mlx4_set_port_mask(dev);
1144 err = mlx4_register_device(dev);
1146 mlx4_err(dev, "Failed to register device\n");
1149 mlx4_request_modules(dev);
1156 static ssize_t show_port_type(struct device *dev,
1157 struct device_attribute *attr,
1160 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1162 struct mlx4_dev *mdev = info->dev;
1166 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
1168 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
1169 sprintf(buf, "auto (%s)\n", type);
1171 sprintf(buf, "%s\n", type);
1176 static int __set_port_type(struct mlx4_port_info *info,
1177 enum mlx4_port_type port_type)
1179 struct mlx4_dev *mdev = info->dev;
1180 struct mlx4_priv *priv = mlx4_priv(mdev);
1181 enum mlx4_port_type types[MLX4_MAX_PORTS];
1182 enum mlx4_port_type new_types[MLX4_MAX_PORTS];
1186 if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
1188 "Requested port type for port %d is not supported on this HCA\n",
1193 mlx4_stop_sense(mdev);
1194 mutex_lock(&priv->port_mutex);
1195 info->tmp_type = port_type;
1197 /* Possible type is always the one that was delivered */
1198 mdev->caps.possible_type[info->port] = info->tmp_type;
1200 for (i = 0; i < mdev->caps.num_ports; i++) {
1201 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
1202 mdev->caps.possible_type[i+1];
1203 if (types[i] == MLX4_PORT_TYPE_AUTO)
1204 types[i] = mdev->caps.port_type[i+1];
1207 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
1208 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
1209 for (i = 1; i <= mdev->caps.num_ports; i++) {
1210 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
1211 mdev->caps.possible_type[i] = mdev->caps.port_type[i];
1217 mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
1221 mlx4_do_sense_ports(mdev, new_types, types);
1223 err = mlx4_check_port_params(mdev, new_types);
1227 /* We are about to apply the changes after the configuration
1228 * was verified, no need to remember the temporary types
1230 for (i = 0; i < mdev->caps.num_ports; i++)
1231 priv->port[i + 1].tmp_type = 0;
1233 err = mlx4_change_port_types(mdev, new_types);
1236 mlx4_start_sense(mdev);
1237 mutex_unlock(&priv->port_mutex);
1242 static ssize_t set_port_type(struct device *dev,
1243 struct device_attribute *attr,
1244 const char *buf, size_t count)
1246 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1248 struct mlx4_dev *mdev = info->dev;
1249 enum mlx4_port_type port_type;
1250 static DEFINE_MUTEX(set_port_type_mutex);
1253 mutex_lock(&set_port_type_mutex);
1255 if (!strcmp(buf, "ib\n")) {
1256 port_type = MLX4_PORT_TYPE_IB;
1257 } else if (!strcmp(buf, "eth\n")) {
1258 port_type = MLX4_PORT_TYPE_ETH;
1259 } else if (!strcmp(buf, "auto\n")) {
1260 port_type = MLX4_PORT_TYPE_AUTO;
1262 mlx4_err(mdev, "%s is not a supported port type\n", buf);
1267 err = __set_port_type(info, port_type);
1270 mutex_unlock(&set_port_type_mutex);
1272 return err ? err : count;
1283 static inline int int_to_ibta_mtu(int mtu)
1286 case 256: return IB_MTU_256;
1287 case 512: return IB_MTU_512;
1288 case 1024: return IB_MTU_1024;
1289 case 2048: return IB_MTU_2048;
1290 case 4096: return IB_MTU_4096;
1295 static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
1298 case IB_MTU_256: return 256;
1299 case IB_MTU_512: return 512;
1300 case IB_MTU_1024: return 1024;
1301 case IB_MTU_2048: return 2048;
1302 case IB_MTU_4096: return 4096;
1307 static ssize_t show_port_ib_mtu(struct device *dev,
1308 struct device_attribute *attr,
1311 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1313 struct mlx4_dev *mdev = info->dev;
1315 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
1316 mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1318 sprintf(buf, "%d\n",
1319 ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
1323 static ssize_t set_port_ib_mtu(struct device *dev,
1324 struct device_attribute *attr,
1325 const char *buf, size_t count)
1327 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
1329 struct mlx4_dev *mdev = info->dev;
1330 struct mlx4_priv *priv = mlx4_priv(mdev);
1331 int err, port, mtu, ibta_mtu = -1;
1333 if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
1334 mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
1338 err = kstrtoint(buf, 0, &mtu);
1340 ibta_mtu = int_to_ibta_mtu(mtu);
1342 if (err || ibta_mtu < 0) {
1343 mlx4_err(mdev, "%s is not a valid IBTA MTU\n", buf);
1347 mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
1349 mlx4_stop_sense(mdev);
1350 mutex_lock(&priv->port_mutex);
1351 mlx4_unregister_device(mdev);
1352 for (port = 1; port <= mdev->caps.num_ports; port++) {
1353 mlx4_CLOSE_PORT(mdev, port);
1354 err = mlx4_SET_PORT(mdev, port, -1);
1356 mlx4_err(mdev, "Failed to set port %d, aborting\n",
1361 err = mlx4_register_device(mdev);
1363 mutex_unlock(&priv->port_mutex);
1364 mlx4_start_sense(mdev);
1365 return err ? err : count;
1368 /* bond for multi-function device */
1369 #define MAX_MF_BOND_ALLOWED_SLAVES 63
1370 static int mlx4_mf_bond(struct mlx4_dev *dev)
1374 struct mlx4_slaves_pport slaves_port1;
1375 struct mlx4_slaves_pport slaves_port2;
1376 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
1378 slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
1379 slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
1380 bitmap_and(slaves_port_1_2,
1381 slaves_port1.slaves, slaves_port2.slaves,
1382 dev->persist->num_vfs + 1);
1384 /* only single-port VFs are allowed */
1385 if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
1386 mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
1390 /* The number of virtual functions is the total number of functions
1391 * minus one physical function per port.
1393 nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
1394 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
1396 /* limit on maximum allowed VFs */
1397 if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
1398 mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
1399 nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
1403 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1404 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
1408 err = mlx4_bond_mac_table(dev);
1411 err = mlx4_bond_vlan_table(dev);
1414 err = mlx4_bond_fs_rules(dev);
1420 (void)mlx4_unbond_vlan_table(dev);
1422 (void)mlx4_unbond_mac_table(dev);
1426 static int mlx4_mf_unbond(struct mlx4_dev *dev)
1430 ret = mlx4_unbond_fs_rules(dev);
1432 mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
1433 ret1 = mlx4_unbond_mac_table(dev);
1435 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
1438 ret1 = mlx4_unbond_vlan_table(dev);
1440 mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
1446 int mlx4_bond(struct mlx4_dev *dev)
1449 struct mlx4_priv *priv = mlx4_priv(dev);
1451 mutex_lock(&priv->bond_mutex);
1453 if (!mlx4_is_bonded(dev)) {
1454 ret = mlx4_do_bond(dev, true);
1456 mlx4_err(dev, "Failed to bond device: %d\n", ret);
1457 if (!ret && mlx4_is_master(dev)) {
1458 ret = mlx4_mf_bond(dev);
1460 mlx4_err(dev, "bond for multifunction failed\n");
1461 mlx4_do_bond(dev, false);
1466 mutex_unlock(&priv->bond_mutex);
1468 mlx4_dbg(dev, "Device is bonded\n");
1472 EXPORT_SYMBOL_GPL(mlx4_bond);
1474 int mlx4_unbond(struct mlx4_dev *dev)
1477 struct mlx4_priv *priv = mlx4_priv(dev);
1479 mutex_lock(&priv->bond_mutex);
1481 if (mlx4_is_bonded(dev)) {
1484 ret = mlx4_do_bond(dev, false);
1486 mlx4_err(dev, "Failed to unbond device: %d\n", ret);
1487 if (mlx4_is_master(dev))
1488 ret2 = mlx4_mf_unbond(dev);
1490 mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
1495 mutex_unlock(&priv->bond_mutex);
1497 mlx4_dbg(dev, "Device is unbonded\n");
1501 EXPORT_SYMBOL_GPL(mlx4_unbond);
1504 int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
1506 u8 port1 = v2p->port1;
1507 u8 port2 = v2p->port2;
1508 struct mlx4_priv *priv = mlx4_priv(dev);
1511 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
1514 mutex_lock(&priv->bond_mutex);
1516 /* zero means keep current mapping for this port */
1518 port1 = priv->v2p.port1;
1520 port2 = priv->v2p.port2;
1522 if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
1523 (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
1524 (port1 == 2 && port2 == 1)) {
1525 /* Besides the boundary checks, cross mapping makes
1526 * no sense and is therefore not allowed */
1528 } else if ((port1 == priv->v2p.port1) &&
1529 (port2 == priv->v2p.port2)) {
1532 err = mlx4_virt2phy_port_map(dev, port1, port2);
1534 mlx4_dbg(dev, "port map changed: [%d][%d]\n",
1536 priv->v2p.port1 = port1;
1537 priv->v2p.port2 = port2;
1539 mlx4_err(dev, "Failed to change port map: %d\n", err);
1543 mutex_unlock(&priv->bond_mutex);
1546 EXPORT_SYMBOL_GPL(mlx4_port_map_set);
1548 static int mlx4_load_fw(struct mlx4_dev *dev)
1550 struct mlx4_priv *priv = mlx4_priv(dev);
1553 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
1554 GFP_HIGHUSER | __GFP_NOWARN, 0);
1555 if (!priv->fw.fw_icm) {
1556 mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
1560 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
1562 mlx4_err(dev, "MAP_FA command failed, aborting\n");
1566 err = mlx4_RUN_FW(dev);
1568 mlx4_err(dev, "RUN_FW command failed, aborting\n");
1578 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
1582 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
1585 struct mlx4_priv *priv = mlx4_priv(dev);
1589 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
1591 ((u64) (MLX4_CMPT_TYPE_QP *
1592 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1593 cmpt_entry_sz, dev->caps.num_qps,
1594 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1599 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
1601 ((u64) (MLX4_CMPT_TYPE_SRQ *
1602 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1603 cmpt_entry_sz, dev->caps.num_srqs,
1604 dev->caps.reserved_srqs, 0, 0);
1608 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
1610 ((u64) (MLX4_CMPT_TYPE_CQ *
1611 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1612 cmpt_entry_sz, dev->caps.num_cqs,
1613 dev->caps.reserved_cqs, 0, 0);
1617 num_eqs = dev->phys_caps.num_phys_eqs;
1618 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
1620 ((u64) (MLX4_CMPT_TYPE_EQ *
1621 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
1622 cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
1629 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1632 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1635 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1641 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
1642 struct mlx4_init_hca_param *init_hca, u64 icm_size)
1644 struct mlx4_priv *priv = mlx4_priv(dev);
1649 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
1651 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
1655 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
1656 (unsigned long long) icm_size >> 10,
1657 (unsigned long long) aux_pages << 2);
1659 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
1660 GFP_HIGHUSER | __GFP_NOWARN, 0);
1661 if (!priv->fw.aux_icm) {
1662 mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
1666 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
1668 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
1672 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
1674 mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
1679 num_eqs = dev->phys_caps.num_phys_eqs;
1680 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
1681 init_hca->eqc_base, dev_cap->eqc_entry_sz,
1682 num_eqs, num_eqs, 0, 0);
1684 mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
1685 goto err_unmap_cmpt;
1689 * Reserved MTT entries must be aligned up to a cacheline
1690 * boundary, since the FW will write to them, while the driver
1691 * writes to all other MTT entries. (The variable
1692 * dev->caps.mtt_entry_sz below is really the MTT segment
1693 * size, not the raw entry size)
1695 dev->caps.reserved_mtts =
1696 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
1697 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
1699 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
1701 dev->caps.mtt_entry_sz,
1703 dev->caps.reserved_mtts, 1, 0);
1705 mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
1709 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
1710 init_hca->dmpt_base,
1711 dev_cap->dmpt_entry_sz,
1713 dev->caps.reserved_mrws, 1, 1);
1715 mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
1719 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
1721 dev_cap->qpc_entry_sz,
1723 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1726 mlx4_err(dev, "Failed to map QP context memory, aborting\n");
1727 goto err_unmap_dmpt;
1730 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
1731 init_hca->auxc_base,
1732 dev_cap->aux_entry_sz,
1734 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1737 mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
1741 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
1742 init_hca->altc_base,
1743 dev_cap->altc_entry_sz,
1745 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1748 mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
1749 goto err_unmap_auxc;
1752 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
1753 init_hca->rdmarc_base,
1754 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
1756 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
1759 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
1760 goto err_unmap_altc;
1763 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
1765 dev_cap->cqc_entry_sz,
1767 dev->caps.reserved_cqs, 0, 0);
1769 mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
1770 goto err_unmap_rdmarc;
1773 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
1774 init_hca->srqc_base,
1775 dev_cap->srq_entry_sz,
1777 dev->caps.reserved_srqs, 0, 0);
1779 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1784 * For flow steering device managed mode it is required to use
1785 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1786 * required, but for simplicity just map the whole multicast
1787 * group table now. The table isn't very big and it's a lot
1788 * easier than trying to track ref counts.
1790 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1792 mlx4_get_mgm_entry_size(dev),
1793 dev->caps.num_mgms + dev->caps.num_amgms,
1794 dev->caps.num_mgms + dev->caps.num_amgms,
1797 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1804 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1807 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1810 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1813 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1816 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1819 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1822 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1825 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1828 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1831 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1832 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1833 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1834 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1837 mlx4_UNMAP_ICM_AUX(dev);
1840 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1845 static void mlx4_free_icms(struct mlx4_dev *dev)
1847 struct mlx4_priv *priv = mlx4_priv(dev);
1849 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1850 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1851 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1852 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1853 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1854 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1855 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1856 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1857 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1858 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1859 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1860 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1861 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1862 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1864 mlx4_UNMAP_ICM_AUX(dev);
1865 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1868 static void mlx4_slave_exit(struct mlx4_dev *dev)
1870 struct mlx4_priv *priv = mlx4_priv(dev);
1872 mutex_lock(&priv->cmd.slave_cmd_mutex);
1873 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1875 mlx4_warn(dev, "Failed to close slave function\n");
1876 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1879 static int map_bf_area(struct mlx4_dev *dev)
1881 struct mlx4_priv *priv = mlx4_priv(dev);
1882 resource_size_t bf_start;
1883 resource_size_t bf_len;
1886 if (!dev->caps.bf_reg_size)
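/* BlueFlame registers live in BAR 2 immediately after the num_uars
 * UAR pages; map the remainder of the BAR write-combined. */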
1889 bf_start = pci_resource_start(dev->persist->pdev, 2) +
1890 (dev->caps.num_uars << PAGE_SHIFT);
1891 bf_len = pci_resource_len(dev->persist->pdev, 2) -
1892 (dev->caps.num_uars << PAGE_SHIFT);
1893 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1894 if (!priv->bf_mapping)
1900 static void unmap_bf_area(struct mlx4_dev *dev)
1902 if (mlx4_priv(dev)->bf_mapping)
1903 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1906 u64 mlx4_read_clock(struct mlx4_dev *dev)
1908 u32 clockhi, clocklo, clockhi1;
1911 struct mlx4_priv *priv = mlx4_priv(dev);
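/* The 64-bit cycle counter is read as two 32-bit halves: re-read the
 * high word and retry (up to 10 times) if it changed, guarding against
 * the low word wrapping between the two reads. */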
1913 for (i = 0; i < 10; i++) {
1914 clockhi = swab32(readl(priv->clock_mapping));
1915 clocklo = swab32(readl(priv->clock_mapping + 4));
1916 clockhi1 = swab32(readl(priv->clock_mapping));
1917 if (clockhi == clockhi1)
1921 cycles = (u64) clockhi << 32 | (u64) clocklo;
1925 EXPORT_SYMBOL_GPL(mlx4_read_clock);
1928 static int map_internal_clock(struct mlx4_dev *dev)
1930 struct mlx4_priv *priv = mlx4_priv(dev);
1932 priv->clock_mapping =
1933 ioremap(pci_resource_start(dev->persist->pdev,
1934 priv->fw.clock_bar) +
1935 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1937 if (!priv->clock_mapping)
1943 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1944 struct mlx4_clock_params *params)
1946 struct mlx4_priv *priv = mlx4_priv(dev);
1948 if (mlx4_is_slave(dev))
1954 params->bar = priv->fw.clock_bar;
1955 params->offset = priv->fw.clock_offset;
1956 params->size = MLX4_CLOCK_SIZE;
1960 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
1962 static void unmap_internal_clock(struct mlx4_dev *dev)
1964 struct mlx4_priv *priv = mlx4_priv(dev);
1966 if (priv->clock_mapping)
1967 iounmap(priv->clock_mapping);
1970 static void mlx4_close_hca(struct mlx4_dev *dev)
1972 unmap_internal_clock(dev);
1974 if (mlx4_is_slave(dev))
1975 mlx4_slave_exit(dev);
1977 mlx4_CLOSE_HCA(dev, 0);
1978 mlx4_free_icms(dev);
1982 static void mlx4_close_fw(struct mlx4_dev *dev)
1984 if (!mlx4_is_slave(dev)) {
1986 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
1990 static int mlx4_comm_check_offline(struct mlx4_dev *dev)
1992 #define COMM_CHAN_OFFLINE_OFFSET 0x09
1997 struct mlx4_priv *priv = mlx4_priv(dev);
1999 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
2000 while (time_before(jiffies, end)) {
2001 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
2002 MLX4_COMM_CHAN_FLAGS));
2003 offline_bit = (comm_flags &
2004 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
2008 /* If device removal has been requested,
2009 * do not continue retrying.
2011 if (dev->persist->interface_state &
2012 MLX4_INTERFACE_STATE_NOWAIT)
2015 /* As part of the AER/reset flow, the PF may need around
2016 * 100 msec to load. We therefore sleep for 100 msec to
2017 * allow other tasks to make use of the CPU during this time.
2022 mlx4_err(dev, "Communication channel is offline.\n");
2026 static void mlx4_reset_vf_support(struct mlx4_dev *dev)
2028 #define COMM_CHAN_RST_OFFSET 0x1e
2030 struct mlx4_priv *priv = mlx4_priv(dev);
2034 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
2035 MLX4_COMM_CHAN_CAPS));
2036 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
2039 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
2042 static int mlx4_init_slave(struct mlx4_dev *dev)
2044 struct mlx4_priv *priv = mlx4_priv(dev);
2045 u64 dma = (u64) priv->mfunc.vhcr_dma;
2046 int ret_from_reset = 0;
2048 u32 cmd_channel_ver;
2050 if (atomic_read(&pf_loading)) {
2051 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
2052 return -EPROBE_DEFER;
2055 mutex_lock(&priv->cmd.slave_cmd_mutex);
2056 priv->cmd.max_cmds = 1;
2057 if (mlx4_comm_check_offline(dev)) {
2058 mlx4_err(dev, "PF is not responsive, skipping initialization\n");
2062 mlx4_reset_vf_support(dev);
2063 mlx4_warn(dev, "Sending reset\n");
2064 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
2065 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
2066 /* If we are in the middle of FLR, the slave will retry
2067 * NUM_OF_RESET_RETRIES times before giving up. */
2068 if (ret_from_reset) {
2069 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
2070 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
2071 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2072 return -EPROBE_DEFER;
2077 /* check the driver version - the slave I/F revision
2078 * must match the master's */
2079 slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
2080 cmd_channel_ver = mlx4_comm_get_version();
2082 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
2083 MLX4_COMM_GET_IF_REV(slave_read)) {
2084 mlx4_err(dev, "slave driver version is not supported by the master\n");
2088 mlx4_warn(dev, "Sending vhcr0\n");
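/* Hand the 64-bit VHCR DMA address to the PF 16 bits at a time,
 * high word first; VHCR_EN carries the lowest 16 bits and enables
 * the channel. */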
2089 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
2090 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2092 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
2093 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2095 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
2096 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2098 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
2099 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2102 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2106 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
2108 mutex_unlock(&priv->cmd.slave_cmd_mutex);
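/*
 * Editor's note: the VHCR handshake above hands the 64-bit DMA address to
 * the PF in 16-bit slices, high word first, since each comm command
 * carries only a 16-bit parameter:
 *
 *	VHCR0   <- dma >> 48	// bits 63..48
 *	VHCR1   <- dma >> 32	// bits 47..32 (low 16 bits of the shift)
 *	VHCR2   <- dma >> 16	// bits 31..16
 *	VHCR_EN <- dma		// bits 15..0, then enable the VHCR
 *
 * On any failure, the reset command above returns the channel to a known
 * state before the mutex is dropped.
 */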
2112 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
2116 for (i = 1; i <= dev->caps.num_ports; i++) {
2117 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
2118 dev->caps.gid_table_len[i] =
2119 mlx4_get_slave_num_gids(dev, 0, i);
2121 dev->caps.gid_table_len[i] = 1;
2122 dev->caps.pkey_table_len[i] =
2123 dev->phys_caps.pkey_phys_table_len[i] - 1;
2127 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
2129 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
2131 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
2133 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
2137 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
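/*
 * Worked example (editor's note): an MGM entry of 2^i bytes holds
 * 4 * (2^i / 16 - 2) QPs, so i = 10 yields 4 * (1024 / 16 - 2) =
 * 4 * 62 = 248 QPs per entry. The loop returns the smallest such i,
 * or -1 if even the largest entry size cannot fit qp_per_entry QPs.
 */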
2140 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
2142 switch (dmfs_high_steer_mode) {
2143 case MLX4_STEERING_DMFS_A0_DEFAULT:
2144 return "default performance";
2146 case MLX4_STEERING_DMFS_A0_DYNAMIC:
2147 return "dynamic hybrid mode";
2149 case MLX4_STEERING_DMFS_A0_STATIC:
2150 return "performance optimized for limited rule configuration (static)";
2152 case MLX4_STEERING_DMFS_A0_DISABLE:
2153 return "disabled performance optimized steering";
2155 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
2156 return "performance optimized steering not supported";
2159 return "Unrecognized mode";
2163 #define MLX4_DMFS_A0_STEERING (1UL << 2)
2165 static void choose_steering_mode(struct mlx4_dev *dev,
2166 struct mlx4_dev_cap *dev_cap)
2168 if (mlx4_log_num_mgm_entry_size <= 0) {
2169 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
2170 if (dev->caps.dmfs_high_steer_mode ==
2171 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2172 mlx4_err(dev, "DMFS high rate mode not supported\n");
2174 dev->caps.dmfs_high_steer_mode =
2175 MLX4_STEERING_DMFS_A0_STATIC;
2179 if (mlx4_log_num_mgm_entry_size <= 0 &&
2180 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
2181 (!mlx4_is_mfunc(dev) ||
2182 (dev_cap->fs_max_num_qp_per_entry >=
2183 (dev->persist->num_vfs + 1))) &&
2184 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
2185 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
2186 dev->oper_log_mgm_entry_size =
2187 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
2188 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
2189 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
2190 dev->caps.fs_log_max_ucast_qp_range_size =
2191 dev_cap->fs_log_max_ucast_qp_range_size;
2193 if (dev->caps.dmfs_high_steer_mode !=
2194 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2195 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
2196 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
2197 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2198 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
2200 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
2202 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
2203 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2204 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
2206 dev->oper_log_mgm_entry_size =
2207 mlx4_log_num_mgm_entry_size > 0 ?
2208 mlx4_log_num_mgm_entry_size :
2209 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
2210 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
2212 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
2213 mlx4_steering_mode_str(dev->caps.steering_mode),
2214 dev->oper_log_mgm_entry_size,
2215 mlx4_log_num_mgm_entry_size);
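/*
 * Editor's sketch of the modparam decoding above: a value <= 0 asks for
 * device-managed flow steering, and the negated value doubles as a flag
 * mask. For example, with mlx4_log_num_mgm_entry_size = -5:
 *
 *	-(-5) = 5 = 0b101
 *	5 & MLX4_DMFS_A0_STEERING	// bit 2 set, request A0 static mode
 *
 * A positive value instead fixes oper_log_mgm_entry_size directly on the
 * B0/A0 fallback path.
 */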
2218 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
2219 struct mlx4_dev_cap *dev_cap)
2221 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2222 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
2223 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
2225 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
2227 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
2228 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
2231 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
2234 struct mlx4_port_cap port_cap;
2236 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2239 for (i = 1; i <= dev->caps.num_ports; i++) {
2240 if (mlx4_dev_port(dev, i, &port_cap)) {
2242 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
2243 } else if ((dev->caps.dmfs_high_steer_mode !=
2244 MLX4_STEERING_DMFS_A0_DEFAULT) &&
2245 (port_cap.dmfs_optimized_state ==
2246 !!(dev->caps.dmfs_high_steer_mode ==
2247 MLX4_STEERING_DMFS_A0_DISABLE))) {
2249 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
2250 dmfs_high_rate_steering_mode_str(
2251 dev->caps.dmfs_high_steer_mode),
2252 (port_cap.dmfs_optimized_state ?
2253 "enabled" : "disabled"));
2260 static int mlx4_init_fw(struct mlx4_dev *dev)
2262 struct mlx4_mod_stat_cfg mlx4_cfg;
2265 if (!mlx4_is_slave(dev)) {
2266 err = mlx4_QUERY_FW(dev);
2269 mlx4_info(dev, "non-primary physical function, skipping\n");
2271 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2275 err = mlx4_load_fw(dev);
2277 mlx4_err(dev, "Failed to start FW, aborting\n");
2281 mlx4_cfg.log_pg_sz_m = 1;
2282 mlx4_cfg.log_pg_sz = 0;
2283 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2285 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2291 static int mlx4_init_hca(struct mlx4_dev *dev)
2293 struct mlx4_priv *priv = mlx4_priv(dev);
2294 struct mlx4_init_hca_param *init_hca = NULL;
2295 struct mlx4_dev_cap *dev_cap = NULL;
2296 struct mlx4_adapter adapter;
2297 struct mlx4_profile profile;
2299 struct mlx4_config_dev_params params;
2302 if (!mlx4_is_slave(dev)) {
2303 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
2304 init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL);
2306 if (!dev_cap || !init_hca) {
2311 err = mlx4_dev_cap(dev, dev_cap);
2313 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2317 choose_steering_mode(dev, dev_cap);
2318 choose_tunnel_offload_mode(dev, dev_cap);
2320 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2321 mlx4_is_master(dev))
2322 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2324 err = mlx4_get_phys_port_id(dev);
2326 mlx4_err(dev, "Fail to get physical port id\n");
2328 if (mlx4_is_master(dev))
2329 mlx4_parav_master_pf_caps(dev);
2331 if (mlx4_low_memory_profile()) {
2332 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2333 profile = low_mem_profile;
2335 profile = default_profile;
2337 if (dev->caps.steering_mode ==
2338 MLX4_STEERING_MODE_DEVICE_MANAGED)
2339 profile.num_mcg = MLX4_FS_NUM_MCG;
2341 icm_size = mlx4_make_profile(dev, &profile, dev_cap,
2343 if ((long long) icm_size < 0) {
2348 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2350 if (enable_4k_uar || !dev->persist->num_vfs) {
2351 init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
2352 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
2353 init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2355 init_hca->log_uar_sz = ilog2(dev->caps.num_uars);
2356 init_hca->uar_page_sz = PAGE_SHIFT - 12;
2359 init_hca->mw_enabled = 0;
2360 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2361 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2362 init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE;
2364 err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size);
2368 err = mlx4_INIT_HCA(dev, init_hca);
2370 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
2374 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2375 err = mlx4_query_func(dev, dev_cap);
2377 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2379 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2380 dev->caps.num_eqs = dev_cap->max_eqs;
2381 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
2382 dev->caps.reserved_uars = dev_cap->reserved_uars;
2387 * If timestamping (TS) is supported by the FW,
2388 * read the HCA core-clock frequency via the QUERY_HCA command. */
2390 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2391 err = mlx4_QUERY_HCA(dev, init_hca);
2393 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
2394 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2396 dev->caps.hca_core_clock =
2397 init_hca->hca_core_clock;
2400 /* If we got an HCA frequency of 0, disable timestamping
2401 * to avoid dividing by zero
2403 if (!dev->caps.hca_core_clock) {
2404 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2406 "HCA frequency is 0 - timestamping is not supported\n");
2407 } else if (map_internal_clock(dev)) {
2409 * Map the internal clock;
2410 * on failure, disable timestamping. */
2412 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2413 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2417 if (dev->caps.dmfs_high_steer_mode !=
2418 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2419 if (mlx4_validate_optimized_steering(dev))
2420 mlx4_warn(dev, "Optimized steering validation failed\n");
2422 if (dev->caps.dmfs_high_steer_mode ==
2423 MLX4_STEERING_DMFS_A0_DISABLE) {
2424 dev->caps.dmfs_high_rate_qpn_base =
2425 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2426 dev->caps.dmfs_high_rate_qpn_range =
2427 MLX4_A0_STEERING_TABLE_SIZE;
2430 mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
2431 dmfs_high_rate_steering_mode_str(
2432 dev->caps.dmfs_high_steer_mode));
2435 err = mlx4_init_slave(dev);
2437 if (err != -EPROBE_DEFER)
2438 mlx4_err(dev, "Failed to initialize slave\n");
2442 err = mlx4_slave_cap(dev);
2444 mlx4_err(dev, "Failed to obtain slave caps\n");
2449 if (map_bf_area(dev))
2450 mlx4_dbg(dev, "Failed to map blue flame area\n");
2452 /* Only the master sets the ports; all other functions get them from it. */
2453 if (!mlx4_is_slave(dev))
2454 mlx4_set_port_mask(dev);
2456 err = mlx4_QUERY_ADAPTER(dev, &adapter);
2458 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2462 /* Query CONFIG_DEV parameters */
2463 err = mlx4_config_dev_retrieval(dev, ¶ms);
2464 if (err && err != -EOPNOTSUPP) {
2465 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2467 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2468 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2470 priv->eq_table.inta_pin = adapter.inta_pin;
2471 memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
2477 unmap_internal_clock(dev);
2480 if (mlx4_is_slave(dev))
2481 mlx4_slave_destroy_special_qp_cap(dev);
2484 if (mlx4_is_slave(dev))
2485 mlx4_slave_exit(dev);
2487 mlx4_CLOSE_HCA(dev, 0);
2490 if (!mlx4_is_slave(dev))
2491 mlx4_free_icms(dev);
2500 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2502 struct mlx4_priv *priv = mlx4_priv(dev);
2505 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2508 if (!dev->caps.max_counters)
2511 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
2512 /* reserve last counter index for sink counter */
2513 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
2515 nent_pow2 - dev->caps.max_counters + 1);
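/*
 * Worked example (editor's note): with max_counters = 127 the bitmap is
 * sized to nent_pow2 = 128 and the top 128 - 127 + 1 = 2 indices are held
 * back from the allocator: the pow2 padding slot 127 plus index 126,
 * which serves as the sink counter (MLX4_SINK_COUNTER_INDEX).
 */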
2518 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2520 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2523 if (!dev->caps.max_counters)
2526 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2529 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2531 struct mlx4_priv *priv = mlx4_priv(dev);
2534 for (port = 0; port < dev->caps.num_ports; port++)
2535 if (priv->def_counter[port] != -1)
2536 mlx4_counter_free(dev, priv->def_counter[port]);
2539 static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2541 struct mlx4_priv *priv = mlx4_priv(dev);
2545 for (port = 0; port < dev->caps.num_ports; port++)
2546 priv->def_counter[port] = -1;
2548 for (port = 0; port < dev->caps.num_ports; port++) {
2549 err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER);
2551 if (!err || err == -ENOSPC) {
2552 priv->def_counter[port] = idx;
2553 } else if (err == -ENOENT) {
2556 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2557 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2558 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2559 MLX4_SINK_COUNTER_INDEX(dev));
2562 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2563 __func__, port + 1, err);
2564 mlx4_cleanup_default_counters(dev);
2568 mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2569 __func__, priv->def_counter[port], port + 1);
2575 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2577 struct mlx4_priv *priv = mlx4_priv(dev);
2579 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2582 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
2584 *idx = MLX4_SINK_COUNTER_INDEX(dev);
2591 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
2593 u32 in_modifier = RES_COUNTER | (((u32)usage & 3) << 30);
2597 if (mlx4_is_mfunc(dev)) {
2598 err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
2599 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2600 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2602 *idx = get_param_l(&out_param);
2606 return __mlx4_counter_alloc(dev, idx);
2608 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
2610 static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2613 struct mlx4_cmd_mailbox *if_stat_mailbox;
2615 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
2617 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2618 if (IS_ERR(if_stat_mailbox))
2619 return PTR_ERR(if_stat_mailbox);
2621 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2622 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
2625 mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2629 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2631 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2634 if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2637 __mlx4_clear_if_stat(dev, idx);
2639 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2643 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2647 if (mlx4_is_mfunc(dev)) {
2648 set_param_l(&in_param, idx);
2649 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2650 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2654 __mlx4_counter_free(dev, idx);
2656 EXPORT_SYMBOL_GPL(mlx4_counter_free);
2658 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2660 struct mlx4_priv *priv = mlx4_priv(dev);
2662 return priv->def_counter[port - 1];
2664 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
2666 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2668 struct mlx4_priv *priv = mlx4_priv(dev);
2670 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2672 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2674 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2676 struct mlx4_priv *priv = mlx4_priv(dev);
2678 return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2680 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2682 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2684 struct mlx4_priv *priv = mlx4_priv(dev);
2691 get_random_bytes((char *)&guid, sizeof(guid));
2692 guid &= ~(cpu_to_be64(1ULL << 56));
2693 guid |= cpu_to_be64(1ULL << 57);
2694 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
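/*
 * Editor's note on the bit manipulation above: the GUID is kept in
 * big-endian form, so cpu_to_be64(1ULL << 56) addresses bit 0 of the most
 * significant byte, i.e. the first byte on the wire:
 *
 *	guid &= ~cpu_to_be64(1ULL << 56);  // clear I/G bit: unicast EUI-64
 *	guid |=  cpu_to_be64(1ULL << 57);  // set U/L bit: locally assigned
 */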
2697 static int mlx4_setup_hca(struct mlx4_dev *dev)
2699 struct mlx4_priv *priv = mlx4_priv(dev);
2702 __be32 ib_port_default_caps;
2704 err = mlx4_init_uar_table(dev);
2706 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2710 err = mlx4_uar_alloc(dev, &priv->driver_uar);
2712 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2713 goto err_uar_table_free;
2716 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
2718 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2723 err = mlx4_init_pd_table(dev);
2725 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2729 err = mlx4_init_xrcd_table(dev);
2731 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2732 goto err_pd_table_free;
2735 err = mlx4_init_mr_table(dev);
2737 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2738 goto err_xrcd_table_free;
2741 if (!mlx4_is_slave(dev)) {
2742 err = mlx4_init_mcg_table(dev);
2744 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2745 goto err_mr_table_free;
2747 err = mlx4_config_mad_demux(dev);
2749 mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2750 goto err_mcg_table_free;
2754 err = mlx4_init_eq_table(dev);
2756 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2757 goto err_mcg_table_free;
2760 err = mlx4_cmd_use_events(dev);
2762 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2763 goto err_eq_table_free;
2766 err = mlx4_NOP(dev);
2768 if (dev->flags & MLX4_FLAG_MSI_X) {
2769 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
2770 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2771 mlx4_warn(dev, "Trying again without MSI-X\n");
2773 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2774 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2775 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2781 mlx4_dbg(dev, "NOP command IRQ test passed\n");
2783 err = mlx4_init_cq_table(dev);
2785 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2789 err = mlx4_init_srq_table(dev);
2791 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2792 goto err_cq_table_free;
2795 err = mlx4_init_qp_table(dev);
2797 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2798 goto err_srq_table_free;
2801 if (!mlx4_is_slave(dev)) {
2802 err = mlx4_init_counters_table(dev);
2803 if (err && err != -ENOENT) {
2804 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2805 goto err_qp_table_free;
2809 err = mlx4_allocate_default_counters(dev);
2811 mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2812 goto err_counters_table_free;
2815 if (!mlx4_is_slave(dev)) {
2816 for (port = 1; port <= dev->caps.num_ports; port++) {
2817 ib_port_default_caps = 0;
2818 err = mlx4_get_port_ib_caps(dev, port,
2819 &ib_port_default_caps);
2821 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2823 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2825 /* initialize per-slave default ib port capabilities */
2826 if (mlx4_is_master(dev)) {
2828 for (i = 0; i < dev->num_slaves; i++) {
2829 if (i == mlx4_master_func_num(dev))
2831 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
2832 ib_port_default_caps;
2836 if (mlx4_is_mfunc(dev))
2837 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2839 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2841 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2842 dev->caps.pkey_table_len[port] : -1);
2844 mlx4_err(dev, "Failed to set port %d, aborting\n",
2846 goto err_default_counters_free;
2853 err_default_counters_free:
2854 mlx4_cleanup_default_counters(dev);
2856 err_counters_table_free:
2857 if (!mlx4_is_slave(dev))
2858 mlx4_cleanup_counters_table(dev);
2861 mlx4_cleanup_qp_table(dev);
2864 mlx4_cleanup_srq_table(dev);
2867 mlx4_cleanup_cq_table(dev);
2870 mlx4_cmd_use_polling(dev);
2873 mlx4_cleanup_eq_table(dev);
2876 if (!mlx4_is_slave(dev))
2877 mlx4_cleanup_mcg_table(dev);
2880 mlx4_cleanup_mr_table(dev);
2882 err_xrcd_table_free:
2883 mlx4_cleanup_xrcd_table(dev);
2886 mlx4_cleanup_pd_table(dev);
2892 mlx4_uar_free(dev, &priv->driver_uar);
2895 mlx4_cleanup_uar_table(dev);
2899 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2901 int requested_cpu = 0;
2902 struct mlx4_priv *priv = mlx4_priv(dev);
2907 if (eqn > dev->caps.num_comp_vectors)
2910 for (i = 1; i < port; i++)
2911 off += mlx4_get_eqs_per_port(dev, i);
2913 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
2915 /* Meaning EQs are shared, and this call comes from the second port */
2916 if (requested_cpu < 0)
2919 eq = &priv->eq_table.eq[eqn];
2921 if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
2924 cpumask_set_cpu(requested_cpu, eq->affinity_mask);
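/*
 * Worked example (editor's sketch): assume two ports with 4 EQs each and
 * MLX4_EQ_ASYNC = 0. For port 2, off = 4, so its first completion vector
 * eqn = 5 maps to requested_cpu = 5 - 4 - 1 = 0, hinting port 2's first
 * EQ to CPU 0. A negative result means the EQs are shared with the first
 * port, so no affinity hint is set.
 */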
2929 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2931 struct mlx4_priv *priv = mlx4_priv(dev);
2932 struct msix_entry *entries;
2937 int nreq = min3(dev->caps.num_ports *
2938 (int)num_online_cpus() + 1,
2939 dev->caps.num_eqs - dev->caps.reserved_eqs,
2943 nreq = min_t(int, nreq, msi_x);
2945 entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
2949 for (i = 0; i < nreq; ++i)
2950 entries[i].entry = i;
2952 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2955 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
2959 /* 1 is reserved for events (asynchronous EQ) */
2960 dev->caps.num_comp_vectors = nreq - 1;
2962 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
2963 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2964 dev->caps.num_ports);
2966 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2967 if (i == MLX4_EQ_ASYNC)
2970 priv->eq_table.eq[i].irq =
2971 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2973 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
2974 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2975 dev->caps.num_ports);
2976 /* We don't set an affinity hint in this legacy EQ mode. */
2981 priv->eq_table.eq[i].actv_ports.ports);
2982 if (mlx4_init_affinity_hint(dev, port + 1, i))
2983 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
2986 /* We divide the EQs evenly between the two ports.
2987 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
2988 * refers to the number of EQs per port
2989 * (i.e. eqs_per_port). Theoretically, we would like to
2990 * write something like (i + 1) % eqs_per_port == 0.
2991 * However, since there's an asynchronous EQ, we have
2992 * to skip over it by comparing this condition to
2993 * !!((i + 1) > MLX4_EQ_ASYNC). */
2995 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
2997 (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
2998 !!((i + 1) > MLX4_EQ_ASYNC))
2999 /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
3000 * everything is shared anyway. */
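/*
 * Worked example (editor's note): with 8 completion vectors on a 2-port
 * HCA, eqs_per_port = 8 / 2 = 4 and MLX4_EQ_ASYNC = 0, so the test above
 * reduces to ((i + 1) % 4) == 1. It fires at i = 4 and i = 8, leaving
 * completion EQs 1-4 on port 1 and 5-8 on port 2.
 */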
3005 dev->flags |= MLX4_FLAG_MSI_X;
3012 dev->caps.num_comp_vectors = 1;
3014 BUG_ON(MLX4_EQ_ASYNC >= 2);
3015 for (i = 0; i < 2; ++i) {
3016 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
3017 if (i != MLX4_EQ_ASYNC) {
3018 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
3019 dev->caps.num_ports);
3024 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
3026 struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
3027 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
3030 err = devlink_port_register(devlink, &info->devlink_port, port);
3036 if (!mlx4_is_slave(dev)) {
3037 mlx4_init_mac_table(dev, &info->mac_table);
3038 mlx4_init_vlan_table(dev, &info->vlan_table);
3039 mlx4_init_roce_gid_table(dev, &info->gid_table);
3040 info->base_qpn = mlx4_get_base_qpn(dev, port);
3043 sprintf(info->dev_name, "mlx4_port%d", port);
3044 info->port_attr.attr.name = info->dev_name;
3045 if (mlx4_is_mfunc(dev)) {
3046 info->port_attr.attr.mode = 0444;
3048 info->port_attr.attr.mode = 0644;
3049 info->port_attr.store = set_port_type;
3051 info->port_attr.show = show_port_type;
3052 sysfs_attr_init(&info->port_attr.attr);
3054 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
3056 mlx4_err(dev, "Failed to create file for port %d\n", port);
3057 devlink_port_unregister(&info->devlink_port);
3062 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
3063 info->port_mtu_attr.attr.name = info->dev_mtu_name;
3064 if (mlx4_is_mfunc(dev)) {
3065 info->port_mtu_attr.attr.mode = 0444;
3067 info->port_mtu_attr.attr.mode = 0644;
3068 info->port_mtu_attr.store = set_port_ib_mtu;
3070 info->port_mtu_attr.show = show_port_ib_mtu;
3071 sysfs_attr_init(&info->port_mtu_attr.attr);
3073 err = device_create_file(&dev->persist->pdev->dev,
3074 &info->port_mtu_attr);
3076 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
3077 device_remove_file(&info->dev->persist->pdev->dev,
3079 devlink_port_unregister(&info->devlink_port);
3087 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
3092 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
3093 device_remove_file(&info->dev->persist->pdev->dev,
3094 &info->port_mtu_attr);
3095 devlink_port_unregister(&info->devlink_port);
3097 #ifdef CONFIG_RFS_ACCEL
3098 free_irq_cpu_rmap(info->rmap);
3103 static int mlx4_init_steering(struct mlx4_dev *dev)
3105 struct mlx4_priv *priv = mlx4_priv(dev);
3106 int num_entries = dev->caps.num_ports;
3109 priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer),
3114 for (i = 0; i < num_entries; i++)
3115 for (j = 0; j < MLX4_NUM_STEERS; j++) {
3116 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
3117 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
3122 static void mlx4_clear_steering(struct mlx4_dev *dev)
3124 struct mlx4_priv *priv = mlx4_priv(dev);
3125 struct mlx4_steer_index *entry, *tmp_entry;
3126 struct mlx4_promisc_qp *pqp, *tmp_pqp;
3127 int num_entries = dev->caps.num_ports;
3130 for (i = 0; i < num_entries; i++) {
3131 for (j = 0; j < MLX4_NUM_STEERS; j++) {
3132 list_for_each_entry_safe(pqp, tmp_pqp,
3133 &priv->steer[i].promisc_qps[j],
3135 list_del(&pqp->list);
3138 list_for_each_entry_safe(entry, tmp_entry,
3139 &priv->steer[i].steer_entries[j],
3141 list_del(&entry->list);
3142 list_for_each_entry_safe(pqp, tmp_pqp,
3145 list_del(&pqp->list);
3155 static int extended_func_num(struct pci_dev *pdev)
3157 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
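/*
 * Example (editor's note): a function at PCI address 0000:04:02.5 has
 * PCI_SLOT() = 2 and PCI_FUNC() = 5, so extended_func_num() returns
 * 2 * 8 + 5 = 21, a flat function index that is later compared against
 * the cumulative num_vfs ranges when deciding which VFs to probe.
 */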
3160 #define MLX4_OWNER_BASE 0x8069c
3161 #define MLX4_OWNER_SIZE 4
3163 static int mlx4_get_ownership(struct mlx4_dev *dev)
3165 void __iomem *owner;
3168 if (pci_channel_offline(dev->persist->pdev))
3171 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3175 mlx4_err(dev, "Failed to obtain ownership bit\n");
3184 static void mlx4_free_ownership(struct mlx4_dev *dev)
3186 void __iomem *owner;
3188 if (pci_channel_offline(dev->persist->pdev))
3191 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3195 mlx4_err(dev, "Failed to obtain ownership bit\n");
3203 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
3204 !!((flags) & MLX4_FLAG_MASTER))
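/*
 * Editor's note: SRIOV_VALID_STATE() accepts only the two self-consistent
 * flag combinations:
 *
 *	SRIOV  MASTER  valid
 *	  0      0     yes  (plain PF, or a VF)
 *	  1      1     yes  (PF with SR-IOV enabled)
 *	  0      1     no   (half-torn-down SR-IOV state)
 *	  1      0     no
 */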
3206 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
3207 u8 total_vfs, int existing_vfs, int reset_flow)
3209 u64 dev_flags = dev->flags;
3211 int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
3215 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
3222 atomic_inc(&pf_loading);
3223 if (dev->flags & MLX4_FLAG_SRIOV) {
3224 if (existing_vfs != total_vfs) {
3225 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
3226 existing_vfs, total_vfs);
3227 total_vfs = existing_vfs;
3231 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
3232 if (NULL == dev->dev_vfs) {
3233 mlx4_err(dev, "Failed to allocate memory for VFs\n");
3237 if (!(dev->flags & MLX4_FLAG_SRIOV)) {
3238 if (total_vfs > fw_enabled_sriov_vfs) {
3239 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
3240 total_vfs, fw_enabled_sriov_vfs);
3244 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
3245 err = pci_enable_sriov(pdev, total_vfs);
3248 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
3252 mlx4_warn(dev, "Running in master mode\n");
3253 dev_flags |= MLX4_FLAG_SRIOV |
3255 dev_flags &= ~MLX4_FLAG_SLAVE;
3256 dev->persist->num_vfs = total_vfs;
3261 atomic_dec(&pf_loading);
3263 dev->persist->num_vfs = 0;
3264 kfree(dev->dev_vfs);
3265 dev->dev_vfs = NULL;
3266 return dev_flags & ~MLX4_FLAG_MASTER;
3270 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
3273 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
3276 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
3277 /* Checking for 64 VFs as a limitation of CX2 */
3278 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
3279 requested_vfs >= 64) {
3280 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
3282 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
3287 static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3289 struct pci_dev *pdev = dev->persist->pdev;
3292 mutex_lock(&dev->persist->pci_status_mutex);
3293 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3294 err = pci_enable_device(pdev);
3296 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3298 mutex_unlock(&dev->persist->pci_status_mutex);
3303 static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3305 struct pci_dev *pdev = dev->persist->pdev;
3307 mutex_lock(&dev->persist->pci_status_mutex);
3308 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3309 pci_disable_device(pdev);
3310 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3312 mutex_unlock(&dev->persist->pci_status_mutex);
3315 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3316 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3319 struct mlx4_dev *dev;
3324 struct mlx4_dev_cap *dev_cap = NULL;
3325 int existing_vfs = 0;
3329 INIT_LIST_HEAD(&priv->ctx_list);
3330 spin_lock_init(&priv->ctx_lock);
3332 mutex_init(&priv->port_mutex);
3333 mutex_init(&priv->bond_mutex);
3335 INIT_LIST_HEAD(&priv->pgdir_list);
3336 mutex_init(&priv->pgdir_mutex);
3337 spin_lock_init(&priv->cmd.context_lock);
3339 INIT_LIST_HEAD(&priv->bf_list);
3340 mutex_init(&priv->bf_mutex);
3342 dev->rev_id = pdev->revision;
3343 dev->numa_node = dev_to_node(&pdev->dev);
3345 /* Detect if this device is a virtual function */
3346 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3347 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3348 dev->flags |= MLX4_FLAG_SLAVE;
3350 /* We reset the device and enable SRIOV only for physical
3351 * devices. Try to claim ownership on the device;
3352 * if already taken, skip -- do not allow multiple PFs */
3353 err = mlx4_get_ownership(dev);
3358 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3363 atomic_set(&priv->opreq_count, 0);
3364 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3367 * Now reset the HCA before we touch the PCI capabilities or
3368 * attempt a firmware command, since a boot ROM may have left
3369 * the HCA in an undefined state.
3371 err = mlx4_reset(dev);
3373 mlx4_err(dev, "Failed to reset HCA, aborting\n");
3378 dev->flags = MLX4_FLAG_MASTER;
3379 existing_vfs = pci_num_vf(pdev);
3381 dev->flags |= MLX4_FLAG_SRIOV;
3382 dev->persist->num_vfs = total_vfs;
3386 /* on load remove any previous indication of internal error,
3389 dev->persist->state = MLX4_DEVICE_STATE_UP;
3392 err = mlx4_cmd_init(dev);
3394 mlx4_err(dev, "Failed to init command interface, aborting\n");
3398 /* In slave functions, the communication channel must be initialized
3399 * before posting commands. Also, init num_slaves before calling mlx4_init_hca(). */
3401 if (mlx4_is_mfunc(dev)) {
3402 if (mlx4_is_master(dev)) {
3403 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3406 dev->num_slaves = 0;
3407 err = mlx4_multi_func_init(dev);
3409 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3415 err = mlx4_init_fw(dev);
3417 mlx4_err(dev, "Failed to init fw, aborting.\n");
3421 if (mlx4_is_master(dev)) {
3422 /* When we hit the goto slave_start below, dev_cap is already initialized. */
3424 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3431 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3433 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3437 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3440 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3441 u64 dev_flags = mlx4_enable_sriov(dev, pdev,
3447 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3448 dev->flags = dev_flags;
3449 if (!SRIOV_VALID_STATE(dev->flags)) {
3450 mlx4_err(dev, "Invalid SRIOV state\n");
3453 err = mlx4_reset(dev);
3455 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
3461 /* Legacy mode FW requires SRIOV to be enabled before
3462 * doing QUERY_DEV_CAP, since max_eq's value is different if SR-IOV is enabled. */
3465 memset(dev_cap, 0, sizeof(*dev_cap));
3466 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3468 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3472 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3477 err = mlx4_init_hca(dev);
3479 if (err == -EACCES) {
3480 /* Not the primary physical function;
3481 * running in slave mode */
3482 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3483 /* We're not a PF */
3484 if (dev->flags & MLX4_FLAG_SRIOV) {
3486 pci_disable_sriov(pdev);
3487 if (mlx4_is_master(dev) && !reset_flow)
3488 atomic_dec(&pf_loading);
3489 dev->flags &= ~MLX4_FLAG_SRIOV;
3491 if (!mlx4_is_slave(dev))
3492 mlx4_free_ownership(dev);
3493 dev->flags |= MLX4_FLAG_SLAVE;
3494 dev->flags &= ~MLX4_FLAG_MASTER;
3500 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
3501 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
3502 existing_vfs, reset_flow);
3504 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
3505 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
3506 dev->flags = dev_flags;
3507 err = mlx4_cmd_init(dev);
3509 /* Only VHCR is cleaned up, so FW commands can still be sent. */
3512 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
3516 dev->flags = dev_flags;
3519 if (!SRIOV_VALID_STATE(dev->flags)) {
3520 mlx4_err(dev, "Invalid SRIOV state\n");
3525 /* Check whether the device is functioning at its maximum possible speed.
3526 * This call has no return code; it just warns the user if the device's
3527 * PCI Express capabilities are under-satisfied by the bus. */
3529 if (!mlx4_is_slave(dev))
3530 pcie_print_link_status(dev->persist->pdev);
3532 /* In master functions, the communication channel must be initialized
3533 * after obtaining its address from fw */
3534 if (mlx4_is_master(dev)) {
3535 if (dev->caps.num_ports < 2 &&
3539 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
3540 dev->caps.num_ports);
3543 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
3546 i < sizeof(dev->persist->nvfs)/
3547 sizeof(dev->persist->nvfs[0]); i++) {
3550 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
3551 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
3552 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
3553 dev->caps.num_ports;
3557 /* In master functions, the communication channel
3558 * must be initialized after obtaining its address from fw
3560 err = mlx4_multi_func_init(dev);
3562 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
3567 err = mlx4_alloc_eq_table(dev);
3569 goto err_master_mfunc;
3571 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
3572 mutex_init(&priv->msix_ctl.pool_lock);
3574 mlx4_enable_msi_x(dev);
3575 if ((mlx4_is_mfunc(dev)) &&
3576 !(dev->flags & MLX4_FLAG_MSI_X)) {
3578 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
3582 if (!mlx4_is_slave(dev)) {
3583 err = mlx4_init_steering(dev);
3585 goto err_disable_msix;
3588 mlx4_init_quotas(dev);
3590 err = mlx4_setup_hca(dev);
3591 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
3592 !mlx4_is_mfunc(dev)) {
3593 dev->flags &= ~MLX4_FLAG_MSI_X;
3594 dev->caps.num_comp_vectors = 1;
3595 pci_disable_msix(pdev);
3596 err = mlx4_setup_hca(dev);
3602 /* When PF resources are ready arm its comm channel to enable
3605 if (mlx4_is_master(dev)) {
3606 err = mlx4_ARM_COMM_CHANNEL(dev);
3608 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
3614 for (port = 1; port <= dev->caps.num_ports; port++) {
3615 err = mlx4_init_port_info(dev, port);
3620 priv->v2p.port1 = 1;
3621 priv->v2p.port2 = 2;
3623 err = mlx4_register_device(dev);
3627 mlx4_request_modules(dev);
3629 mlx4_sense_init(dev);
3630 mlx4_start_sense(dev);
3634 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3635 atomic_dec(&pf_loading);
3641 for (--port; port >= 1; --port)
3642 mlx4_cleanup_port_info(&priv->port[port]);
3644 mlx4_cleanup_default_counters(dev);
3645 if (!mlx4_is_slave(dev))
3646 mlx4_cleanup_counters_table(dev);
3647 mlx4_cleanup_qp_table(dev);
3648 mlx4_cleanup_srq_table(dev);
3649 mlx4_cleanup_cq_table(dev);
3650 mlx4_cmd_use_polling(dev);
3651 mlx4_cleanup_eq_table(dev);
3652 mlx4_cleanup_mcg_table(dev);
3653 mlx4_cleanup_mr_table(dev);
3654 mlx4_cleanup_xrcd_table(dev);
3655 mlx4_cleanup_pd_table(dev);
3656 mlx4_cleanup_uar_table(dev);
3659 if (!mlx4_is_slave(dev))
3660 mlx4_clear_steering(dev);
3663 if (dev->flags & MLX4_FLAG_MSI_X)
3664 pci_disable_msix(pdev);
3667 mlx4_free_eq_table(dev);
3670 if (mlx4_is_master(dev)) {
3671 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
3672 mlx4_multi_func_cleanup(dev);
3675 if (mlx4_is_slave(dev))
3676 mlx4_slave_destroy_special_qp_cap(dev);
3679 mlx4_close_hca(dev);
3685 if (mlx4_is_slave(dev))
3686 mlx4_multi_func_cleanup(dev);
3689 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
3692 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
3693 pci_disable_sriov(pdev);
3694 dev->flags &= ~MLX4_FLAG_SRIOV;
3697 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
3698 atomic_dec(&pf_loading);
3700 kfree(priv->dev.dev_vfs);
3702 if (!mlx4_is_slave(dev))
3703 mlx4_free_ownership(dev);
3709 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3710 struct mlx4_priv *priv)
3713 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3714 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
3715 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
3716 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
3717 unsigned total_vfs = 0;
3720 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3722 err = mlx4_pci_enable_device(&priv->dev);
3724 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3728 /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
3729 * per port, we must limit the number of VFs to 63 (since there are 128 MACs). */
3732 for (i = 0; i < ARRAY_SIZE(nvfs) && i < num_vfs_argc;
3733 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
3734 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
3736 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
3738 goto err_disable_pdev;
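/*
 * Worked example (editor's note): param_map above routes positional
 * num_vfs arguments into nvfs[] slots, where slot 0 is port 1, slot 1 is
 * port 2 and slot 2 means both ports. So num_vfs=4 fills nvfs[2],
 * num_vfs=4,2 fills nvfs[0] and nvfs[1], and num_vfs=4,2,1 fills all
 * three slots; total_vfs accumulates the sum.
 */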
3741 for (i = 0; i < ARRAY_SIZE(prb_vf) && i < probe_vfs_argc;
3743 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
3744 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
3745 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
3747 goto err_disable_pdev;
3750 if (total_vfs > MLX4_MAX_NUM_VF) {
3752 "Requested more VF's (%d) than allowed by hw (%d)\n",
3753 total_vfs, MLX4_MAX_NUM_VF);
3755 goto err_disable_pdev;
3758 for (i = 0; i < MLX4_MAX_PORTS; i++) {
3759 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
3761 "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
3762 nvfs[i] + nvfs[2], i + 1,
3763 MLX4_MAX_NUM_VF_P_PORT);
3765 goto err_disable_pdev;
3769 /* Check for BARs. */
3770 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
3771 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3772 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
3773 pci_dev_data, pci_resource_flags(pdev, 0));
3775 goto err_disable_pdev;
3777 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
3778 dev_err(&pdev->dev, "Missing UAR, aborting\n");
3780 goto err_disable_pdev;
3783 err = pci_request_regions(pdev, DRV_NAME);
3785 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
3786 goto err_disable_pdev;
3789 pci_set_master(pdev);
3791 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3793 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
3794 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3796 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
3797 goto err_release_regions;
3800 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3802 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
3803 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3805 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
3806 goto err_release_regions;
3810 /* Allow large DMA segments, up to the firmware limit of 1 GB */
3811 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
3812 /* Detect if this device is a virtual function */
3813 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3814 /* When acting as PF, we normally skip VFs unless explicitly
3815 * requested to probe them. */
3818 unsigned vfs_offset = 0;
3820 for (i = 0; i < ARRAY_SIZE(nvfs) &&
3821 vfs_offset + nvfs[i] < extended_func_num(pdev);
3822 vfs_offset += nvfs[i], i++)
3824 if (i == ARRAY_SIZE(nvfs)) {
3826 goto err_release_regions;
3828 if ((extended_func_num(pdev) - vfs_offset)
3830 dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
3831 extended_func_num(pdev));
3833 goto err_release_regions;
3838 err = mlx4_crdump_init(&priv->dev);
3840 goto err_release_regions;
3842 err = mlx4_catas_init(&priv->dev);
3846 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
3853 mlx4_catas_end(&priv->dev);
3856 mlx4_crdump_end(&priv->dev);
3858 err_release_regions:
3859 pci_release_regions(pdev);
3862 mlx4_pci_disable_device(&priv->dev);
3866 static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
3867 enum devlink_port_type port_type)
3869 struct mlx4_port_info *info = container_of(devlink_port,
3870 struct mlx4_port_info,
3872 enum mlx4_port_type mlx4_port_type;
3874 switch (port_type) {
3875 case DEVLINK_PORT_TYPE_AUTO:
3876 mlx4_port_type = MLX4_PORT_TYPE_AUTO;
3878 case DEVLINK_PORT_TYPE_ETH:
3879 mlx4_port_type = MLX4_PORT_TYPE_ETH;
3881 case DEVLINK_PORT_TYPE_IB:
3882 mlx4_port_type = MLX4_PORT_TYPE_IB;
3888 return __set_port_type(info, mlx4_port_type);
3891 static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
3893 struct mlx4_priv *priv = devlink_priv(devlink);
3894 struct mlx4_dev *dev = &priv->dev;
3895 struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
3896 union devlink_param_value saved_value;
3899 err = devlink_param_driverinit_value_get(devlink,
3900 DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
3902 if (!err && mlx4_internal_err_reset != saved_value.vbool) {
3903 mlx4_internal_err_reset = saved_value.vbool;
3904 /* Notify on value changed on runtime configuration mode */
3905 devlink_param_value_changed(devlink,
3906 DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
3908 err = devlink_param_driverinit_value_get(devlink,
3909 DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
3912 log_num_mac = order_base_2(saved_value.vu32);
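/*
 * Editor's note: order_base_2() rounds up to the next power of two, so a
 * driverinit max_macs of 100 becomes log_num_mac = 7 (2^7 = 128), while
 * exactly 64 yields 6.
 */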
3913 err = devlink_param_driverinit_value_get(devlink,
3914 MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
3917 enable_64b_cqe_eqe = saved_value.vbool;
3918 err = devlink_param_driverinit_value_get(devlink,
3919 MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
3922 enable_4k_uar = saved_value.vbool;
3923 err = devlink_param_driverinit_value_get(devlink,
3924 DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
3926 if (!err && crdump->snapshot_enable != saved_value.vbool) {
3927 crdump->snapshot_enable = saved_value.vbool;
3928 devlink_param_value_changed(devlink,
3929 DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
3933 static void mlx4_restart_one_down(struct pci_dev *pdev);
3934 static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
3935 struct devlink *devlink);
3937 static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
3938 struct netlink_ext_ack *extack)
3940 struct mlx4_priv *priv = devlink_priv(devlink);
3941 struct mlx4_dev *dev = &priv->dev;
3942 struct mlx4_dev_persistent *persist = dev->persist;
3945 NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
3948 if (persist->num_vfs)
3949 mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
3950 mlx4_restart_one_down(persist->pdev);
3954 static int mlx4_devlink_reload_up(struct devlink *devlink,
3955 struct netlink_ext_ack *extack)
3957 struct mlx4_priv *priv = devlink_priv(devlink);
3958 struct mlx4_dev *dev = &priv->dev;
3959 struct mlx4_dev_persistent *persist = dev->persist;
3962 err = mlx4_restart_one_up(persist->pdev, true, devlink);
3964 mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
3970 static const struct devlink_ops mlx4_devlink_ops = {
3971 .port_type_set = mlx4_devlink_port_type_set,
3972 .reload_down = mlx4_devlink_reload_down,
3973 .reload_up = mlx4_devlink_reload_up,
3976 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3978 struct devlink *devlink;
3979 struct mlx4_priv *priv;
3980 struct mlx4_dev *dev;
3983 printk_once(KERN_INFO "%s", mlx4_version);
3985 devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv));
3988 priv = devlink_priv(devlink);
3991 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
3992 if (!dev->persist) {
3994 goto err_devlink_free;
3996 dev->persist->pdev = pdev;
3997 dev->persist->dev = dev;
3998 pci_set_drvdata(pdev, dev->persist);
3999 priv->pci_dev_data = id->driver_data;
4000 mutex_init(&dev->persist->device_state_mutex);
4001 mutex_init(&dev->persist->interface_state_mutex);
4002 mutex_init(&dev->persist->pci_status_mutex);
4004 ret = devlink_register(devlink, &pdev->dev);
4006 goto err_persist_free;
4007 ret = devlink_params_register(devlink, mlx4_devlink_params,
4008 ARRAY_SIZE(mlx4_devlink_params));
4010 goto err_devlink_unregister;
4011 mlx4_devlink_set_params_init_values(devlink);
4012 ret = __mlx4_init_one(pdev, id->driver_data, priv);
4014 goto err_params_unregister;
4016 devlink_params_publish(devlink);
4017 devlink_reload_enable(devlink);
4018 pci_save_state(pdev);
4021 err_params_unregister:
4022 devlink_params_unregister(devlink, mlx4_devlink_params,
4023 ARRAY_SIZE(mlx4_devlink_params));
4024 err_devlink_unregister:
4025 devlink_unregister(devlink);
4027 kfree(dev->persist);
4029 devlink_free(devlink);
4033 static void mlx4_clean_dev(struct mlx4_dev *dev)
4035 struct mlx4_dev_persistent *persist = dev->persist;
4036 struct mlx4_priv *priv = mlx4_priv(dev);
4037 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
4039 memset(priv, 0, sizeof(*priv));
4040 priv->dev.persist = persist;
4041 priv->dev.flags = flags;
4044 static void mlx4_unload_one(struct pci_dev *pdev)
4046 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4047 struct mlx4_dev *dev = persist->dev;
4048 struct mlx4_priv *priv = mlx4_priv(dev);
4055 /* Save the current port types for later use */
4056 for (i = 0; i < dev->caps.num_ports; i++) {
4057 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
4058 dev->persist->curr_port_poss_type[i] = dev->caps.
4059 possible_type[i + 1];
4062 pci_dev_data = priv->pci_dev_data;
4064 mlx4_stop_sense(dev);
4065 mlx4_unregister_device(dev);
4067 for (p = 1; p <= dev->caps.num_ports; p++) {
4068 mlx4_cleanup_port_info(&priv->port[p]);
4069 mlx4_CLOSE_PORT(dev, p);
4072 if (mlx4_is_master(dev))
4073 mlx4_free_resource_tracker(dev,
4074 RES_TR_FREE_SLAVES_ONLY);
4076 mlx4_cleanup_default_counters(dev);
4077 if (!mlx4_is_slave(dev))
4078 mlx4_cleanup_counters_table(dev);
4079 mlx4_cleanup_qp_table(dev);
4080 mlx4_cleanup_srq_table(dev);
4081 mlx4_cleanup_cq_table(dev);
4082 mlx4_cmd_use_polling(dev);
4083 mlx4_cleanup_eq_table(dev);
4084 mlx4_cleanup_mcg_table(dev);
4085 mlx4_cleanup_mr_table(dev);
4086 mlx4_cleanup_xrcd_table(dev);
4087 mlx4_cleanup_pd_table(dev);
4089 if (mlx4_is_master(dev))
4090 mlx4_free_resource_tracker(dev,
4091 RES_TR_FREE_STRUCTS_ONLY);
4094 mlx4_uar_free(dev, &priv->driver_uar);
4095 mlx4_cleanup_uar_table(dev);
4096 if (!mlx4_is_slave(dev))
4097 mlx4_clear_steering(dev);
4098 mlx4_free_eq_table(dev);
4099 if (mlx4_is_master(dev))
4100 mlx4_multi_func_cleanup(dev);
4101 mlx4_close_hca(dev);
4103 if (mlx4_is_slave(dev))
4104 mlx4_multi_func_cleanup(dev);
4105 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
4107 if (dev->flags & MLX4_FLAG_MSI_X)
4108 pci_disable_msix(pdev);
4110 if (!mlx4_is_slave(dev))
4111 mlx4_free_ownership(dev);
4113 mlx4_slave_destroy_special_qp_cap(dev);
4114 kfree(dev->dev_vfs);
4116 mlx4_clean_dev(dev);
4117 priv->pci_dev_data = pci_dev_data;
4121 static void mlx4_remove_one(struct pci_dev *pdev)
4123 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4124 struct mlx4_dev *dev = persist->dev;
4125 struct mlx4_priv *priv = mlx4_priv(dev);
4126 struct devlink *devlink = priv_to_devlink(priv);
4129 devlink_reload_disable(devlink);
4131 if (mlx4_is_slave(dev))
4132 persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
4134 mutex_lock(&persist->interface_state_mutex);
4135 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
4136 mutex_unlock(&persist->interface_state_mutex);
4138 /* Disabling SR-IOV is not allowed while there are active VFs */
4139 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
4140 active_vfs = mlx4_how_many_lives_vf(dev);
4142 pr_warn("Removing PF when there are active VF's !!\n");
4143 pr_warn("Will not disable SR-IOV.\n");
4147 /* The device is marked for deletion; continue without the lock,
4148 * letting other tasks terminate. */
4150 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4151 mlx4_unload_one(pdev);
4153 mlx4_info(dev, "%s: interface is down\n", __func__);
4154 mlx4_catas_end(dev);
4155 mlx4_crdump_end(dev);
4156 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
4157 mlx4_warn(dev, "Disabling SR-IOV\n");
4158 pci_disable_sriov(pdev);
4161 pci_release_regions(pdev);
4162 mlx4_pci_disable_device(dev);
4163 devlink_params_unregister(devlink, mlx4_devlink_params,
4164 ARRAY_SIZE(mlx4_devlink_params));
4165 devlink_unregister(devlink);
4166 kfree(dev->persist);
4167 devlink_free(devlink);
4170 static int restore_current_port_types(struct mlx4_dev *dev,
4171 enum mlx4_port_type *types,
4172 enum mlx4_port_type *poss_types)
4174 struct mlx4_priv *priv = mlx4_priv(dev);
4177 mlx4_stop_sense(dev);
4179 mutex_lock(&priv->port_mutex);
4180 for (i = 0; i < dev->caps.num_ports; i++)
4181 dev->caps.possible_type[i + 1] = poss_types[i];
4182 err = mlx4_change_port_types(dev, types);
4183 mlx4_start_sense(dev);
4184 mutex_unlock(&priv->port_mutex);
4189 static void mlx4_restart_one_down(struct pci_dev *pdev)
4191 mlx4_unload_one(pdev);
4194 static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
4195 struct devlink *devlink)
4197 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4198 struct mlx4_dev *dev = persist->dev;
4199 struct mlx4_priv *priv = mlx4_priv(dev);
4200 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4201 int pci_dev_data, err, total_vfs;
4203 pci_dev_data = priv->pci_dev_data;
4204 total_vfs = dev->persist->num_vfs;
4205 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4208 mlx4_devlink_param_load_driverinit_values(devlink);
4209 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
4211 mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
4212 __func__, pci_name(pdev), err);
4216 err = restore_current_port_types(dev, dev->persist->curr_port_type,
4217 dev->persist->curr_port_poss_type);
4219 mlx4_err(dev, "could not restore original port types (%d)\n",
4225 int mlx4_restart_one(struct pci_dev *pdev)
4227 mlx4_restart_one_down(pdev);
4228 return mlx4_restart_one_up(pdev, false, NULL);
4231 #define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
4232 #define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
4233 #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
4235 static const struct pci_device_id mlx4_pci_table[] = {
4236 #ifdef CONFIG_MLX4_CORE_GEN2
4237 /* MT25408 "Hermon" */
4238 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */
4239 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */
4240 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */
4241 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
4242 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */
4243 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */
4244 MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */
4245 /* MT25458 ConnectX EN 10GBASE-T */
4246 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
4247 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */
4248 /* MT26468 ConnectX EN 10GigE PCIe Gen2*/
4249 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
4250 /* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
4251 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
4252 /* MT26478 ConnectX2 40GigE PCIe Gen2 */
4253 MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
4254 /* MT25400 Family [ConnectX-2] */
4255 MLX_VF(0x1002), /* Virtual Function */
4256 #endif /* CONFIG_MLX4_CORE_GEN2 */
4257 /* MT27500 Family [ConnectX-3] */
4258 MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
4259 MLX_VF(0x1004), /* Virtual Function */
4260 MLX_GN(0x1005), /* MT27510 Family */
4261 MLX_GN(0x1006), /* MT27511 Family */
4262 MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */
4263 MLX_GN(0x1008), /* MT27521 Family */
4264 MLX_GN(0x1009), /* MT27530 Family */
4265 MLX_GN(0x100a), /* MT27531 Family */
4266 MLX_GN(0x100b), /* MT27540 Family */
4267 MLX_GN(0x100c), /* MT27541 Family */
4268 MLX_GN(0x100d), /* MT27550 Family */
4269 MLX_GN(0x100e), /* MT27551 Family */
4270 MLX_GN(0x100f), /* MT27560 Family */
4271 MLX_GN(0x1010), /* MT27561 Family */
4274 * See the mellanox_check_broken_intx_masking() quirk when adding new devices to this list. */
4281 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
4283 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
4284 pci_channel_state_t state)
4286 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4288 mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
4289 mlx4_enter_error_state(persist);
4291 mutex_lock(&persist->interface_state_mutex);
4292 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4293 mlx4_unload_one(pdev);
4295 mutex_unlock(&persist->interface_state_mutex);
4296 if (state == pci_channel_io_perm_failure)
4297 return PCI_ERS_RESULT_DISCONNECT;
4299 mlx4_pci_disable_device(persist->dev);
4300 return PCI_ERS_RESULT_NEED_RESET;
4303 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
4305 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4306 struct mlx4_dev *dev = persist->dev;
4309 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4310 err = mlx4_pci_enable_device(dev);
4312 mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
4313 return PCI_ERS_RESULT_DISCONNECT;
4316 pci_set_master(pdev);
4317 pci_restore_state(pdev);
4318 pci_save_state(pdev);
4319 return PCI_ERS_RESULT_RECOVERED;
4322 static void mlx4_pci_resume(struct pci_dev *pdev)
4324 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4325 struct mlx4_dev *dev = persist->dev;
4326 struct mlx4_priv *priv = mlx4_priv(dev);
4327 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4331 mlx4_err(dev, "%s was called\n", __func__);
4332 total_vfs = dev->persist->num_vfs;
4333 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4335 mutex_lock(&persist->interface_state_mutex);
4336 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4337 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
4340 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4345 err = restore_current_port_types(dev, dev->persist->
4346 curr_port_type, dev->persist->
4347 curr_port_poss_type);
4349 mlx4_err(dev, "could not restore original port types (%d)\n", err);
4352 mutex_unlock(&persist->interface_state_mutex);
4356 static void mlx4_shutdown(struct pci_dev *pdev)
4358 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4360 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
4361 mutex_lock(&persist->interface_state_mutex);
4362 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4363 mlx4_unload_one(pdev);
4364 mutex_unlock(&persist->interface_state_mutex);
4367 static const struct pci_error_handlers mlx4_err_handler = {
4368 .error_detected = mlx4_pci_err_detected,
4369 .slot_reset = mlx4_pci_slot_reset,
4370 .resume = mlx4_pci_resume,
4373 static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state)
4375 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4376 struct mlx4_dev *dev = persist->dev;
4378 mlx4_err(dev, "suspend was called\n");
4379 mutex_lock(&persist->interface_state_mutex);
4380 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4381 mlx4_unload_one(pdev);
4382 mutex_unlock(&persist->interface_state_mutex);
4387 static int mlx4_resume(struct pci_dev *pdev)
4389 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4390 struct mlx4_dev *dev = persist->dev;
4391 struct mlx4_priv *priv = mlx4_priv(dev);
4392 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4396 mlx4_err(dev, "resume was called\n");
4397 total_vfs = dev->persist->num_vfs;
4398 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4400 mutex_lock(&persist->interface_state_mutex);
4401 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4402 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
4405 ret = restore_current_port_types(dev,
4406 dev->persist->curr_port_type,
4407 dev->persist->curr_port_poss_type);
4409 mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret);
4412 mutex_unlock(&persist->interface_state_mutex);
4417 static struct pci_driver mlx4_driver = {
4419 .id_table = mlx4_pci_table,
4420 .probe = mlx4_init_one,
4421 .shutdown = mlx4_shutdown,
4422 .remove = mlx4_remove_one,
4423 .suspend = mlx4_suspend,
4424 .resume = mlx4_resume,
4425 .err_handler = &mlx4_err_handler,
4428 static int __init mlx4_verify_params(void)
4431 pr_warn("mlx4_core: bad msi_x: %d\n", msi_x);
4435 if ((log_num_mac < 0) || (log_num_mac > 7)) {
4436 pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
4440 if (log_num_vlan != 0)
4441 pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
4442 MLX4_LOG_NUM_VLANS);
4445 pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
4447 if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
4448 pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
4453 /* Check that the port type module parameters form a legal combination */
4454 if (port_type_array[0] == false && port_type_array[1] == true) {
4455 pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
4456 port_type_array[0] = true;
4459 if (mlx4_log_num_mgm_entry_size < -7 ||
4460 (mlx4_log_num_mgm_entry_size > 0 &&
4461 (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
4462 mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
4463 pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
4464 mlx4_log_num_mgm_entry_size,
4465 MLX4_MIN_MGM_LOG_ENTRY_SIZE,
4466 MLX4_MAX_MGM_LOG_ENTRY_SIZE);
4473 static int __init mlx4_init(void)
4477 if (mlx4_verify_params())
4481 mlx4_wq = create_singlethread_workqueue("mlx4");
4485 ret = pci_register_driver(&mlx4_driver);
4487 destroy_workqueue(mlx4_wq);
4488 return ret < 0 ? ret : 0;
4491 static void __exit mlx4_cleanup(void)
4493 pci_unregister_driver(&mlx4_driver);
4494 destroy_workqueue(mlx4_wq);
4497 module_init(mlx4_init);
4498 module_exit(mlx4_cleanup);