/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/mlx5/driver.h>
34 #include <linux/mlx5/cmd.h>
35 #include <linux/module.h>
36 #include "mlx5_core.h"
38 static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
41 u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
43 MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
44 return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
47 int mlx5_query_board_id(struct mlx5_core_dev *dev)
50 int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
53 out = kzalloc(outlen, GFP_KERNEL);
57 err = mlx5_cmd_query_adapter(dev, out, outlen);
62 MLX5_ADDR_OF(query_adapter_out, out,
63 query_adapter_struct.vsd_contd_psid),
64 MLX5_FLD_SZ_BYTES(query_adapter_out,
65 query_adapter_struct.vsd_contd_psid));
72 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
75 int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
78 out = kzalloc(outlen, GFP_KERNEL);
82 err = mlx5_cmd_query_adapter(mdev, out, outlen);
86 *vendor_id = MLX5_GET(query_adapter_out, out,
87 query_adapter_struct.ieee_vendor_id);
92 EXPORT_SYMBOL(mlx5_core_query_vendor_id);
94 static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
96 return mlx5_query_pcam_reg(dev, dev->caps.pcam,
97 MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
98 MLX5_PCAM_REGS_5000_TO_507F);
101 static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
103 return mlx5_query_mcam_reg(dev, dev->caps.mcam,
104 MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
105 MLX5_MCAM_REGS_FIRST_128);
108 int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
112 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
116 if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
117 err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
122 if (MLX5_CAP_GEN(dev, pg)) {
123 err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
128 if (MLX5_CAP_GEN(dev, atomic)) {
129 err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
134 if (MLX5_CAP_GEN(dev, roce)) {
135 err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
140 if (MLX5_CAP_GEN(dev, nic_flow_table) ||
141 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
142 err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
147 if (MLX5_CAP_GEN(dev, vport_group_manager) &&
148 MLX5_CAP_GEN(dev, eswitch_flow_table)) {
149 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
154 if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
155 err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
160 if (MLX5_CAP_GEN(dev, vector_calc)) {
161 err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
166 if (MLX5_CAP_GEN(dev, qos)) {
167 err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
172 if (MLX5_CAP_GEN(dev, pcam_reg))
173 mlx5_get_pcam_reg(dev);
175 if (MLX5_CAP_GEN(dev, mcam_reg))
176 mlx5_get_mcam_reg(dev);
181 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
183 u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
184 u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
186 MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
187 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
190 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
192 u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
193 u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
195 MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
196 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
199 int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
201 u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
202 u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
206 if (!MLX5_CAP_GEN(dev, force_teardown)) {
207 mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
211 MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
212 MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);
214 ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
218 force_state = MLX5_GET(teardown_hca_out, out, force_state);
219 if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
220 mlx5_core_err(dev, "teardown with force mode failed\n");
/* MCC register instruction codes used to drive the firmware-update
 * state machine (lock handle, stream component, verify, activate).
 */
enum mlxsw_reg_mcc_instruction {
	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};
236 static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
237 enum mlxsw_reg_mcc_instruction instr,
238 u16 component_index, u32 update_handle,
241 u32 out[MLX5_ST_SZ_DW(mcc_reg)];
242 u32 in[MLX5_ST_SZ_DW(mcc_reg)];
244 memset(in, 0, sizeof(in));
246 MLX5_SET(mcc_reg, in, instruction, instr);
247 MLX5_SET(mcc_reg, in, component_index, component_index);
248 MLX5_SET(mcc_reg, in, update_handle, update_handle);
249 MLX5_SET(mcc_reg, in, component_size, component_size);
251 return mlx5_core_access_reg(dev, in, sizeof(in), out,
252 sizeof(out), MLX5_REG_MCC, 0, 1);
255 static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
256 u32 *update_handle, u8 *error_code,
259 u32 out[MLX5_ST_SZ_DW(mcc_reg)];
260 u32 in[MLX5_ST_SZ_DW(mcc_reg)];
263 memset(in, 0, sizeof(in));
264 memset(out, 0, sizeof(out));
265 MLX5_SET(mcc_reg, in, update_handle, *update_handle);
267 err = mlx5_core_access_reg(dev, in, sizeof(in), out,
268 sizeof(out), MLX5_REG_MCC, 0, 0);
272 *update_handle = MLX5_GET(mcc_reg, out, update_handle);
273 *error_code = MLX5_GET(mcc_reg, out, error_code);
274 *control_state = MLX5_GET(mcc_reg, out, control_state);
280 static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
282 u32 offset, u16 size,
285 int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
286 u32 out[MLX5_ST_SZ_DW(mcda_reg)];
287 int i, j, dw_size = size >> 2;
291 in = kzalloc(in_size, GFP_KERNEL);
295 MLX5_SET(mcda_reg, in, update_handle, update_handle);
296 MLX5_SET(mcda_reg, in, offset, offset);
297 MLX5_SET(mcda_reg, in, size, size);
299 for (i = 0; i < dw_size; i++) {
301 data_element = htonl(*(u32 *)&data[j]);
302 memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
305 err = mlx5_core_access_reg(dev, in, in_size, out,
306 sizeof(out), MLX5_REG_MCDA, 0, 1);
311 static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
313 u32 *max_component_size,
314 u8 *log_mcda_word_size,
315 u16 *mcda_max_write_size)
317 u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
318 int offset = MLX5_ST_SZ_DW(mcqi_reg);
319 u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
322 memset(in, 0, sizeof(in));
323 memset(out, 0, sizeof(out));
325 MLX5_SET(mcqi_reg, in, component_index, component_index);
326 MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));
328 err = mlx5_core_access_reg(dev, in, sizeof(in), out,
329 sizeof(out), MLX5_REG_MCQI, 0, 0);
333 *max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
334 *log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
335 *mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);