/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
#include "eswitch.h"
42 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
43 struct mlx5_flow_table *ft)
45 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
46 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
48 MLX5_SET(set_flow_table_root_in, in, opcode,
49 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
50 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
51 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
53 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
54 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
57 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
60 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
62 enum fs_flow_table_op_mod op_mod,
63 enum fs_flow_table_type type, unsigned int level,
64 unsigned int log_size, struct mlx5_flow_table
65 *next_ft, unsigned int *table_id, u32 flags)
67 int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
68 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
69 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
72 MLX5_SET(create_flow_table_in, in, opcode,
73 MLX5_CMD_OP_CREATE_FLOW_TABLE);
75 MLX5_SET(create_flow_table_in, in, table_type, type);
76 MLX5_SET(create_flow_table_in, in, level, level);
77 MLX5_SET(create_flow_table_in, in, log_size, log_size);
79 MLX5_SET(create_flow_table_in, in, vport_number, vport);
80 MLX5_SET(create_flow_table_in, in, other_vport, 1);
83 MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
84 MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
87 case FS_FT_OP_MOD_NORMAL:
89 MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
90 MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
94 case FS_FT_OP_MOD_LAG_DEMUX:
95 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
97 MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
102 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
104 *table_id = MLX5_GET(create_flow_table_out, out,
109 int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
110 struct mlx5_flow_table *ft)
112 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
113 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
115 MLX5_SET(destroy_flow_table_in, in, opcode,
116 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
117 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
118 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
120 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
121 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
124 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
127 int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
128 struct mlx5_flow_table *ft,
129 struct mlx5_flow_table *next_ft)
131 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
132 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
134 MLX5_SET(modify_flow_table_in, in, opcode,
135 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
136 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
137 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
139 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
140 MLX5_SET(modify_flow_table_in, in, modify_field_select,
141 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
143 MLX5_SET(modify_flow_table_in, in,
144 lag_master_next_table_id, next_ft->id);
146 MLX5_SET(modify_flow_table_in, in,
147 lag_master_next_table_id, 0);
151 MLX5_SET(modify_flow_table_in, in, vport_number,
153 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
155 MLX5_SET(modify_flow_table_in, in, modify_field_select,
156 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
158 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
159 MLX5_SET(modify_flow_table_in, in, table_miss_id,
162 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
166 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
169 int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
170 struct mlx5_flow_table *ft,
172 unsigned int *group_id)
174 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
175 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
178 MLX5_SET(create_flow_group_in, in, opcode,
179 MLX5_CMD_OP_CREATE_FLOW_GROUP);
180 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
181 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
183 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
184 MLX5_SET(create_flow_group_in, in, other_vport, 1);
187 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
189 *group_id = MLX5_GET(create_flow_group_out, out,
194 int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
195 struct mlx5_flow_table *ft,
196 unsigned int group_id)
198 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
199 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
201 MLX5_SET(destroy_flow_group_in, in, opcode,
202 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
203 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
204 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
205 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
207 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
208 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
211 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
214 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
215 int opmod, int modify_mask,
216 struct mlx5_flow_table *ft,
220 unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
221 fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
222 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
223 struct mlx5_flow_rule *dst;
224 void *in_flow_context;
225 void *in_match_value;
230 in = mlx5_vzalloc(inlen);
232 mlx5_core_warn(dev, "failed to allocate inbox\n");
236 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
237 MLX5_SET(set_fte_in, in, op_mod, opmod);
238 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
239 MLX5_SET(set_fte_in, in, table_type, ft->type);
240 MLX5_SET(set_fte_in, in, table_id, ft->id);
241 MLX5_SET(set_fte_in, in, flow_index, fte->index);
243 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
244 MLX5_SET(set_fte_in, in, other_vport, 1);
247 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
248 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
249 MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
250 MLX5_SET(flow_context, in_flow_context, action, fte->action);
251 MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
252 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
254 memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
256 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
257 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
260 list_for_each_entry(dst, &fte->node.children, node.list) {
263 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
266 MLX5_SET(dest_format_struct, in_dests, destination_type,
267 dst->dest_attr.type);
268 if (dst->dest_attr.type ==
269 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
270 id = dst->dest_attr.ft->id;
272 id = dst->dest_attr.tir_num;
274 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
275 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
279 MLX5_SET(flow_context, in_flow_context, destination_list_size,
283 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
286 list_for_each_entry(dst, &fte->node.children, node.list) {
287 if (dst->dest_attr.type !=
288 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
291 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
292 dst->dest_attr.counter->id);
293 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
297 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
301 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
/* Create a new flow table entry: op_mod 0, no modify mask. */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
314 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
315 struct mlx5_flow_table *ft,
321 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
322 flow_table_properties_nic_receive.
328 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
331 int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
332 struct mlx5_flow_table *ft,
335 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
336 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
338 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
339 MLX5_SET(delete_fte_in, in, table_type, ft->type);
340 MLX5_SET(delete_fte_in, in, table_id, ft->id);
341 MLX5_SET(delete_fte_in, in, flow_index, index);
343 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
344 MLX5_SET(delete_fte_in, in, other_vport, 1);
347 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
350 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
352 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
353 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
356 MLX5_SET(alloc_flow_counter_in, in, opcode,
357 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
359 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
361 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
365 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
367 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
368 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
370 MLX5_SET(dealloc_flow_counter_in, in, opcode,
371 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
372 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
373 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
376 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
377 u64 *packets, u64 *bytes)
379 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
380 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
381 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
385 MLX5_SET(query_flow_counter_in, in, opcode,
386 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
387 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
388 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
389 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
393 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
394 *packets = MLX5_GET64(traffic_counter, stats, packets);
395 *bytes = MLX5_GET64(traffic_counter, stats, octets);
399 struct mlx5_cmd_fc_bulk {
406 struct mlx5_cmd_fc_bulk *
407 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
409 struct mlx5_cmd_fc_bulk *b;
411 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
412 MLX5_ST_SZ_BYTES(traffic_counter) * num;
414 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
425 void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
431 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
433 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
435 MLX5_SET(query_flow_counter_in, in, opcode,
436 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
437 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
438 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
439 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
440 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
443 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
444 struct mlx5_cmd_fc_bulk *b, u16 id,
445 u64 *packets, u64 *bytes)
447 int index = id - b->id;
450 if (index < 0 || index >= b->num) {
451 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
452 id, b->id, b->id + b->num - 1);
456 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
457 flow_statistics[index]);
458 *packets = MLX5_GET64(traffic_counter, stats, packets);
459 *bytes = MLX5_GET64(traffic_counter, stats, octets);
462 int mlx5_encap_alloc(struct mlx5_core_dev *dev,
468 int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
469 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
470 void *encap_header_in;
476 if (size > MLX5_CAP_ESW(dev, max_encap_header_size))
479 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size,
484 encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
485 header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
486 inlen = header - (void *)in + size;
488 memset(in, 0, inlen);
489 MLX5_SET(alloc_encap_header_in, in, opcode,
490 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
491 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
492 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
493 memcpy(header, encap_header, size);
495 memset(out, 0, sizeof(out));
496 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
498 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
503 void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
505 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
506 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
508 memset(in, 0, sizeof(in));
509 MLX5_SET(dealloc_encap_header_in, in, opcode,
510 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
511 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
513 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));