/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
41 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
42 struct mlx5_flow_table *ft)
44 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
45 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
47 MLX5_SET(set_flow_table_root_in, in, opcode,
48 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
49 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
50 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
52 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
53 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
56 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
59 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
61 enum fs_flow_table_op_mod op_mod,
62 enum fs_flow_table_type type, unsigned int level,
63 unsigned int log_size, struct mlx5_flow_table
64 *next_ft, unsigned int *table_id)
66 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
67 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
70 MLX5_SET(create_flow_table_in, in, opcode,
71 MLX5_CMD_OP_CREATE_FLOW_TABLE);
73 MLX5_SET(create_flow_table_in, in, table_type, type);
74 MLX5_SET(create_flow_table_in, in, level, level);
75 MLX5_SET(create_flow_table_in, in, log_size, log_size);
77 MLX5_SET(create_flow_table_in, in, vport_number, vport);
78 MLX5_SET(create_flow_table_in, in, other_vport, 1);
82 case FS_FT_OP_MOD_NORMAL:
84 MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
85 MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
89 case FS_FT_OP_MOD_LAG_DEMUX:
90 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
92 MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
97 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
99 *table_id = MLX5_GET(create_flow_table_out, out,
104 int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
105 struct mlx5_flow_table *ft)
107 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
108 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
110 MLX5_SET(destroy_flow_table_in, in, opcode,
111 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
112 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
113 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
115 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
116 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
119 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
122 int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
123 struct mlx5_flow_table *ft,
124 struct mlx5_flow_table *next_ft)
126 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
127 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
129 MLX5_SET(modify_flow_table_in, in, opcode,
130 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
131 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
132 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
134 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
135 MLX5_SET(modify_flow_table_in, in, modify_field_select,
136 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
138 MLX5_SET(modify_flow_table_in, in,
139 lag_master_next_table_id, next_ft->id);
141 MLX5_SET(modify_flow_table_in, in,
142 lag_master_next_table_id, 0);
146 MLX5_SET(modify_flow_table_in, in, vport_number,
148 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
150 MLX5_SET(modify_flow_table_in, in, modify_field_select,
151 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
153 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
154 MLX5_SET(modify_flow_table_in, in, table_miss_id,
157 MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
161 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
164 int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
165 struct mlx5_flow_table *ft,
167 unsigned int *group_id)
169 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
170 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
173 MLX5_SET(create_flow_group_in, in, opcode,
174 MLX5_CMD_OP_CREATE_FLOW_GROUP);
175 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
176 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
178 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
179 MLX5_SET(create_flow_group_in, in, other_vport, 1);
182 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
184 *group_id = MLX5_GET(create_flow_group_out, out,
189 int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
190 struct mlx5_flow_table *ft,
191 unsigned int group_id)
193 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
194 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
196 MLX5_SET(destroy_flow_group_in, in, opcode,
197 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
198 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
199 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
200 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
202 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
203 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
206 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
209 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
210 int opmod, int modify_mask,
211 struct mlx5_flow_table *ft,
215 unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
216 fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
217 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
218 struct mlx5_flow_rule *dst;
219 void *in_flow_context;
220 void *in_match_value;
225 in = mlx5_vzalloc(inlen);
227 mlx5_core_warn(dev, "failed to allocate inbox\n");
231 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
232 MLX5_SET(set_fte_in, in, op_mod, opmod);
233 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
234 MLX5_SET(set_fte_in, in, table_type, ft->type);
235 MLX5_SET(set_fte_in, in, table_id, ft->id);
236 MLX5_SET(set_fte_in, in, flow_index, fte->index);
238 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
239 MLX5_SET(set_fte_in, in, other_vport, 1);
242 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
243 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
244 MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
245 MLX5_SET(flow_context, in_flow_context, action, fte->action);
246 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
248 memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
250 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
251 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
254 list_for_each_entry(dst, &fte->node.children, node.list) {
257 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
260 MLX5_SET(dest_format_struct, in_dests, destination_type,
261 dst->dest_attr.type);
262 if (dst->dest_attr.type ==
263 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
264 id = dst->dest_attr.ft->id;
266 id = dst->dest_attr.tir_num;
268 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
269 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
273 MLX5_SET(flow_context, in_flow_context, destination_list_size,
277 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
280 list_for_each_entry(dst, &fte->node.children, node.list) {
281 if (dst->dest_attr.type !=
282 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
285 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
286 dst->dest_attr.counter->id);
287 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
291 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
295 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
/* Create a new flow table entry: SET_FLOW_TABLE_ENTRY with opmod 0 and
 * no modify mask.  Returns 0 on success or a negative errno.
 */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
308 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
309 struct mlx5_flow_table *ft,
315 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
316 flow_table_properties_nic_receive.
322 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
325 int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
326 struct mlx5_flow_table *ft,
329 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
330 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
332 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
333 MLX5_SET(delete_fte_in, in, table_type, ft->type);
334 MLX5_SET(delete_fte_in, in, table_id, ft->id);
335 MLX5_SET(delete_fte_in, in, flow_index, index);
337 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
338 MLX5_SET(delete_fte_in, in, other_vport, 1);
341 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
344 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
346 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
347 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
350 MLX5_SET(alloc_flow_counter_in, in, opcode,
351 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
353 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
355 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
359 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
361 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
362 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
364 MLX5_SET(dealloc_flow_counter_in, in, opcode,
365 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
366 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
367 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
370 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
371 u64 *packets, u64 *bytes)
373 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
374 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
375 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
379 MLX5_SET(query_flow_counter_in, in, opcode,
380 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
381 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
382 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
383 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
387 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
388 *packets = MLX5_GET64(traffic_counter, stats, packets);
389 *bytes = MLX5_GET64(traffic_counter, stats, octets);
393 struct mlx5_cmd_fc_bulk {
400 struct mlx5_cmd_fc_bulk *
401 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
403 struct mlx5_cmd_fc_bulk *b;
405 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
406 MLX5_ST_SZ_BYTES(traffic_counter) * num;
408 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
/* Free a bulk query context allocated by mlx5_cmd_fc_bulk_alloc(). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
425 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
427 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
429 MLX5_SET(query_flow_counter_in, in, opcode,
430 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
431 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
432 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
433 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
434 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
437 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
438 struct mlx5_cmd_fc_bulk *b, u16 id,
439 u64 *packets, u64 *bytes)
441 int index = id - b->id;
444 if (index < 0 || index >= b->num) {
445 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
446 id, b->id, b->id + b->num - 1);
450 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
451 flow_statistics[index]);
452 *packets = MLX5_GET64(traffic_counter, stats, packets);
453 *bytes = MLX5_GET64(traffic_counter, stats, octets);
456 #define MAX_ENCAP_SIZE (128)
458 int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev,
464 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
465 u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) +
466 (MAX_ENCAP_SIZE / sizeof(u32))];
467 void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in,
469 void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in,
471 int inlen = header - (void *)in + size;
474 if (size > MAX_ENCAP_SIZE)
477 memset(in, 0, inlen);
478 MLX5_SET(alloc_encap_header_in, in, opcode,
479 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
480 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
481 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
482 memcpy(header, encap_header, size);
484 memset(out, 0, sizeof(out));
485 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
487 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
491 void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id)
493 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
494 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
496 memset(in, 0, sizeof(in));
497 MLX5_SET(dealloc_encap_header_in, in, opcode,
498 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
499 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
501 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));