/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

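/* Accessors for the driver's cached shared-buffer state: pool records
 * (prs), per-port/per-PG class records (cms) and per-port/per-pool quotas
 * (pms), each kept separately for the ingress and egress directions.
 */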
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.prs[dir][pool];
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
}

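/* The *_write helpers below program the corresponding shared-buffer
 * register (SBPR/SBCM/SBPM) and, on success, mirror the written values
 * into the cached records so later queries can be served from memory.
 */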
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
				enum mlxsw_reg_sbxx_dir dir,
				enum mlxsw_reg_sbpr_mode mode, u32 size)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
	pr->mode = mode;
	pr->size = size;
	return 0;
}

static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff, u8 pool)
{
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
			    min_buff, max_buff, pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;
	if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
		struct mlxsw_sp_sb_cm *cm;

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool = pool;
	}
	return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pool, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}

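/* Occupancy is read asynchronously: each SBPM query or clear is queued on
 * 'bulk_list' and completes when the caller runs
 * mlxsw_reg_trans_bulk_wait(); the unpack happens in the callback.
 */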
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}

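/* Port headroom buffer sizes in bytes, indexed by buffer number. Only
 * buffer 0 (two Ethernet frames) and buffer 9 (two maximum-MTU frames) are
 * sized; buffer 8 is unused and the remaining entries stay at zero.
 */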
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * ETH_FRAME_LEN,
	[9] = 2 * MLXSW_PORT_MAX_MTU,
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
#define MLXSW_SP_PB_UNUSED 8

static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}

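/* Per-port shared-buffer state is allocated up front for every local port
 * the core can report.
 */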
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);

	mlxsw_sp->sb.ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port),
				     GFP_KERNEL);
	if (!mlxsw_sp->sb.ports)
		return -ENOMEM;
	return 0;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->sb.ports);
}

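/* Default shared-buffer pool sizes in bytes (converted to cells when
 * written): one large dynamic pool per direction, plus a smaller ingress
 * pool sized by the MNG define; the remaining pools start empty.
 */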
#define MLXSW_SP_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE	(200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE	13232000

#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
};

#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)

static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_pr *prs,
				  size_t prs_len)
{
	int i;
	int err;

	for (i = 0; i < prs_len; i++) {
		u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);

		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_prs_ingress,
				     MLXSW_SP_SB_PRS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_prs_egress,
				      MLXSW_SP_SB_PRS_EGRESS_LEN);
}

#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(1, 0xff, 0),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(10000, 0, 0),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)

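/* Program the per-PG (ingress) / per-TC (egress) class quotas above for a
 * single port in a single direction through SBCM.
 */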
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
					   min_buff, cm->max_buff, cm->pool);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_cms_ingress,
				     MLXSW_SP_SB_CMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_cms_egress,
				      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}

#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)

static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				       enum mlxsw_reg_sbxx_dir dir,
				       const struct mlxsw_sp_sb_pm *pms,
				       size_t pms_len)
{
	int i;
	int err;

	for (i = 0; i < pms_len; i++) {
		const struct mlxsw_sp_sb_pm *pm;

		pm = &pms[i];
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
					   pm->min_buff, pm->max_buff);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					  mlxsw_sp_port->local_port,
					  MLXSW_REG_SBXX_DIR_INGRESS,
					  mlxsw_sp_sb_pms_ingress,
					  MLXSW_SP_SB_PMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					   mlxsw_sp_port->local_port,
					   MLXSW_REG_SBXX_DIR_EGRESS,
					   mlxsw_sp_sb_pms_egress,
					   MLXSW_SP_SB_PMS_EGRESS_LEN);
}

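/* Multicast shared-buffer records, programmed through SBMM below; every
 * entry is bound to pool 0.
 */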
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp_sb_mms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    mc->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}

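/* Top-level init: cache the cell size, size the pools, program the CPU
 * port and multicast records, and register the shared buffer with devlink.
 */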
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 sb_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;
	mlxsw_sp->sb.cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;
	sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_TC_COUNT,
				  MLXSW_SP_SB_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	return err;
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
}

int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

	return err;
}

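/* devlink addresses pools with a flat index, while internally a pool is a
 * (pool number, direction) pair: ingress pools occupy indexes
 * 0..MLXSW_SP_SB_POOL_COUNT - 1 and egress pools the following
 * MLXSW_SP_SB_POOL_COUNT indexes.
 */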
static u8 pool_get(u16 pool_index)
{
	return pool_index % MLXSW_SP_SB_POOL_COUNT;
}

static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
{
	u16 pool_index;

	pool_index = pool;
	if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
		pool_index += MLXSW_SP_SB_POOL_COUNT;
	return pool_index;
}

static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
{
	return pool_index < MLXSW_SP_SB_POOL_COUNT ?
	       MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
}

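/* The casts below rely on the devlink pool type and threshold type enums
 * matching the corresponding register enums value for value.
 */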
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	return 0;
}

int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	enum mlxsw_reg_sbpr_mode mode;

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EINVAL;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}

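/* For dynamic pools the devlink threshold encodes an alpha index offset by
 * MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; for static pools it is a byte
 * count converted to or from cells.
 */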
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
				    u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}

int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
						 pm->max_buff);
	return 0;
}

int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
				    0, max_buff);
}

int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
						 cm->max_buff);
	*p_pool_index = pool_index_get(cm->pool, dir);
	return 0;
}

int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u8 pool = pool_get(pool_index);
	u32 max_buff;
	int err;

	if (dir != dir_get(pool_index))
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
				    0, max_buff, pool);
}

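/* Occupancy snapshots are taken in batches: each SBSR transaction returns
 * one record per masked port and TC in each direction, so at most
 * MASKED_COUNT_MAX ports are queried per transaction and the callback
 * walks the records in the same port order.
 */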
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))

struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};

static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}

int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}

int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}