2 * Copyright (c) 2008, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * Author: Lucy Liu <lucy.liu@intel.com>
20 #include <linux/netdevice.h>
21 #include <linux/netlink.h>
22 #include <net/netlink.h>
23 #include <net/rtnetlink.h>
24 #include <linux/dcbnl.h>
25 #include <linux/rtnetlink.h>
29 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
30 * intended to allow network traffic with differing requirements
31 * (highly reliable, no drops vs. best effort vs. low latency) to operate
32 * and co-exist on Ethernet. Current DCB features are:
34 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
35 * framework for assigning bandwidth guarantees to traffic classes.
37 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
38 * can work independently for each 802.1p priority.
40 * Congestion Notification - provides a mechanism for end-to-end congestion
41 * control for protocols which do not have built-in congestion management.
43 * More information about the emerging standards for these Ethernet features
44 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
46 * This file implements an rtnetlink interface to allow configuration of DCB
47 * features for capable devices.
/* Kernel module metadata for the DCB netlink interface. */
50 MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
51 MODULE_DESCRIPTION("Data Center Bridging netlink interface");
52 MODULE_LICENSE("GPL");
54 /**************** DCB attribute policies *************************************/
56 /* DCB netlink attributes policy */
/*
 * Validation policy for the top-level DCB netlink attributes parsed in
 * dcb_doit().  Nested attributes are validated again by their own
 * per-feature policies below.
 * NOTE(review): the initializer's closing "};" is not visible in this
 * excerpt (original line numbering is non-contiguous here).
 */
57 static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
58 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
59 [DCB_ATTR_STATE] = {.type = NLA_U8},
60 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
61 [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
62 [DCB_ATTR_SET_ALL] = {.type = NLA_U8},
63 [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
64 [DCB_ATTR_CAP] = {.type = NLA_NESTED},
65 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
66 [DCB_ATTR_BCN] = {.type = NLA_NESTED},
67 [DCB_ATTR_APP] = {.type = NLA_NESTED},
70 /* DCB priority flow control to User Priority nested attributes */
/*
 * Policy for the PFC (priority flow control) nest: one u8 per 802.1p
 * user priority (0-7) plus an "all" flag requesting every priority.
 */
71 static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
72 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
73 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
74 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
75 [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
76 [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
77 [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
78 [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
79 [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
80 [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
83 /* DCB priority grouping nested attributes */
/*
 * Policy for the priority-group nest: a nested per-traffic-class
 * parameter block (TC_0..TC_7) and a u8 bandwidth percentage per
 * bandwidth-group id (BW_ID_0..BW_ID_7), each with an "all" selector.
 */
84 static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
85 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
86 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
87 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
88 [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
89 [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
90 [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
91 [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
92 [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
93 [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
94 [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
95 [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
96 [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
97 [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
98 [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
99 [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
100 [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
101 [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
102 [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
105 /* DCB traffic class nested attributes. */
/*
 * Policy for one traffic class's parameter nest: group id, user
 * priority mapping, strict priority flag and bandwidth percentage.
 */
106 static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
107 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
108 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
109 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
110 [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
111 [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
114 /* DCB capabilities nested attributes. */
/* Policy for the device-capability nest queried by DCB_CMD_GCAP. */
115 static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
116 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
117 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
118 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
119 [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
120 [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
121 [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
122 [DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
123 [DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
126 /* DCB number of traffic classes nested attributes. */
/* Policy for the number-of-traffic-classes nest (PG and PFC counts). */
127 static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
128 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
129 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
130 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
133 /* DCB BCN nested attributes. */
/*
 * Policy for the BCN (congestion notification) nest: u8 rate-limiter
 * enables per priority (RP_0..RP_7) plus u32 BCN config parameters.
 */
134 static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
135 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
136 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
137 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
138 [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
139 [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
140 [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
141 [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
142 [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
143 [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
144 [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
145 [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
146 [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
147 [DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
148 [DCB_BCN_ATTR_GD] = {.type = NLA_U32},
149 [DCB_BCN_ATTR_GI] = {.type = NLA_U32},
150 [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
151 [DCB_BCN_ATTR_TD] = {.type = NLA_U32},
152 [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
153 [DCB_BCN_ATTR_W] = {.type = NLA_U32},
154 [DCB_BCN_ATTR_RD] = {.type = NLA_U32},
155 [DCB_BCN_ATTR_RU] = {.type = NLA_U32},
156 [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
157 [DCB_BCN_ATTR_RI] = {.type = NLA_U32},
158 [DCB_BCN_ATTR_C] = {.type = NLA_U32},
159 [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
162 /* DCB APP nested attributes. */
/* Policy for the application-priority nest: selector, id, priority. */
163 static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
164 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
165 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
166 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
169 /* standard netlink reply call */
/*
 * Build and unicast a minimal DCB reply message carrying a single u8
 * attribute ('value' stored under attribute id 'attr') back to the
 * requesting socket identified by 'pid'.
 * NOTE(review): the elided lines here (per the gaps in the original
 * line numbering) include allocation/put error handling, the cmd
 * assignment and the goto labels/braces — not visible in this excerpt.
 */
170 static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
173 struct sk_buff *dcbnl_skb;
175 struct nlmsghdr *nlh;
178 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
/* NLMSG_NEW jumps to the nlmsg_failure label on overflow. */
182 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
184 dcb = NLMSG_DATA(nlh);
185 dcb->dcb_family = AF_UNSPEC;
189 ret = nla_put_u8(dcbnl_skb, attr, value);
193 /* end the message, assign the nlmsg_len. */
194 nlmsg_end(dcbnl_skb, nlh);
/* rtnl_unicast consumes the skb on success. */
195 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
/* Failure path: free the partially built skb. */
202 kfree_skb(dcbnl_skb);
/*
 * DCB_CMD_GSTATE handler: report the device's DCB enable state as a
 * single u8 reply via dcbnl_reply().
 */
206 static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
207 u32 pid, u32 seq, u16 flags)
/* The DCB_ATTR_STATE presence check is deliberately disabled upstream. */
211 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
212 if (!netdev->dcbnl_ops->getstate)
215 ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
216 DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
/*
 * DCB_CMD_PFC_GCFG handler: return the PFC setting for each user
 * priority requested in the DCB_ATTR_PFC_CFG nest (or all of them
 * when DCB_PFC_UP_ATTR_ALL is set).
 * NOTE(review): error-handling lines and braces are elided in this
 * excerpt (non-contiguous original numbering).
 */
221 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
222 u32 pid, u32 seq, u16 flags)
224 struct sk_buff *dcbnl_skb;
225 struct nlmsghdr *nlh;
227 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
/* Both the request nest and the driver getter must be present. */
233 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
236 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
237 tb[DCB_ATTR_PFC_CFG],
242 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
246 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
248 dcb = NLMSG_DATA(nlh);
249 dcb->dcb_family = AF_UNSPEC;
250 dcb->cmd = DCB_CMD_PFC_GCFG;
252 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
/* "getall" selects every priority regardless of individual attrs. */
256 if (data[DCB_PFC_UP_ATTR_ALL])
259 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
260 if (!getall && !data[i])
/* Attribute ids are contiguous, so i - DCB_PFC_UP_ATTR_0 is the priority. */
263 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
265 ret = nla_put_u8(dcbnl_skb, i, value);
/* On put failure, roll the whole nest back before bailing out. */
268 nla_nest_cancel(dcbnl_skb, nest);
272 nla_nest_end(dcbnl_skb, nest);
274 nlmsg_end(dcbnl_skb, nlh);
276 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
283 kfree_skb(dcbnl_skb);
/*
 * DCB_CMD_GPERM_HWADDR handler: return the device's permanent MAC
 * address as a DCB_ATTR_PERM_HWADDR blob.
 * NOTE(review): sizeof(perm_addr) == MAX_ADDR_LEN bytes are sent even
 * though the driver may fill fewer; behavior matches upstream.
 */
288 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
289 u32 pid, u32 seq, u16 flags)
291 struct sk_buff *dcbnl_skb;
292 struct nlmsghdr *nlh;
294 u8 perm_addr[MAX_ADDR_LEN];
297 if (!netdev->dcbnl_ops->getpermhwaddr)
300 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
304 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
306 dcb = NLMSG_DATA(nlh);
307 dcb->dcb_family = AF_UNSPEC;
308 dcb->cmd = DCB_CMD_GPERM_HWADDR;
310 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
312 ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
315 nlmsg_end(dcbnl_skb, nlh);
317 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
325 kfree_skb(dcbnl_skb);
/*
 * DCB_CMD_GCAP handler: query the driver for each requested capability
 * id and return them in a DCB_ATTR_CAP nest.  A non-zero return from
 * the driver's getcap simply skips that attribute.
 */
330 static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
331 u32 pid, u32 seq, u16 flags)
333 struct sk_buff *dcbnl_skb;
334 struct nlmsghdr *nlh;
336 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
342 if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
345 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
350 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
354 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
356 dcb = NLMSG_DATA(nlh);
357 dcb->dcb_family = AF_UNSPEC;
358 dcb->cmd = DCB_CMD_GCAP;
360 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
364 if (data[DCB_CAP_ATTR_ALL])
/* DCB_CAP_ATTR_ALL is the first id; real capabilities follow it. */
367 for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
368 if (!getall && !data[i])
371 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
372 ret = nla_put_u8(dcbnl_skb, i, value);
375 nla_nest_cancel(dcbnl_skb, nest);
380 nla_nest_end(dcbnl_skb, nest);
382 nlmsg_end(dcbnl_skb, nlh);
384 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
391 kfree_skb(dcbnl_skb);
/*
 * DCB_CMD_GNUMTCS handler: report how many traffic classes the device
 * supports for each requested feature (PG, PFC) in a
 * DCB_ATTR_NUMTCS nest.
 */
396 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
397 u32 pid, u32 seq, u16 flags)
399 struct sk_buff *dcbnl_skb;
400 struct nlmsghdr *nlh;
402 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
408 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
411 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
418 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
424 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
426 dcb = NLMSG_DATA(nlh);
427 dcb->dcb_family = AF_UNSPEC;
428 dcb->cmd = DCB_CMD_GNUMTCS;
430 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
436 if (data[DCB_NUMTCS_ATTR_ALL])
439 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
440 if (!getall && !data[i])
443 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
/* Only emitted when the driver call succeeded (check elided here). */
445 ret = nla_put_u8(dcbnl_skb, i, value);
448 nla_nest_cancel(dcbnl_skb, nest);
456 nla_nest_end(dcbnl_skb, nest);
458 nlmsg_end(dcbnl_skb, nlh);
460 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
469 kfree_skb(dcbnl_skb);
/*
 * DCB_CMD_SNUMTCS handler: push each u8 traffic-class count present in
 * the DCB_ATTR_NUMTCS nest down to the driver, then ack with the
 * (boolean-collapsed) driver status.
 */
474 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
475 u32 pid, u32 seq, u16 flags)
477 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
482 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
485 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
493 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
497 value = nla_get_u8(data[i]);
499 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
/* !!ret collapses any driver error to 1 for the reply attribute. */
506 ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
507 DCB_ATTR_NUMTCS, pid, seq, flags);
/* DCB_CMD_PFC_GSTATE handler: single-u8 reply with the PFC state. */
513 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
514 u32 pid, u32 seq, u16 flags)
518 if (!netdev->dcbnl_ops->getpfcstate)
521 ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
522 DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
/*
 * DCB_CMD_PFC_SSTATE handler: set the PFC enable state from the u8
 * DCB_ATTR_PFC_STATE attribute and ack with value 0 (success).
 */
528 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
529 u32 pid, u32 seq, u16 flags)
534 if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
537 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
/* setpfcstate returns void, hence the unconditional 0 reply. */
539 netdev->dcbnl_ops->setpfcstate(netdev, value);
541 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
/*
 * Shared worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG: dump the
 * priority-group configuration (per-TC parameters followed by per-BWG
 * bandwidth percentages) into a DCB_ATTR_PG_CFG nest.
 * dir: 0 = Tx path (getpgtccfgtx/getpgbwgcfgtx),
 *      1 = Rx path (getpgtccfgrx/getpgbwgcfgrx).
 * NOTE(review): error-check lines, braces and some if/else headers are
 * elided in this excerpt (non-contiguous original numbering).
 */
547 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
548 u32 pid, u32 seq, u16 flags, int dir)
550 struct sk_buff *dcbnl_skb;
551 struct nlmsghdr *nlh;
553 struct nlattr *pg_nest, *param_nest, *data;
554 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
555 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
556 u8 prio, pgid, tc_pct, up_map;
/* All four direction-specific getters must exist before proceeding. */
561 if (!tb[DCB_ATTR_PG_CFG] ||
562 !netdev->dcbnl_ops->getpgtccfgtx ||
563 !netdev->dcbnl_ops->getpgtccfgrx ||
564 !netdev->dcbnl_ops->getpgbwgcfgtx ||
565 !netdev->dcbnl_ops->getpgbwgcfgrx)
568 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
569 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
574 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
578 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
580 dcb = NLMSG_DATA(nlh);
581 dcb->dcb_family = AF_UNSPEC;
582 dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
584 pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
588 if (pg_tb[DCB_PG_ATTR_TC_ALL])
/* Per-traffic-class parameter nests. */
591 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
592 if (!getall && !pg_tb[i])
/* With TC_ALL, the one template nest is reused for every TC. */
595 if (pg_tb[DCB_PG_ATTR_TC_ALL])
596 data = pg_tb[DCB_PG_ATTR_TC_ALL];
599 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
600 data, dcbnl_tc_param_nest);
604 param_nest = nla_nest_start(dcbnl_skb, i);
/* Defaults so unset fields are reported as "undefined". */
608 pgid = DCB_ATTR_VALUE_UNDEFINED;
609 prio = DCB_ATTR_VALUE_UNDEFINED;
610 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
611 up_map = DCB_ATTR_VALUE_UNDEFINED;
615 netdev->dcbnl_ops->getpgtccfgrx(netdev,
616 i - DCB_PG_ATTR_TC_0, &prio,
617 &pgid, &tc_pct, &up_map);
620 netdev->dcbnl_ops->getpgtccfgtx(netdev,
621 i - DCB_PG_ATTR_TC_0, &prio,
622 &pgid, &tc_pct, &up_map);
/* Emit only fields the request asked for (or all of them). */
625 if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
626 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
627 ret = nla_put_u8(dcbnl_skb,
628 DCB_TC_ATTR_PARAM_PGID, pgid);
632 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
633 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
634 ret = nla_put_u8(dcbnl_skb,
635 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
639 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
640 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
641 ret = nla_put_u8(dcbnl_skb,
642 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
646 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
647 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
648 ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
653 nla_nest_end(dcbnl_skb, param_nest);
656 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
/* Flat per-bandwidth-group percentage attributes. */
661 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
662 if (!getall && !pg_tb[i])
665 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
669 netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
670 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
673 netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
674 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
676 ret = nla_put_u8(dcbnl_skb, i, tc_pct);
682 nla_nest_end(dcbnl_skb, pg_nest);
684 nlmsg_end(dcbnl_skb, nlh);
686 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
/* Error unwinding: cancel inner nest, then outer, then free skb. */
693 nla_nest_cancel(dcbnl_skb, param_nest);
695 nla_nest_cancel(dcbnl_skb, pg_nest);
698 kfree_skb(dcbnl_skb);
/* DCB_CMD_PGTX_GCFG: Tx-direction (dir = 0) PG config dump. */
704 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
705 u32 pid, u32 seq, u16 flags)
707 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
/* DCB_CMD_PGRX_GCFG: Rx-direction (dir = 1) PG config dump. */
710 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
711 u32 pid, u32 seq, u16 flags)
713 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
/*
 * DCB_CMD_SSTATE handler: set the DCB enable state from the u8
 * DCB_ATTR_STATE attribute; reply carries the driver's return value.
 */
716 static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
717 u32 pid, u32 seq, u16 flags)
722 if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
725 value = nla_get_u8(tb[DCB_ATTR_STATE]);
727 ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
728 RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
/*
 * DCB_CMD_PFC_SCFG handler: apply each PFC u8 value present in the
 * DCB_PFC_UP_ATTR nest to its user priority, then ack with 0.
 */
734 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
735 u32 pid, u32 seq, u16 flags)
737 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
742 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
745 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
746 tb[DCB_ATTR_PFC_CFG],
751 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
754 value = nla_get_u8(data[i]);
/* Priority index derived from the attribute's own nla_type. */
755 netdev->dcbnl_ops->setpfccfg(netdev,
756 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
759 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
/*
 * DCB_CMD_SET_ALL handler: commit all staged DCB configuration to
 * hardware via the driver's setall hook; reply carries its status.
 */
765 static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
766 u32 pid, u32 seq, u16 flags)
770 if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
773 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
774 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
/*
 * Shared worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG: apply
 * per-traffic-class parameters and per-bandwidth-group percentages
 * from the DCB_ATTR_PG_CFG nest to the driver, then ack with 0.
 * dir: 0 = Tx setters, 1 = Rx setters.
 * NOTE(review): loop-body guards and braces are elided in this excerpt
 * (non-contiguous original numbering).
 */
779 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
780 u32 pid, u32 seq, u16 flags, int dir)
782 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
783 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
/* All four direction-specific setters must exist before proceeding. */
791 if (!tb[DCB_ATTR_PG_CFG] ||
792 !netdev->dcbnl_ops->setpgtccfgtx ||
793 !netdev->dcbnl_ops->setpgtccfgrx ||
794 !netdev->dcbnl_ops->setpgbwgcfgtx ||
795 !netdev->dcbnl_ops->setpgbwgcfgrx)
798 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
799 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
803 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
807 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
808 pg_tb[i], dcbnl_tc_param_nest);
/* Unset fields are passed to the driver as "undefined". */
812 pgid = DCB_ATTR_VALUE_UNDEFINED;
813 prio = DCB_ATTR_VALUE_UNDEFINED;
814 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
815 up_map = DCB_ATTR_VALUE_UNDEFINED;
817 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
819 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
821 if (param_tb[DCB_TC_ATTR_PARAM_PGID])
822 pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
824 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
825 tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
827 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
829 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
831 /* dir: Tx = 0, Rx = 1 */
834 netdev->dcbnl_ops->setpgtccfgrx(netdev,
835 i - DCB_PG_ATTR_TC_0,
836 prio, pgid, tc_pct, up_map);
839 netdev->dcbnl_ops->setpgtccfgtx(netdev,
840 i - DCB_PG_ATTR_TC_0,
841 prio, pgid, tc_pct, up_map);
845 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
849 tc_pct = nla_get_u8(pg_tb[i]);
851 /* dir: Tx = 0, Rx = 1 */
854 netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
855 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
858 netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
859 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
863 ret = dcbnl_reply(0, RTM_SETDCB,
864 (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
865 DCB_ATTR_PG_CFG, pid, seq, flags);
/* DCB_CMD_PGTX_SCFG: Tx-direction (dir = 0) PG config apply. */
871 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
872 u32 pid, u32 seq, u16 flags)
874 return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
/* DCB_CMD_PGRX_SCFG: Rx-direction (dir = 1) PG config apply. */
877 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
878 u32 pid, u32 seq, u16 flags)
880 return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
/*
 * DCB_CMD_BCN_GCFG handler: dump BCN configuration into a
 * DCB_ATTR_BCN nest — u8 rate-limiter state per priority followed by
 * the u32 BCN parameters (BCNA_0 .. RI).
 */
883 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
884 u32 pid, u32 seq, u16 flags)
886 struct sk_buff *dcbnl_skb;
887 struct nlmsghdr *nlh;
889 struct nlattr *bcn_nest;
890 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
897 if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
898 !netdev->dcbnl_ops->getbcncfg)
901 ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
902 tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
907 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
911 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
913 dcb = NLMSG_DATA(nlh);
914 dcb->dcb_family = AF_UNSPEC;
915 dcb->cmd = DCB_CMD_BCN_GCFG;
917 bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
921 if (bcn_tb[DCB_BCN_ATTR_ALL])
/* Per-priority rate-limiter state (u8 attributes). */
924 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
925 if (!getall && !bcn_tb[i])
928 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
930 ret = nla_put_u8(dcbnl_skb, i, value_byte);
/* u32 BCN parameters; attribute id doubles as the config selector. */
935 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
936 if (!getall && !bcn_tb[i])
939 netdev->dcbnl_ops->getbcncfg(netdev, i,
941 ret = nla_put_u32(dcbnl_skb, i, value_integer);
946 nla_nest_end(dcbnl_skb, bcn_nest);
948 nlmsg_end(dcbnl_skb, nlh);
950 ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
/* Error unwinding: cancel the nest, then free the skb. */
957 nla_nest_cancel(dcbnl_skb, bcn_nest);
960 kfree_skb(dcbnl_skb);
/*
 * DCB_CMD_BCN_SCFG handler: apply per-priority rate-limiter enables
 * (u8) and the u32 BCN parameters from the DCB_ATTR_BCN nest, then
 * ack with 0.
 */
966 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
967 u32 pid, u32 seq, u16 flags)
969 struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
975 if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg
976 || !netdev->dcbnl_ops->setbcnrp)
979 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
985 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
988 value_byte = nla_get_u8(data[i]);
/* Priority index derived from the attribute's own nla_type. */
989 netdev->dcbnl_ops->setbcnrp(netdev,
990 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
993 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
996 value_int = nla_get_u32(data[i]);
997 netdev->dcbnl_ops->setbcncfg(netdev,
1001 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
/*
 * Entry point for RTM_GETDCB / RTM_SETDCB rtnetlink requests.
 * Parses the top-level attributes, resolves the target device from
 * DCB_ATTR_IFNAME, and dispatches on dcb->cmd to the per-command
 * handlers above.  Only the initial network namespace is served.
 * NOTE(review): 'break' statements, default case and cleanup/return
 * lines are elided in this excerpt.
 */
1007 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1009 struct net *net = sock_net(skb->sk);
1010 struct net_device *netdev;
1011 struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
1012 struct nlattr *tb[DCB_ATTR_MAX + 1];
1013 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
/* DCB is only supported in init_net. */
1016 if (net != &init_net)
1019 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1024 if (!tb[DCB_ATTR_IFNAME])
/* dev_get_by_name takes a reference; released on the exit path. */
1027 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
1031 if (!netdev->dcbnl_ops)
1035 case DCB_CMD_GSTATE:
1036 ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
1039 case DCB_CMD_PFC_GCFG:
1040 ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1043 case DCB_CMD_GPERM_HWADDR:
1044 ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
1047 case DCB_CMD_PGTX_GCFG:
1048 ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1051 case DCB_CMD_PGRX_GCFG:
1052 ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1055 case DCB_CMD_BCN_GCFG:
1056 ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1059 case DCB_CMD_SSTATE:
1060 ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
1063 case DCB_CMD_PFC_SCFG:
1064 ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1068 case DCB_CMD_SET_ALL:
1069 ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
1072 case DCB_CMD_PGTX_SCFG:
1073 ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1076 case DCB_CMD_PGRX_SCFG:
1077 ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
/* case DCB_CMD_GCAP (label elided in this excerpt): */
1081 ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
1084 case DCB_CMD_GNUMTCS:
1085 ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1088 case DCB_CMD_SNUMTCS:
1089 ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1092 case DCB_CMD_PFC_GSTATE:
1093 ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1096 case DCB_CMD_PFC_SSTATE:
1097 ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1100 case DCB_CMD_BCN_SCFG:
1101 ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
/*
 * Module init: register dcb_doit for both GET and SET DCB messages
 * with rtnetlink (no dump callback).
 */
1114 static int __init dcbnl_init(void)
1116 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
1117 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);
1121 module_init(dcbnl_init);
/* Module exit: unregister both rtnetlink message handlers. */
1123 static void __exit dcbnl_exit(void)
1125 rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
1126 rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
1128 module_exit(dcbnl_exit);