/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <net/sock.h>

/**
 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet. Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 * framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 * can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 * control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */

MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]   = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]       = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]       = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]       = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
};

static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
};

/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]  = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};

static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);

/* standard netlink reply call */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
		       u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;
	int ret = -EINVAL;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		return ret;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	ret = nla_put_u8(dcbnl_skb, attr, value);
	if (ret)
		goto err;

	/* end the message, assign the nlmsg_len. */
	nlmsg_end(dcbnl_skb, nlh);
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		return -EINVAL;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
	return ret;
}

static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
			  DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);

	return ret;
}

static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG],
			       dcbnl_pfc_up_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_PFC_GCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		goto err;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(dcbnl_skb, i, value);

		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
				u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	u8 perm_addr[MAX_ADDR_LEN];
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return ret;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GPERM_HWADDR;

	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
		      perm_addr);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

nlmsg_failure:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
		return ret;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
			       dcbnl_cap_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GCAP;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
	if (!nest)
		goto err;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(dcbnl_skb, i, value);

			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				goto err;
			}
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest);
	if (ret) {
		ret = -EINVAL;
		goto err_out;
	}

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb) {
		ret = -EINVAL;
		goto err_out;
	}

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GNUMTCS;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
	if (!nest) {
		ret = -EINVAL;
		goto err;
	}

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(dcbnl_skb, i, value);

			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				ret = -EINVAL;
				goto err;
			}
		} else {
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret) {
		ret = -EINVAL;
		goto err_out;
	}

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return ret;
}

static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret = -EINVAL;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest);

	if (ret) {
		ret = -EINVAL;
		goto err;
	}

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);

		if (ret)
			goto operr;
	}

operr:
	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
			  DCB_ATTR_NUMTCS, pid, seq, flags);

err:
	return ret;
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpfcstate)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
			  DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
			  pid, seq, flags);

	return ret;
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
		return ret;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
			  pid, seq, flags);

	return ret;
}

static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_APP])
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
	} else {
		struct dcb_app app = {
			.selector = idtype,
			.protocol = id,
		};
		up = dcb_getapp(netdev, &app);
	}

	/* send this back */
	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GAPP;

	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
	if (!app_nest)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(dcbnl_skb, app_nest);
	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto nlmsg_failure;

	goto out;

out_cancel:
	nla_nest_cancel(dcbnl_skb, app_nest);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
out:
	return ret;
}

static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	int err, ret = -EINVAL;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		err = dcb_setapp(netdev, &app);
	}

	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
			  pid, seq, flags);
out:
	return ret;
}

static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags, int dir)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret = -EINVAL;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);

	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;

	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		goto err;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(dcbnl_skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(dcbnl_skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(dcbnl_skb, i, tc_pct);

		if (ret)
			goto err_pg;
	}

	nla_nest_end(dcbnl_skb, pg_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_param:
	nla_nest_cancel(dcbnl_skb, param_nest);
err_pg:
	nla_nest_cancel(dcbnl_skb, pg_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret = -EINVAL;
	return ret;
}

static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
}

static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
		return ret;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
			  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
			  pid, seq, flags);

	return ret;
}

static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG],
			       dcbnl_pfc_up_nest);
	if (ret)
		goto err;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
			  pid, seq, flags);
err:
	return ret;
}

static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
			  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);

	return ret;
}

static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags, int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret = -EINVAL;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		goto err;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			goto err;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0,
					prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0,
					prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	ret = dcbnl_reply(0, RTM_SETDCB,
			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
			  DCB_ATTR_PG_CFG, pid, seq, flags);

err:
	return ret;
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
}

static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret = -EINVAL;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return ret;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);

	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_BCN_GCFG;

	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		goto err;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(dcbnl_skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(dcbnl_skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(dcbnl_skb, bcn_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_bcn:
	nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret = -EINVAL;
	return ret;
}

static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return ret;

	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN],
			       dcbnl_pfc_up_nest);
	if (ret)
		goto err;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
		netdev->dcbnl_ops->setbcncfg(netdev,
					     i, value_int);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
			  pid, seq, flags);
err:
	return ret;
}

static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;

	/*
	 * retrieve the peer app configuration from the driver. If the driver
	 * handlers fail, exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/*
		 * build the message; from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}

/* Handle IEEE 802.1Qaz GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err = -EMSGSIZE;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		goto nla_put_failure;

	if (ops->ieee_getets) {
		struct ieee_ets ets;
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			goto nla_put_failure;
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		goto nla_put_failure;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
				      &itr->app);
			if (err) {
				spin_unlock(&dcb_lock);
				goto nla_put_failure;
			}
		}
	}

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			goto nla_put_failure;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			goto nla_put_failure;
	}

	nla_nest_end(skb, ieee);
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return err;
}

static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		goto nla_put_failure;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			goto nla_put_failure;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			goto nla_put_failure;
		nla_nest_end(skb, tc_nest);
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		if (nla_put_u8(skb, i, tc_pct))
			goto nla_put_failure;
	}
	nla_nest_end(skb, pg);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app */
	spin_lock(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock(&dcb_lock);
nla_put_failure:
	return err;
}

static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 pid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}

int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 pid)
{
	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);

int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 pid)
{
	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);

/* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
 * be completed, the entire message is aborted and an error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		return err;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}

static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	struct net *net = dev_net(netdev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_IEEE_GET;

	err = dcbnl_ieee_fill(skb, netdev);

	if (err < 0) {
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
	} else {
		nlmsg_end(skb, nlh);
		err = rtnl_unicast(skb, net, pid);
	}

	return err;
}

static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}

/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	int ret;

	if (!netdev->dcbnl_ops->getdcbx)
		return -EOPNOTSUPP;

	ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
			  DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);

	return ret;
}

static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	int ret;
	u8 value;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	value = nla_get_u8(tb[DCB_ATTR_DCBX]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
			  RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
			  pid, seq, flags);

	return ret;
}

Shmulik Ravidea45fe42010-12-30 06:26:55 +00001757static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
1758 u32 pid, u32 seq, u16 flags)
1759{
1760 struct sk_buff *dcbnl_skb;
1761 struct nlmsghdr *nlh;
1762 struct dcbmsg *dcb;
1763 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1764 u8 value;
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001765 int ret, i;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001766 int getall = 0;
1767
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001768 if (!netdev->dcbnl_ops->getfeatcfg)
1769 return -EOPNOTSUPP;
1770
1771 if (!tb[DCB_ATTR_FEATCFG])
1772 return -EINVAL;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001773
1774 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1775 dcbnl_featcfg_nest);
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001776 if (ret)
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001777 goto err_out;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001778
1779 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1780 if (!dcbnl_skb) {
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001781 ret = -ENOBUFS;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001782 goto err_out;
1783 }
1784
1785 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1786
1787 dcb = NLMSG_DATA(nlh);
1788 dcb->dcb_family = AF_UNSPEC;
1789 dcb->cmd = DCB_CMD_GFEATCFG;
1790
1791 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
1792 if (!nest) {
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001793 ret = -EMSGSIZE;
1794 goto nla_put_failure;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001795 }
1796
1797 if (data[DCB_FEATCFG_ATTR_ALL])
1798 getall = 1;
1799
1800 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1801 if (!getall && !data[i])
1802 continue;
1803
1804 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001805 if (!ret)
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001806 ret = nla_put_u8(dcbnl_skb, i, value);
1807
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001808 if (ret) {
1809 nla_nest_cancel(dcbnl_skb, nest);
1810 goto nla_put_failure;
1811 }
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001812 }
1813 nla_nest_end(dcbnl_skb, nest);
1814
1815 nlmsg_end(dcbnl_skb, nlh);
1816
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001817 return rtnl_unicast(dcbnl_skb, &init_net, pid);
1818nla_put_failure:
1819 nlmsg_cancel(dcbnl_skb, nlh);
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001820nlmsg_failure:
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001821 kfree_skb(dcbnl_skb);
1822err_out:
1823 return ret;
1824}
1825
1826static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
1827 u32 pid, u32 seq, u16 flags)
1828{
1829 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001830 int ret, i;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001831 u8 value;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001832
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001833 if (!netdev->dcbnl_ops->setfeatcfg)
1834 		return -EOPNOTSUPP;
1835
1836 if (!tb[DCB_ATTR_FEATCFG])
1837 return -EINVAL;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001838
1839 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1840 dcbnl_featcfg_nest);
1841
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001842 if (ret)
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001843 goto err;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001844
1845 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1846 if (data[i] == NULL)
1847 continue;
1848
1849 value = nla_get_u8(data[i]);
1850
1851 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1852
1853 if (ret)
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001854 goto err;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001855 }
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001856err:
Shmulik Ravid7f891cf2011-01-03 08:04:59 +00001857 dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
1858 pid, seq, flags);
1859
Shmulik Ravidea45fe42010-12-30 06:26:55 +00001860 return ret;
1861}
1862
Shmulik Raviddc6ed1d2011-02-27 05:04:38 +00001863/* Handle CEE DCBX GET commands. */
1864static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
1865 u32 pid, u32 seq, u16 flags)
1866{
Shmulik Ravid5b7f7622011-07-05 06:16:25 +00001867 struct net *net = dev_net(netdev);
Shmulik Raviddc6ed1d2011-02-27 05:04:38 +00001868 struct sk_buff *skb;
1869 struct nlmsghdr *nlh;
1870 struct dcbmsg *dcb;
Shmulik Raviddc6ed1d2011-02-27 05:04:38 +00001871 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
Shmulik Ravid5b7f7622011-07-05 06:16:25 +00001872 int err;
Shmulik Raviddc6ed1d2011-02-27 05:04:38 +00001873
1874 if (!ops)
1875 return -EOPNOTSUPP;
1876
1877 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1878 if (!skb)
1879 return -ENOBUFS;
1880
Shmulik Ravid5b7f7622011-07-05 06:16:25 +00001881 nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1882 if (nlh == NULL) {
1883 nlmsg_free(skb);
1884 return -EMSGSIZE;
1885 }
Shmulik Raviddc6ed1d2011-02-27 05:04:38 +00001886
1887 dcb = NLMSG_DATA(nlh);
1888 dcb->dcb_family = AF_UNSPEC;
1889 dcb->cmd = DCB_CMD_CEE_GET;
1890
Shmulik Ravid5b7f7622011-07-05 06:16:25 +00001891 err = dcbnl_cee_fill(skb, netdev);
Shmulik Raviddc6ed1d2011-02-27 05:04:38 +00001892
Shmulik Ravid5b7f7622011-07-05 06:16:25 +00001893 if (err < 0) {
1894 nlmsg_cancel(skb, nlh);
1895 nlmsg_free(skb);
1896 } else {
1897 nlmsg_end(skb, nlh);
1898 err = rtnl_unicast(skb, net, pid);
Shmulik Ravid37cf4d12011-07-05 06:16:22 +00001899 }
Shmulik Ravid37cf4d12011-07-05 06:16:22 +00001900 return err;
Shmulik Raviddc6ed1d2011-02-27 05:04:38 +00001901}
1902
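/* rtnetlink doit handler for RTM_GETDCB/RTM_SETDCB: validate
 * DCB_ATTR_IFNAME, look up the device, and dispatch the DCB command to
 * the handlers above.
 */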
Alexander Duyck2f90b862008-11-20 20:52:10 -08001903static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1904{
1905 struct net *net = sock_net(skb->sk);
1906 struct net_device *netdev;
1907 struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
1908 struct nlattr *tb[DCB_ATTR_MAX + 1];
1909 u32 pid = skb ? NETLINK_CB(skb).pid : 0;
1910 int ret = -EINVAL;
1911
Octavian Purdila09ad9bc2009-11-25 15:14:13 -08001912 if (!net_eq(net, &init_net))
Alexander Duyck2f90b862008-11-20 20:52:10 -08001913 return -EINVAL;
1914
1915 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1916 dcbnl_rtnl_policy);
1917 if (ret < 0)
1918 return ret;
1919
1920 if (!tb[DCB_ATTR_IFNAME])
1921 return -EINVAL;
1922
1923 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
1924 if (!netdev)
1925 return -EINVAL;
1926
1927 if (!netdev->dcbnl_ops)
1928 goto errout;
1929
1930 switch (dcb->cmd) {
1931 case DCB_CMD_GSTATE:
1932 ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
1933 nlh->nlmsg_flags);
1934 goto out;
1935 case DCB_CMD_PFC_GCFG:
1936 ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1937 nlh->nlmsg_flags);
1938 goto out;
1939 case DCB_CMD_GPERM_HWADDR:
1940 ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
1941 nlh->nlmsg_flags);
1942 goto out;
1943 case DCB_CMD_PGTX_GCFG:
1944 ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1945 nlh->nlmsg_flags);
1946 goto out;
1947 case DCB_CMD_PGRX_GCFG:
1948 ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1949 nlh->nlmsg_flags);
1950 goto out;
Alexander Duyck859ee3c2008-11-20 21:10:23 -08001951 case DCB_CMD_BCN_GCFG:
1952 ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
1953 nlh->nlmsg_flags);
1954 goto out;
Alexander Duyck2f90b862008-11-20 20:52:10 -08001955 case DCB_CMD_SSTATE:
1956 ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
1957 nlh->nlmsg_flags);
1958 goto out;
1959 case DCB_CMD_PFC_SCFG:
1960 ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
1961 nlh->nlmsg_flags);
1962 goto out;
1963
1964 case DCB_CMD_SET_ALL:
1965 ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
1966 nlh->nlmsg_flags);
1967 goto out;
1968 case DCB_CMD_PGTX_SCFG:
1969 ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1970 nlh->nlmsg_flags);
1971 goto out;
1972 case DCB_CMD_PGRX_SCFG:
1973 ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1974 nlh->nlmsg_flags);
1975 goto out;
Alexander Duyck46132182008-11-20 21:05:08 -08001976 case DCB_CMD_GCAP:
1977 ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
1978 nlh->nlmsg_flags);
1979 goto out;
Alexander Duyck33dbabc2008-11-20 21:08:19 -08001980 case DCB_CMD_GNUMTCS:
1981 ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1982 nlh->nlmsg_flags);
1983 goto out;
1984 case DCB_CMD_SNUMTCS:
1985 ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
1986 nlh->nlmsg_flags);
1987 goto out;
Alexander Duyck0eb3aa92008-11-20 21:09:23 -08001988 case DCB_CMD_PFC_GSTATE:
1989 ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1990 nlh->nlmsg_flags);
1991 goto out;
1992 case DCB_CMD_PFC_SSTATE:
1993 ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
1994 nlh->nlmsg_flags);
1995 goto out;
Alexander Duyck859ee3c2008-11-20 21:10:23 -08001996 case DCB_CMD_BCN_SCFG:
1997 ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
1998 nlh->nlmsg_flags);
1999 goto out;
Yi Zou57949682009-08-31 12:33:40 +00002000 case DCB_CMD_GAPP:
2001 ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
2002 nlh->nlmsg_flags);
2003 goto out;
2004 case DCB_CMD_SAPP:
2005 ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
2006 nlh->nlmsg_flags);
2007 goto out;
John Fastabend3e290272010-12-30 09:25:46 +00002008 case DCB_CMD_IEEE_SET:
2009 ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
John Fastabendf9ae7e42011-06-21 07:34:48 +00002010 nlh->nlmsg_flags);
John Fastabend3e290272010-12-30 09:25:46 +00002011 goto out;
2012 case DCB_CMD_IEEE_GET:
2013 ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
John Fastabendf9ae7e42011-06-21 07:34:48 +00002014 nlh->nlmsg_flags);
2015 goto out;
2016 case DCB_CMD_IEEE_DEL:
2017 ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
2018 nlh->nlmsg_flags);
John Fastabend3e290272010-12-30 09:25:46 +00002019 goto out;
Shmulik Ravid6241b622010-12-30 06:26:48 +00002020 case DCB_CMD_GDCBX:
2021 ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
2022 nlh->nlmsg_flags);
2023 goto out;
2024 case DCB_CMD_SDCBX:
2025 ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
2026 nlh->nlmsg_flags);
2027 goto out;
Shmulik Ravidea45fe42010-12-30 06:26:55 +00002028 case DCB_CMD_GFEATCFG:
2029 ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
2030 nlh->nlmsg_flags);
2031 goto out;
2032 case DCB_CMD_SFEATCFG:
2033 ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
2034 nlh->nlmsg_flags);
2035 goto out;
Shmulik Raviddc6ed1d2011-02-27 05:04:38 +00002036 case DCB_CMD_CEE_GET:
2037 ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
2038 nlh->nlmsg_flags);
2039 goto out;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002040 default:
2041 goto errout;
2042 }
2043errout:
2044 ret = -EINVAL;
2045out:
2046 dev_put(netdev);
2047 return ret;
2048}
2049
John Fastabend9ab933a2010-12-30 09:26:31 +00002050/**
2051 * dcb_getapp - retrieve the DCBX application user priority
2052 *
2053 * On success returns a non-zero 802.1p user priority bitmap
2054 * On success returns a non-zero 802.1p user priority bitmap;
2055 * otherwise returns 0 as the invalid user priority bitmap to
2056 * indicate an error.
2057u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2058{
2059 struct dcb_app_type *itr;
2060 u8 prio = 0;
2061
2062 spin_lock(&dcb_lock);
2063 list_for_each_entry(itr, &dcb_app_list, list) {
2064 if (itr->app.selector == app->selector &&
2065 itr->app.protocol == app->protocol &&
Mark Rustade290ed82011-10-06 08:52:33 +00002066 itr->ifindex == dev->ifindex) {
John Fastabend9ab933a2010-12-30 09:26:31 +00002067 prio = itr->app.priority;
2068 break;
2069 }
2070 }
2071 spin_unlock(&dcb_lock);
2072
2073 return prio;
2074}
2075EXPORT_SYMBOL(dcb_getapp);
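
/*
 * Illustrative sketch (not part of this file's API surface): a CEE driver
 * could look up the cached APP value for the FCoE ethertype like this.
 * The wrapper name is made up for the example; dcb_getapp(), struct
 * dcb_app, DCB_APP_IDTYPE_ETHTYPE (linux/dcbnl.h) and ETH_P_FCOE
 * (linux/if_ether.h) are the real pieces it relies on.
 *
 *	static u8 example_fcoe_getapp(struct net_device *dev)
 *	{
 *		struct dcb_app app = {
 *			.selector = DCB_APP_IDTYPE_ETHTYPE,
 *			.protocol = ETH_P_FCOE,
 *		};
 *
 *		return dcb_getapp(dev, &app);
 *	}
 */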
2076
2077/**
John Fastabendb6db2172011-06-21 07:34:42 +00002078 * dcb_setapp - add CEE dcb application data to app list
John Fastabend9ab933a2010-12-30 09:26:31 +00002079 *
John Fastabendb6db2172011-06-21 07:34:42 +00002080 * Priority 0 is an invalid priority in CEE spec. This routine
2081 * removes applications from the app list if the priority is
2082 * set to zero.
John Fastabend9ab933a2010-12-30 09:26:31 +00002083 */
John Fastabendab6baf92011-06-21 07:34:58 +00002084int dcb_setapp(struct net_device *dev, struct dcb_app *new)
John Fastabend9ab933a2010-12-30 09:26:31 +00002085{
2086 struct dcb_app_type *itr;
John Fastabend7ec79272011-01-31 12:00:59 +00002087 struct dcb_app_type event;
2088
Mark Rustade290ed82011-10-06 08:52:33 +00002089 event.ifindex = dev->ifindex;
John Fastabend7ec79272011-01-31 12:00:59 +00002090 memcpy(&event.app, new, sizeof(event.app));
John Fastabend6bd0e1c2011-10-06 08:52:38 +00002091 if (dev->dcbnl_ops->getdcbx)
2092 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
John Fastabend9ab933a2010-12-30 09:26:31 +00002093
2094 spin_lock(&dcb_lock);
2095 /* Search for existing match and replace */
2096 list_for_each_entry(itr, &dcb_app_list, list) {
2097 if (itr->app.selector == new->selector &&
2098 itr->app.protocol == new->protocol &&
Mark Rustade290ed82011-10-06 08:52:33 +00002099 itr->ifindex == dev->ifindex) {
John Fastabend9ab933a2010-12-30 09:26:31 +00002100 if (new->priority)
2101 itr->app.priority = new->priority;
2102 else {
2103 list_del(&itr->list);
2104 kfree(itr);
2105 }
2106 goto out;
2107 }
2108 }
2109 	/* App type does not exist, add new application type */
2110 if (new->priority) {
2111 struct dcb_app_type *entry;
2112 entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2113 if (!entry) {
2114 spin_unlock(&dcb_lock);
2115 return -ENOMEM;
2116 }
2117
2118 memcpy(&entry->app, new, sizeof(*new));
Mark Rustade290ed82011-10-06 08:52:33 +00002119 entry->ifindex = dev->ifindex;
John Fastabend9ab933a2010-12-30 09:26:31 +00002120 list_add(&entry->list, &dcb_app_list);
2121 }
2122out:
2123 spin_unlock(&dcb_lock);
John Fastabend7ec79272011-01-31 12:00:59 +00002124 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
John Fastabend9ab933a2010-12-30 09:26:31 +00002125 return 0;
2126}
2127EXPORT_SYMBOL(dcb_setapp);
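
/*
 * Illustrative sketch: caching a CEE APP priority for FCoE, and clearing
 * it again by passing priority 0 (which, per the comment above, removes
 * the entry).  Only dcb_setapp() and the header constants are real; the
 * helper name is hypothetical.
 *
 *	static int example_fcoe_setapp(struct net_device *dev, u8 up)
 *	{
 *		struct dcb_app app = {
 *			.selector = DCB_APP_IDTYPE_ETHTYPE,
 *			.protocol = ETH_P_FCOE,
 *			.priority = up,
 *		};
 *
 *		return dcb_setapp(dev, &app);
 *	}
 */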
2128
John Fastabendb6db2172011-06-21 07:34:42 +00002129/**
John Fastabenda364c8c2011-06-21 07:34:53 +00002130 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
2131 *
2132 * Helper routine which on success returns a non-zero 802.1Qaz user
2133 * priority bitmap; otherwise returns 0 to indicate the dcb_app was
2134 * not found in the APP list.
2135 */
2136u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2137{
2138 struct dcb_app_type *itr;
2139 u8 prio = 0;
2140
2141 spin_lock(&dcb_lock);
2142 list_for_each_entry(itr, &dcb_app_list, list) {
2143 if (itr->app.selector == app->selector &&
2144 itr->app.protocol == app->protocol &&
Mark Rustade290ed82011-10-06 08:52:33 +00002145 itr->ifindex == dev->ifindex) {
John Fastabenda364c8c2011-06-21 07:34:53 +00002146 prio |= 1 << itr->app.priority;
2147 }
2148 }
2149 spin_unlock(&dcb_lock);
2150
2151 return prio;
2152}
2153EXPORT_SYMBOL(dcb_ieee_getapp_mask);
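
/*
 * Illustrative sketch: testing whether any 802.1Qaz APP entry assigns a
 * priority to iSCSI (TCP port 3260).  IEEE_8021QAZ_APP_SEL_STREAM comes
 * from linux/dcbnl.h; the helper name and the port constant are only for
 * the example.
 *
 *	static bool example_iscsi_app_prio_set(struct net_device *dev)
 *	{
 *		struct dcb_app app = {
 *			.selector = IEEE_8021QAZ_APP_SEL_STREAM,
 *			.protocol = 3260,
 *		};
 *
 *		return dcb_ieee_getapp_mask(dev, &app) != 0;
 *	}
 */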
2154
2155/**
John Fastabendb6db2172011-06-21 07:34:42 +00002156 * dcb_ieee_setapp - add IEEE dcb application data to app list
2157 *
2158 * This adds Application data to the list. Multiple application
2159 * entries may exist for the same selector and protocol as long
2160 * as the priorities are different.
2161 */
2162int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2163{
2164 struct dcb_app_type *itr, *entry;
2165 struct dcb_app_type event;
2166 int err = 0;
2167
Mark Rustade290ed82011-10-06 08:52:33 +00002168 event.ifindex = dev->ifindex;
John Fastabendb6db2172011-06-21 07:34:42 +00002169 memcpy(&event.app, new, sizeof(event.app));
John Fastabend6bd0e1c2011-10-06 08:52:38 +00002170 if (dev->dcbnl_ops->getdcbx)
2171 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
John Fastabendb6db2172011-06-21 07:34:42 +00002172
2173 spin_lock(&dcb_lock);
2174 /* Search for existing match and abort if found */
2175 list_for_each_entry(itr, &dcb_app_list, list) {
2176 if (itr->app.selector == new->selector &&
2177 itr->app.protocol == new->protocol &&
2178 itr->app.priority == new->priority &&
Mark Rustade290ed82011-10-06 08:52:33 +00002179 itr->ifindex == dev->ifindex) {
John Fastabendb6db2172011-06-21 07:34:42 +00002180 err = -EEXIST;
2181 goto out;
2182 }
2183 }
2184
2185 	/* App entry does not exist, add new entry */
2186 entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2187 if (!entry) {
2188 err = -ENOMEM;
2189 goto out;
2190 }
2191
2192 memcpy(&entry->app, new, sizeof(*new));
Mark Rustade290ed82011-10-06 08:52:33 +00002193 entry->ifindex = dev->ifindex;
John Fastabendb6db2172011-06-21 07:34:42 +00002194 list_add(&entry->list, &dcb_app_list);
2195out:
2196 spin_unlock(&dcb_lock);
2197 if (!err)
2198 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2199 return err;
2200}
2201EXPORT_SYMBOL(dcb_ieee_setapp);
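
/*
 * Illustrative sketch: registering an IEEE 802.1Qaz APP entry that maps
 * the FCoE ethertype to priority 3, much as a driver's ieee_setapp()
 * path might.  The wrapper and the choice of priority are hypothetical;
 * dcb_ieee_setapp() and the dcbnl.h selector are real.
 *
 *	static int example_fcoe_ieee_setapp(struct net_device *dev)
 *	{
 *		struct dcb_app app = {
 *			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
 *			.protocol = ETH_P_FCOE,
 *			.priority = 3,
 *		};
 *
 *		return dcb_ieee_setapp(dev, &app);
 *	}
 */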
2202
John Fastabendf9ae7e42011-06-21 07:34:48 +00002203/**
2204 * dcb_ieee_delapp - delete IEEE dcb application data from list
2205 *
2206 * This removes a matching APP data from the APP list
2207 * This removes a matching APP entry from the APP list
2208int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2209{
2210 struct dcb_app_type *itr;
2211 struct dcb_app_type event;
2212 int err = -ENOENT;
2213
Mark Rustade290ed82011-10-06 08:52:33 +00002214 event.ifindex = dev->ifindex;
John Fastabendf9ae7e42011-06-21 07:34:48 +00002215 memcpy(&event.app, del, sizeof(event.app));
John Fastabend6bd0e1c2011-10-06 08:52:38 +00002216 if (dev->dcbnl_ops->getdcbx)
2217 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
John Fastabendf9ae7e42011-06-21 07:34:48 +00002218
2219 spin_lock(&dcb_lock);
2220 /* Search for existing match and remove it. */
2221 list_for_each_entry(itr, &dcb_app_list, list) {
2222 if (itr->app.selector == del->selector &&
2223 itr->app.protocol == del->protocol &&
2224 itr->app.priority == del->priority &&
Mark Rustade290ed82011-10-06 08:52:33 +00002225 itr->ifindex == dev->ifindex) {
John Fastabendf9ae7e42011-06-21 07:34:48 +00002226 list_del(&itr->list);
2227 kfree(itr);
2228 err = 0;
2229 goto out;
2230 }
2231 }
2232
2233out:
2234 spin_unlock(&dcb_lock);
2235 if (!err)
2236 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2237 return err;
2238}
2239EXPORT_SYMBOL(dcb_ieee_delapp);
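
/*
 * Illustrative counterpart to the sketch above: deleting that same APP
 * entry.  Because the lookup matches selector, protocol and priority
 * exactly, the fields must repeat the values used when the entry was
 * added.
 *
 *	static int example_fcoe_ieee_delapp(struct net_device *dev)
 *	{
 *		struct dcb_app app = {
 *			.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
 *			.protocol = ETH_P_FCOE,
 *			.priority = 3,
 *		};
 *
 *		return dcb_ieee_delapp(dev, &app);
 *	}
 */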
2240
Shmulik Ravid7c14c3f2010-12-30 06:27:10 +00002241static void dcb_flushapp(void)
John Fastabend9ab933a2010-12-30 09:26:31 +00002242{
2243 struct dcb_app_type *app;
Dan Carpenter2a8fe002011-01-04 21:03:44 +00002244 struct dcb_app_type *tmp;
John Fastabend9ab933a2010-12-30 09:26:31 +00002245
2246 spin_lock(&dcb_lock);
Dan Carpenter2a8fe002011-01-04 21:03:44 +00002247 list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
John Fastabend9ab933a2010-12-30 09:26:31 +00002248 list_del(&app->list);
2249 kfree(app);
2250 }
2251 spin_unlock(&dcb_lock);
2252}
2253
Alexander Duyck2f90b862008-11-20 20:52:10 -08002254static int __init dcbnl_init(void)
2255{
John Fastabend9ab933a2010-12-30 09:26:31 +00002256 INIT_LIST_HEAD(&dcb_app_list);
2257
Greg Rosec7ac8672011-06-10 01:27:09 +00002258 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
2259 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);
Alexander Duyck2f90b862008-11-20 20:52:10 -08002260
2261 return 0;
2262}
2263module_init(dcbnl_init);
2264
2265static void __exit dcbnl_exit(void)
2266{
2267 rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
2268 rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
John Fastabend9ab933a2010-12-30 09:26:31 +00002269 dcb_flushapp();
Alexander Duyck2f90b862008-11-20 20:52:10 -08002270}
2271module_exit(dcbnl_exit);