/*
 * drivers/net/veth.c
 *
 * Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>

#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>

#define DRV_NAME        "veth"
#define DRV_VERSION     "1.0"

#define MIN_MTU 68              /* Min L3 MTU */
#define MAX_MTU 65535           /* Max L3 MTU (arbitrary) */

struct veth_net_stats {
        u64                     rx_packets;
        u64                     tx_packets;
        u64                     rx_bytes;
        u64                     tx_bytes;
        u64                     rx_dropped;
        u64                     tx_dropped;
        struct u64_stats_sync   syncp;
};

struct veth_priv {
        struct net_device *peer;
        struct veth_net_stats __percpu *stats;
};

/*
 * ethtool interface
 */

static struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "peer_ifindex" },
};

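/* There is no real PHY behind a veth; report a fixed 10Gb/s, full-duplex link. */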
static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        cmd->supported = 0;
        cmd->advertising = 0;
        ethtool_cmd_speed_set(cmd, SPEED_10000);
        cmd->duplex = DUPLEX_FULL;
        cmd->port = PORT_TP;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = AUTONEG_DISABLE;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->fw_version, "N/A");
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        switch(stringset) {
        case ETH_SS_STATS:
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
                break;
        }
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ethtool_stats_keys);
        default:
                return -EOPNOTSUPP;
        }
}

static void veth_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *stats, u64 *data)
{
        struct veth_priv *priv;

        priv = netdev_priv(dev);
        data[0] = priv->peer->ifindex;
}

static const struct ethtool_ops veth_ethtool_ops = {
        .get_settings           = veth_get_settings,
        .get_drvinfo            = veth_get_drvinfo,
        .get_link               = ethtool_op_get_link,
        .get_strings            = veth_get_strings,
        .get_sset_count         = veth_get_sset_count,
        .get_ethtool_stats      = veth_get_ethtool_stats,
};

/*
 * xmit
 */

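/*
 * Transmit on one end of the pair is receive on the other: the skb is
 * handed to dev_forward_skb() for the peer device, and the per-cpu
 * tx/rx counters of both devices are updated under u64_stats_sync.
 */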
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net_device *rcv = NULL;
        struct veth_priv *priv, *rcv_priv;
        struct veth_net_stats *stats, *rcv_stats;
        int length;

        priv = netdev_priv(dev);
        rcv = priv->peer;
        rcv_priv = netdev_priv(rcv);

        stats = this_cpu_ptr(priv->stats);
        rcv_stats = this_cpu_ptr(rcv_priv->stats);

        if (!(rcv->flags & IFF_UP))
                goto tx_drop;

        /* don't change ip_summed == CHECKSUM_PARTIAL, as that
           will cause bad checksum on forwarded packets */
        if (skb->ip_summed == CHECKSUM_NONE &&
            rcv->features & NETIF_F_RXCSUM)
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        length = skb->len;
        if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
                goto rx_drop;

        u64_stats_update_begin(&stats->syncp);
        stats->tx_bytes += length;
        stats->tx_packets++;
        u64_stats_update_end(&stats->syncp);

        u64_stats_update_begin(&rcv_stats->syncp);
        rcv_stats->rx_bytes += length;
        rcv_stats->rx_packets++;
        u64_stats_update_end(&rcv_stats->syncp);

        return NETDEV_TX_OK;

tx_drop:
        kfree_skb(skb);
        u64_stats_update_begin(&stats->syncp);
        stats->tx_dropped++;
        u64_stats_update_end(&stats->syncp);
        return NETDEV_TX_OK;

rx_drop:
        u64_stats_update_begin(&rcv_stats->syncp);
        rcv_stats->rx_dropped++;
        u64_stats_update_end(&rcv_stats->syncp);
        return NETDEV_TX_OK;
}

/*
 * general routines
 */

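/*
 * Fold the per-cpu counters into @tot.  Each CPU's snapshot is read
 * under u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() so the
 * 64-bit counters stay consistent on 32-bit hosts without taking a
 * lock on the transmit path.
 */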
static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
                                                  struct rtnl_link_stats64 *tot)
{
        struct veth_priv *priv = netdev_priv(dev);
        int cpu;

        for_each_possible_cpu(cpu) {
                struct veth_net_stats *stats = per_cpu_ptr(priv->stats, cpu);
                u64 rx_packets, rx_bytes, rx_dropped;
                u64 tx_packets, tx_bytes, tx_dropped;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_bh(&stats->syncp);
                        rx_packets = stats->rx_packets;
                        tx_packets = stats->tx_packets;
                        rx_bytes = stats->rx_bytes;
                        tx_bytes = stats->tx_bytes;
                        rx_dropped = stats->rx_dropped;
                        tx_dropped = stats->tx_dropped;
                } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes += rx_bytes;
                tot->tx_bytes += tx_bytes;
                tot->rx_dropped += rx_dropped;
                tot->tx_dropped += tx_dropped;
        }

        return tot;
}

static int veth_open(struct net_device *dev)
{
        struct veth_priv *priv;

        priv = netdev_priv(dev);
        if (priv->peer == NULL)
                return -ENOTCONN;

        if (priv->peer->flags & IFF_UP) {
                netif_carrier_on(dev);
                netif_carrier_on(priv->peer);
        }
        return 0;
}

static int veth_close(struct net_device *dev)
{
        struct veth_priv *priv = netdev_priv(dev);

        netif_carrier_off(dev);
        netif_carrier_off(priv->peer);

        return 0;
}

static int is_valid_veth_mtu(int new_mtu)
{
        return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
}

static int veth_change_mtu(struct net_device *dev, int new_mtu)
{
        if (!is_valid_veth_mtu(new_mtu))
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

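/*
 * The per-cpu stats block is allocated in ndo_init and released by the
 * device destructor (veth_dev_free), which is installed in veth_setup().
 */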
static int veth_dev_init(struct net_device *dev)
{
        struct veth_net_stats __percpu *stats;
        struct veth_priv *priv;

        stats = alloc_percpu(struct veth_net_stats);
        if (stats == NULL)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->stats = stats;
        return 0;
}

static void veth_dev_free(struct net_device *dev)
{
        struct veth_priv *priv;

        priv = netdev_priv(dev);
        free_percpu(priv->stats);
        free_netdev(dev);
}

static const struct net_device_ops veth_netdev_ops = {
        .ndo_init            = veth_dev_init,
        .ndo_open            = veth_open,
        .ndo_stop            = veth_close,
        .ndo_start_xmit      = veth_xmit,
        .ndo_change_mtu      = veth_change_mtu,
        .ndo_get_stats64     = veth_get_stats64,
        .ndo_set_mac_address = eth_mac_addr,
};

static void veth_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->netdev_ops = &veth_netdev_ops;
        dev->ethtool_ops = &veth_ethtool_ops;
        dev->features |= NETIF_F_LLTX;
        dev->destructor = veth_dev_free;

        dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
}

/*
 * netlink interface
 */

static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        if (tb[IFLA_MTU]) {
                if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
                        return -EINVAL;
        }
        return 0;
}

static struct rtnl_link_ops veth_link_ops;

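/*
 * Creating one veth link really creates two netdevs.  The peer is built
 * from the optional VETH_INFO_PEER attributes and registered first; the
 * requesting device is registered last.  Any failure after the peer
 * exists unwinds through the error labels and unregisters it again.
 */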
static int veth_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
{
        int err;
        struct net_device *peer;
        struct veth_priv *priv;
        char ifname[IFNAMSIZ];
        struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
        struct ifinfomsg *ifmp;
        struct net *net;

        /*
         * create and register peer first
         */
        if (data != NULL && data[VETH_INFO_PEER] != NULL) {
                struct nlattr *nla_peer;

                nla_peer = data[VETH_INFO_PEER];
                ifmp = nla_data(nla_peer);
                err = nla_parse(peer_tb, IFLA_MAX,
                                nla_data(nla_peer) + sizeof(struct ifinfomsg),
                                nla_len(nla_peer) - sizeof(struct ifinfomsg),
                                ifla_policy);
                if (err < 0)
                        return err;

                err = veth_validate(peer_tb, NULL);
                if (err < 0)
                        return err;

                tbp = peer_tb;
        } else {
                ifmp = NULL;
                tbp = tb;
        }

        if (tbp[IFLA_IFNAME])
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
        else
                snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");

        net = rtnl_link_get_net(src_net, tbp);
        if (IS_ERR(net))
                return PTR_ERR(net);

        peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp);
        if (IS_ERR(peer)) {
                put_net(net);
                return PTR_ERR(peer);
        }

        if (tbp[IFLA_ADDRESS] == NULL)
                random_ether_addr(peer->dev_addr);

        err = register_netdevice(peer);
        put_net(net);
        net = NULL;
        if (err < 0)
                goto err_register_peer;

        netif_carrier_off(peer);

        err = rtnl_configure_link(peer, ifmp);
        if (err < 0)
                goto err_configure_peer;

        /*
         * register dev last
         *
         * note that, since we've just registered a new device, the dev's
         * name should be re-allocated
         */

        if (tb[IFLA_ADDRESS] == NULL)
                random_ether_addr(dev->dev_addr);

        if (tb[IFLA_IFNAME])
                nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
        else
                snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

        if (strchr(dev->name, '%')) {
                err = dev_alloc_name(dev, dev->name);
                if (err < 0)
                        goto err_alloc_name;
        }

        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;

        netif_carrier_off(dev);

        /*
         * tie the devices together
         */

        priv = netdev_priv(dev);
        priv->peer = peer;

        priv = netdev_priv(peer);
        priv->peer = dev;
        return 0;

err_register_dev:
        /* nothing to do */
err_alloc_name:
err_configure_peer:
        unregister_netdevice(peer);
        return err;

err_register_peer:
        free_netdev(peer);
        return err;
}

static void veth_dellink(struct net_device *dev, struct list_head *head)
{
        struct veth_priv *priv;
        struct net_device *peer;

        priv = netdev_priv(dev);
        peer = priv->peer;

        unregister_netdevice_queue(dev, head);
        unregister_netdevice_queue(peer, head);
}

static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];

static struct rtnl_link_ops veth_link_ops = {
        .kind           = DRV_NAME,
        .priv_size      = sizeof(struct veth_priv),
        .setup          = veth_setup,
        .validate       = veth_validate,
        .newlink        = veth_newlink,
        .dellink        = veth_dellink,
        .policy         = veth_policy,
        .maxtype        = VETH_INFO_MAX,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
        return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
        rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);