/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2

struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* The skb we couldn't send because buffers were full. */
	struct sk_buff *last_xmit_skb;

	/* If we need to free in a timer, this is it. */
	struct timer_list xmit_free_timer;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* For cleaning up after transmission. */
	struct tasklet_struct tasklet;
	bool free_in_tasklet;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Receive & send queues. */
	struct sk_buff_head recv;
	struct sk_buff_head send;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

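/* Both header types live in the skb's control buffer (cb), so one
 * accessor serves the plain and mergeable-rxbuf cases. */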
static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr *)skb->cb;
}

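/* Return a page to the driver's free list, chained through page->private. */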
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
	page->private = (unsigned long)vi->pages;
	vi->pages = page;
}

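/* Give back all fragment pages attached to an skb and clear its frag list. */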
static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		give_a_page(vi, skb_shinfo(skb)->frags[i].page);
	skb_shinfo(skb)->nr_frags = 0;
	skb->data_len = 0;
}

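/* Grab a page from the free list, or allocate a fresh one if it's empty. */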
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p)
		vi->pages = (struct page *)p->private;
	else
		p = alloc_page(gfp_mask);
	return p;
}

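/* Callback when the host has consumed buffers on the send virtqueue. */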
static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);

	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
	 * queued, start_xmit won't be called. */
	tasklet_schedule(&vi->tasklet);
}

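/* Process one received buffer: assemble the frame (merging rx buffers if
 * negotiated), fix up checksum and GSO metadata, then pass it up the stack. */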
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	int err;
	int i;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}

	if (vi->mergeable_rx_bufs) {
		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
		unsigned int copy;
		char *p = page_address(skb_shinfo(skb)->frags[0].page);

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

		memcpy(hdr, p, sizeof(*mhdr));
		p += sizeof(*mhdr);

		copy = len;
		if (copy > skb_tailroom(skb))
			copy = skb_tailroom(skb);

		memcpy(skb_put(skb, copy), p, copy);

		len -= copy;

		if (!len) {
			give_a_page(vi, skb_shinfo(skb)->frags[0].page);
			skb_shinfo(skb)->nr_frags--;
		} else {
			skb_shinfo(skb)->frags[0].page_offset +=
				sizeof(*mhdr) + copy;
			skb_shinfo(skb)->frags[0].size = len;
			skb->data_len += len;
			skb->len += len;
		}

		while (--mhdr->num_buffers) {
			struct sk_buff *nskb;

			i = skb_shinfo(skb)->nr_frags;
			if (i >= MAX_SKB_FRAGS) {
				pr_debug("%s: packet too long %d\n", dev->name,
					 len);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
			if (!nskb) {
				pr_debug("%s: rx error: %d buffers missing\n",
					 dev->name, mhdr->num_buffers);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			__skb_unlink(nskb, &vi->recv);
			vi->num--;

			skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
			skb_shinfo(nskb)->nr_frags = 0;
			kfree_skb(nskb);

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;

			skb_shinfo(skb)->frags[i].size = len;
			skb_shinfo(skb)->nr_frags++;
			skb->data_len += len;
			skb->len += len;
		}
	} else {
		len -= sizeof(struct virtio_net_hdr);

		if (len <= MAX_PACKET_LEN)
			trim_pages(vi, skb);

		err = pskb_trim(skb, len);
		if (err) {
			pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
				 len, err);
			dev->stats.rx_dropped++;
			goto drop;
		}
	}

	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb, hdr->csum_start,
					  hdr->csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}

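/* Allocate full-size receive skbs (the non-mergeable case); returns false
 * if we stopped early because an allocation failed. */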
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	int num, err, i;
	bool oom = false;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);
	for (;;) {
		struct virtio_net_hdr *hdr;

		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			oom = true;
			break;
		}

		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, MAX_PACKET_LEN);

		hdr = skb_vnet_hdr(skb);
		sg_set_buf(sg, hdr, sizeof(*hdr));

		if (vi->big_packets) {
			for (i = 0; i < MAX_SKB_FRAGS; i++) {
				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
				f->page = get_a_page(vi, gfp);
				if (!f->page)
					break;

				f->page_offset = 0;
				f->size = PAGE_SIZE;

				skb->data_len += PAGE_SIZE;
				skb->len += PAGE_SIZE;

				skb_shinfo(skb)->nr_frags++;
			}
		}

		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err < 0) {
			skb_unlink(skb, &vi->recv);
			trim_pages(vi, skb);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
	return !oom;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct scatterlist sg[1];
	int err;
	bool oom = false;

	if (!vi->mergeable_rx_bufs)
		return try_fill_recv_maxbufs(vi, gfp);

	for (;;) {
		skb_frag_t *f;

		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			oom = true;
			break;
		}

		skb_reserve(skb, NET_IP_ALIGN);

		f = &skb_shinfo(skb)->frags[0];
		f->page = get_a_page(vi, gfp);
		if (!f->page) {
			oom = true;
			kfree_skb(skb);
			break;
		}

		f->page_offset = 0;
		f->size = PAGE_SIZE;

		skb_shinfo(skb)->nr_frags++;

		sg_init_one(sg, page_address(f->page), PAGE_SIZE);
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
		if (err < 0) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
	return !oom;
}

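/* Callback when the host has placed buffers on the receive virtqueue. */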
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

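/* Refill the receive queue from process context after an atomic refill
 * failed; NAPI is paused while we touch the queue. */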
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	try_fill_recv(vi, GFP_KERNEL);
	still_empty = (vi->num == 0);
	napi_enable(&vi->napi);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

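/* NAPI poll: harvest up to budget received buffers, refill the ring, and
 * re-enable virtqueue callbacks when we run out of packets. */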
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

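/* Reclaim skbs the host has finished transmitting and update tx stats. */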
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock(vi->dev);

	free_old_xmit_skbs(vi);

	if (!skb_queue_empty(&vi->send))
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	netif_tx_unlock(vi->dev);
}

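/* Fill in the virtio_net header and add one skb to the send virtqueue. */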
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	int num, err;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_headlen(skb);
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	mhdr->num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sg, mhdr, sizeof(*mhdr));
	else
		sg_set_buf(sg, hdr, sizeof(*hdr));

	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err >= 0 && !vi->free_in_tasklet)
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	return err;
}

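/* Retry the deferred last_xmit_skb, and free sent skbs if we own freeing,
 * from bottom-half context. */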
static void xmit_tasklet(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock_bh(vi->dev);
	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) {
		vi->svq->vq_ops->kick(vi->svq);
		vi->last_xmit_skb = NULL;
	}
	if (vi->free_in_tasklet)
		free_old_xmit_skbs(vi);
	netif_tx_unlock_bh(vi->dev);
}

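/* Main transmit path; if the ring is full the skb is parked in
 * last_xmit_skb and the queue is stopped until the host catches up. */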
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* If we have a buffer left over from last time, send it now. */
	if (unlikely(vi->last_xmit_skb) &&
	    xmit_skb(vi, vi->last_xmit_skb) < 0)
		goto stop_queue;

	vi->last_xmit_skb = NULL;

	/* Put new one in send queue and do transmit */
	if (likely(skb)) {
		__skb_queue_head(&vi->send, skb);
		if (xmit_skb(vi, skb) < 0) {
			vi->last_xmit_skb = skb;
			skb = NULL;
			goto stop_queue;
		}
	}
done:
	vi->svq->vq_ops->kick(vi->svq);
	return NETDEV_TX_OK;

stop_queue:
	pr_debug("%s: virtio not prepared to send\n", dev->name);
	netif_stop_queue(dev);

	/* Activate callback for using skbs: if this returns false it
	 * means some were used in the meantime. */
	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
		vi->svq->vq_ops->disable_cb(vi->svq);
		netif_start_queue(dev);
		goto again;
	}
	if (skb) {
		/* Drop this skb: we only queue one. */
		vi->dev->stats.tx_dropped++;
		kfree_skb(skb);
	}
	goto done;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

	vi->cvq->vq_ops->kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

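/* Push promiscuous/allmulti flags and the unicast/multicast MAC filter
 * lists to the host over the control virtqueue. */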
static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct dev_addr_list *addr;
	struct netdev_hw_addr *ha;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	/* MAC filter - use one buffer for both lists */
	mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
				 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = dev->uc.count;
	i = 0;
	list_for_each_entry(ha, &dev->uc.list, list)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

	/* Multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[dev->uc.count][0];

	mac_data->entries = dev->mc_count;
	addr = dev->mc_list;
	for (i = 0; i < dev->mc_count; i++, addr = addr->next)
		memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

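/* Read link status from config space and reflect carrier changes to the
 * network core. */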
static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}

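/* Device setup: negotiate features, fetch the MAC, find the virtqueues,
 * register the netdev, then post the initial receive buffers. */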
static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
	const char *names[] = { "input", "output", "control" };
	int nvqs;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	INIT_DELAYED_WORK(&vi->refill, refill_work);

	/* If they give us a callback when all buffers are done, we don't need
	 * the timer. */
	vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto free;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

	if (!vi->free_in_tasklet)
		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	vi->status = VIRTIO_NET_S_LINK_UP;
	virtnet_update_status(vi);
	netif_carrier_on(dev);

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
	cancel_delayed_work_sync(&vi->refill);
free_vqs:
	vdev->config->del_vqs(vdev);
free:
	free_netdev(dev);
	return err;
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	struct sk_buff *skb;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	if (!vi->free_in_tasklet)
		del_timer_sync(&vi->xmit_free_timer);

	/* Free our skbs in send and recv queues, if any. */
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	__skb_queue_purge(&vi->send);

	BUG_ON(vi->num != 0);

	unregister_netdev(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	__devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");