/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <net/tcp.h>

#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/*
 * This is the maximum number of slots an skb can have. If a guest sends
 * an skb which exceeds this limit it is considered malicious.
 */
#define MAX_SKB_SLOTS_DEFAULT 20
static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
module_param(max_skb_slots, uint, 0444);

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
        struct xen_netif_tx_request req; /* coalesced tx request */
        struct xenvif *vif;
        pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
                                  * if it is head of one or more tx
                                  * reqs
                                  */
};

struct netbk_rx_meta {
        int id;
        int size;
        int gso_size;
};

#define MAX_PENDING_REQS 256

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET PAGE_SIZE

/* extra field used in struct page */
union page_ext {
        struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH   8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
                unsigned int group:GROUP_WIDTH;
                unsigned int idx:IDX_WIDTH;
#else
                unsigned int group, idx;
#endif
        } e;
        void *mapping;
};

struct xen_netbk {
        wait_queue_head_t wq;
        struct task_struct *task;

        struct sk_buff_head rx_queue;
        struct sk_buff_head tx_queue;

        struct timer_list net_timer;

        struct page *mmap_pages[MAX_PENDING_REQS];

        pending_ring_idx_t pending_prod;
        pending_ring_idx_t pending_cons;
        struct list_head net_schedule_list;

        /* Protect the net_schedule_list in netif. */
        spinlock_t net_schedule_list_lock;

        atomic_t netfront_count;

        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
        /* Coalescing tx requests before copying makes the number of
         * grant copy ops greater than or equal to the number of slots
         * required. In the worst case a tx request consumes two
         * gnttab_copy operations.
         */
        struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];

        u16 pending_ring[MAX_PENDING_REQS];

        /*
         * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
         * head/fragment page uses 2 copy operations because it
         * straddles two buffers in the frontend.
         */
        struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
        struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};

static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;

/*
 * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
 * of one or more merged tx requests, otherwise it is the continuation of
 * the previous tx request.
 */
static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
{
        return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}
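
/*
 * For example: if one packet was coalesced from three tx requests
 * occupying slots A, B and C of pending_ring, then
 *
 *      pending_tx_info[A].head == pending ring index of A
 *      pending_tx_info[B].head == INVALID_PENDING_RING_IDX
 *      pending_tx_info[C].head == INVALID_PENDING_RING_IDX
 *
 * so pending_tx_is_head() is true only for A, and consumers such as
 * xen_netbk_idx_release() walk forward from a head until they reach
 * the next slot that is itself a head.
 */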

void xen_netbk_add_xenvif(struct xenvif *vif)
{
        int i;
        int min_netfront_count;
        int min_group = 0;
        struct xen_netbk *netbk;

        min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
        for (i = 0; i < xen_netbk_group_nr; i++) {
                int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
                if (netfront_count < min_netfront_count) {
                        min_group = i;
                        min_netfront_count = netfront_count;
                }
        }

        netbk = &xen_netbk[min_group];

        vif->netbk = netbk;
        atomic_inc(&netbk->netfront_count);
}

void xen_netbk_remove_xenvif(struct xenvif *vif)
{
        struct xen_netbk *netbk = vif->netbk;
        vif->netbk = NULL;
        atomic_dec(&netbk->netfront_count);
}

static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
                                  u8 status);
static void make_tx_response(struct xenvif *vif,
                             struct xen_netif_tx_request *txp,
                             s8 st);
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
                                                      u16 id,
                                                      s8 st,
                                                      u16 offset,
                                                      u16 size,
                                                      u16 flags);

static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
                                       u16 idx)
{
        return page_to_pfn(netbk->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
                                         u16 idx)
{
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}

/* extra field used in struct page */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
                                unsigned int idx)
{
        unsigned int group = netbk - xen_netbk;
        union page_ext ext = { .e = { .group = group + 1, .idx = idx } };

        BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
        pg->mapping = ext.mapping;
}

static int get_page_ext(struct page *pg,
                        unsigned int *pgroup, unsigned int *pidx)
{
        union page_ext ext = { .mapping = pg->mapping };
        struct xen_netbk *netbk;
        unsigned int group, idx;

        group = ext.e.group - 1;

        if (group < 0 || group >= xen_netbk_group_nr)
                return 0;

        netbk = &xen_netbk[group];

        idx = ext.e.idx;

        if ((idx < 0) || (idx >= MAX_PENDING_REQS))
                return 0;

        if (netbk->mmap_pages[idx] != pg)
                return 0;

        *pgroup = group;
        *pidx = idx;

        return 1;
}
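
/*
 * The group stored in page->mapping above is biased by one:
 * xen_netbk_idx_release() clears page->mapping to zero when a page is
 * recycled, and with the bias a zeroed mapping decodes to group == ~0U
 * and fails the range check, instead of being mistaken for a valid
 * group 0, idx 0 entry.
 */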

/*
 * This is the amount of the packet we copy rather than map, so that the
 * guest can't fiddle with the contents of the headers while we do
 * packet processing on them (netfilter, routing, etc).
 */
#define PKT_PROT_LEN    (ETH_HLEN + \
                         VLAN_HLEN + \
                         sizeof(struct iphdr) + MAX_IPOPTLEN + \
                         sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
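
/*
 * With the usual values (14-byte Ethernet header, 4-byte VLAN tag,
 * 20-byte IPv4 header plus up to MAX_IPOPTLEN == 40 bytes of options,
 * 20-byte TCP header plus up to MAX_TCP_OPTION_SPACE == 40 bytes of
 * options) this works out to 138 bytes of copied header.
 */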

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
        return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
        frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
        return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
        return MAX_PENDING_REQS -
                netbk->pending_prod + netbk->pending_cons;
}
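
/*
 * Worked example: MAX_PENDING_REQS is 256, a power of two, so
 * pending_index() reduces to i % 256. netback_init() starts
 * pending_prod at MAX_PENDING_REQS and pending_cons at 0, so
 * nr_pending_reqs() starts at 0; consuming a slot advances
 * pending_cons and releasing one advances pending_prod, so the
 * difference always counts the requests currently in flight.
 */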

static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
        wake_up(&netbk->wq);
}

static int max_required_rx_slots(struct xenvif *vif)
{
        int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

        /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
        if (vif->can_sg || vif->gso || vif->gso_prefix)
                max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

        return max;
}

int xen_netbk_rx_ring_full(struct xenvif *vif)
{
        RING_IDX peek = vif->rx_req_cons_peek;
        RING_IDX needed = max_required_rx_slots(vif);

        return ((vif->rx.sring->req_prod - peek) < needed) ||
               ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}

int xen_netbk_must_stop_queue(struct xenvif *vif)
{
        if (!xen_netbk_rx_ring_full(vif))
                return 0;

        vif->rx.sring->req_event = vif->rx_req_cons_peek +
                max_required_rx_slots(vif);
        mb(); /* request notification /then/ check the queue */

        return xen_netbk_rx_ring_full(vif);
}
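
/*
 * Note the order above: when the ring first appears full, req_event is
 * set so the frontend will raise an event once it posts enough new rx
 * requests, and only then is the ring re-checked. The barrier keeps
 * the req_event write ahead of the re-check, so a request that arrives
 * in between cannot be missed by both sides.
 */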

/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
        /* simple case: we have completely filled the current buffer. */
        if (offset == MAX_BUFFER_OFFSET)
                return true;

        /*
         * complex case: start a fresh buffer if the current frag
         * would overflow the current buffer but only if:
         *     (i) this frag would fit completely in the next buffer
         * and (ii) there is already some data in the current buffer
         * and (iii) this is not the head buffer.
         *
         * Where:
         * - (i) stops us splitting a frag into two copies
         *   unless the frag is too large for a single buffer.
         * - (ii) stops us from leaving a buffer pointlessly empty.
         * - (iii) stops us leaving the first buffer
         *   empty. Strictly speaking this is already covered
         *   by (ii) but is explicitly checked because
         *   netfront relies on the first buffer being
         *   non-empty and can crash otherwise.
         *
         * This means we will effectively linearise small
         * frags but do not needlessly split large buffers
         * into multiple copies, tending to give large frags
         * their own buffers as before.
         */
        if ((offset + size > MAX_BUFFER_OFFSET) &&
            (size <= MAX_BUFFER_OFFSET) && offset && !head)
                return true;

        return false;
}
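
/*
 * Worked example: with MAX_BUFFER_OFFSET == 4096, a call with
 * offset == 3000, size == 2000 and head == 0 returns true, since the
 * fragment would overflow the current buffer (3000 + 2000 > 4096) but
 * fits entirely in a fresh one (2000 <= 4096). A 5000-byte fragment
 * can never fit in one buffer, so the function returns false and the
 * copy loop splits it across buffers instead.
 */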

/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
        unsigned int count;
        int i, copy_off;

        count = DIV_ROUND_UP(
                        offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);

        copy_off = skb_headlen(skb) % PAGE_SIZE;

        if (skb_shinfo(skb)->gso_size)
                count++;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
                unsigned long bytes;
                while (size > 0) {
                        BUG_ON(copy_off > MAX_BUFFER_OFFSET);

                        if (start_new_rx_buffer(copy_off, size, 0)) {
                                count++;
                                copy_off = 0;
                        }

                        bytes = size;
                        if (copy_off + bytes > MAX_BUFFER_OFFSET)
                                bytes = MAX_BUFFER_OFFSET - copy_off;

                        copy_off += bytes;
                        size -= bytes;
                }
        }
        return count;
}
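
/*
 * Example: a non-GSO skb whose 1400-byte linear area starts 100 bytes
 * into a page and which carries no frags needs
 * DIV_ROUND_UP(100 + 1400, 4096) == 1 slot. Adding a single 4096-byte
 * frag leaves copy_off == 1400, so the frag overflows the current
 * buffer, fits in a fresh one, and costs one more slot: 2 in total.
 */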

struct netrx_pending_operations {
        unsigned copy_prod, copy_cons;
        unsigned meta_prod, meta_cons;
        struct gnttab_copy *copy;
        struct netbk_rx_meta *meta;
        int copy_off;
        grant_ref_t copy_gref;
};

static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
                                                struct netrx_pending_operations *npo)
{
        struct netbk_rx_meta *meta;
        struct xen_netif_rx_request *req;

        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

        meta = npo->meta + npo->meta_prod++;
        meta->gso_size = 0;
        meta->size = 0;
        meta->id = req->id;

        npo->copy_off = 0;
        npo->copy_gref = req->gref;

        return meta;
}

/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                                struct netrx_pending_operations *npo,
                                struct page *page, unsigned long size,
                                unsigned long offset, int *head)
{
        struct gnttab_copy *copy_gop;
        struct netbk_rx_meta *meta;
        /*
         * These variables are used iff get_page_ext returns true,
         * in which case they are guaranteed to be initialized.
         */
        unsigned int uninitialized_var(group), uninitialized_var(idx);
        int foreign = get_page_ext(page, &group, &idx);
        unsigned long bytes;

        /* Data must not cross a page boundary. */
        BUG_ON(size + offset > PAGE_SIZE);

        meta = npo->meta + npo->meta_prod - 1;

        while (size > 0) {
                BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

                if (start_new_rx_buffer(npo->copy_off, size, *head)) {
                        /*
                         * Netfront requires there to be some data in the head
                         * buffer.
                         */
                        BUG_ON(*head);

                        meta = get_next_rx_buffer(vif, npo);
                }

                bytes = size;
                if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
                        bytes = MAX_BUFFER_OFFSET - npo->copy_off;

                copy_gop = npo->copy + npo->copy_prod++;
                copy_gop->flags = GNTCOPY_dest_gref;
                if (foreign) {
                        struct xen_netbk *netbk = &xen_netbk[group];
                        struct pending_tx_info *src_pend;

                        src_pend = &netbk->pending_tx_info[idx];

                        copy_gop->source.domid = src_pend->vif->domid;
                        copy_gop->source.u.ref = src_pend->req.gref;
                        copy_gop->flags |= GNTCOPY_source_gref;
                } else {
                        void *vaddr = page_address(page);
                        copy_gop->source.domid = DOMID_SELF;
                        copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
                }
                copy_gop->source.offset = offset;
                copy_gop->dest.domid = vif->domid;

                copy_gop->dest.offset = npo->copy_off;
                copy_gop->dest.u.ref = npo->copy_gref;
                copy_gop->len = bytes;

                npo->copy_off += bytes;
                meta->size += bytes;

                offset += bytes;
                size -= bytes;

                /* Leave a gap for the GSO descriptor. */
                if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
                        vif->rx.req_cons++;

                *head = 0; /* There must be something in this buffer now. */
        }
}

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
                         struct netrx_pending_operations *npo)
{
        struct xenvif *vif = netdev_priv(skb->dev);
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int i;
        struct xen_netif_rx_request *req;
        struct netbk_rx_meta *meta;
        unsigned char *data;
        int head = 1;
        int old_meta_prod;

        old_meta_prod = npo->meta_prod;

        /* Set up a GSO prefix descriptor, if necessary */
        if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
                req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
                meta = npo->meta + npo->meta_prod++;
                meta->gso_size = skb_shinfo(skb)->gso_size;
                meta->size = 0;
                meta->id = req->id;
        }

        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
        meta = npo->meta + npo->meta_prod++;

        if (!vif->gso_prefix)
                meta->gso_size = skb_shinfo(skb)->gso_size;
        else
                meta->gso_size = 0;

        meta->size = 0;
        meta->id = req->id;
        npo->copy_off = 0;
        npo->copy_gref = req->gref;

        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
                unsigned int len = PAGE_SIZE - offset;

                if (data + len > skb_tail_pointer(skb))
                        len = skb_tail_pointer(skb) - data;

                netbk_gop_frag_copy(vif, skb, npo,
                                    virt_to_page(data), len, offset, &head);
                data += len;
        }

        for (i = 0; i < nr_frags; i++) {
                netbk_gop_frag_copy(vif, skb, npo,
                                    skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                    skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                    skb_shinfo(skb)->frags[i].page_offset,
                                    &head);
        }

        return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
                           struct netrx_pending_operations *npo)
{
        struct gnttab_copy *copy_op;
        int status = XEN_NETIF_RSP_OKAY;
        int i;

        for (i = 0; i < nr_meta_slots; i++) {
                copy_op = npo->copy + npo->copy_cons++;
                if (copy_op->status != GNTST_okay) {
                        netdev_dbg(vif->dev,
                                   "Bad status %d from copy to DOM%d.\n",
                                   copy_op->status, vif->domid);
                        status = XEN_NETIF_RSP_ERROR;
                }
        }

        return status;
}

static void netbk_add_frag_responses(struct xenvif *vif, int status,
                                     struct netbk_rx_meta *meta,
                                     int nr_meta_slots)
{
        int i;
        unsigned long offset;

        /* No fragments used */
        if (nr_meta_slots <= 1)
                return;

        nr_meta_slots--;

        for (i = 0; i < nr_meta_slots; i++) {
                int flags;
                if (i == nr_meta_slots - 1)
                        flags = 0;
                else
                        flags = XEN_NETRXF_more_data;

                offset = 0;
                make_rx_response(vif, meta[i].id, status, offset,
                                 meta[i].size, flags);
        }
}

struct skb_cb_overlay {
        int meta_slots_used;
};

static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
        struct xenvif *vif = NULL, *tmp;
        s8 status;
        u16 irq, flags;
        struct xen_netif_rx_response *resp;
        struct sk_buff_head rxq;
        struct sk_buff *skb;
        LIST_HEAD(notify);
        int ret;
        int nr_frags;
        int count;
        unsigned long offset;
        struct skb_cb_overlay *sco;

        struct netrx_pending_operations npo = {
                .copy = netbk->grant_copy_op,
                .meta = netbk->meta,
        };

        skb_queue_head_init(&rxq);

        count = 0;

        while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
                vif = netdev_priv(skb->dev);
                nr_frags = skb_shinfo(skb)->nr_frags;

                sco = (struct skb_cb_overlay *)skb->cb;
                sco->meta_slots_used = netbk_gop_skb(skb, &npo);

                count += nr_frags + 1;

                __skb_queue_tail(&rxq, skb);

                /* Filled the batch queue? */
                /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
                if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
                        break;
        }

        BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));

        if (!npo.copy_prod)
                return;

        BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
        ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
                                        npo.copy_prod);
        BUG_ON(ret != 0);

        while ((skb = __skb_dequeue(&rxq)) != NULL) {
                sco = (struct skb_cb_overlay *)skb->cb;

                vif = netdev_priv(skb->dev);

                if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
                        resp = RING_GET_RESPONSE(&vif->rx,
                                                 vif->rx.rsp_prod_pvt++);

                        resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

                        resp->offset = netbk->meta[npo.meta_cons].gso_size;
                        resp->id = netbk->meta[npo.meta_cons].id;
                        resp->status = sco->meta_slots_used;

                        npo.meta_cons++;
                        sco->meta_slots_used--;
                }

                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;

                status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

                if (sco->meta_slots_used == 1)
                        flags = 0;
                else
                        flags = XEN_NETRXF_more_data;

                if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
                        flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
                else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        /* remote but checksummed. */
                        flags |= XEN_NETRXF_data_validated;

                offset = 0;
                resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
                                        status, offset,
                                        netbk->meta[npo.meta_cons].size,
                                        flags);

                if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
                        struct xen_netif_extra_info *gso =
                                (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&vif->rx,
                                                  vif->rx.rsp_prod_pvt++);

                        resp->flags |= XEN_NETRXF_extra_info;

                        gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
                        gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
                        gso->u.gso.pad = 0;
                        gso->u.gso.features = 0;

                        gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                        gso->flags = 0;
                }

                netbk_add_frag_responses(vif, status,
                                         netbk->meta + npo.meta_cons + 1,
                                         sco->meta_slots_used);

                RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
                irq = vif->irq;
                if (ret && list_empty(&vif->notify_list))
                        list_add_tail(&vif->notify_list, &notify);

                xenvif_notify_tx_completion(vif);

                xenvif_put(vif);
                npo.meta_cons += sco->meta_slots_used;
                dev_kfree_skb(skb);
        }

        list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
                notify_remote_via_irq(vif->irq);
                list_del_init(&vif->notify_list);
        }

        /* More work to do? */
        if (!skb_queue_empty(&netbk->rx_queue) &&
            !timer_pending(&netbk->net_timer))
                xen_netbk_kick_thread(netbk);
}

void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
        struct xen_netbk *netbk = vif->netbk;

        skb_queue_tail(&netbk->rx_queue, skb);

        xen_netbk_kick_thread(netbk);
}

static void xen_netbk_alarm(unsigned long data)
{
        struct xen_netbk *netbk = (struct xen_netbk *)data;
        xen_netbk_kick_thread(netbk);
}

static int __on_net_schedule_list(struct xenvif *vif)
{
        return !list_empty(&vif->schedule_list);
}

/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
        if (likely(__on_net_schedule_list(vif))) {
                list_del_init(&vif->schedule_list);
                xenvif_put(vif);
        }
}

static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
        struct xenvif *vif = NULL;

        spin_lock_irq(&netbk->net_schedule_list_lock);
        if (list_empty(&netbk->net_schedule_list))
                goto out;

        vif = list_first_entry(&netbk->net_schedule_list,
                               struct xenvif, schedule_list);
        if (!vif)
                goto out;

        xenvif_get(vif);

        remove_from_net_schedule_list(vif);
out:
        spin_unlock_irq(&netbk->net_schedule_list_lock);
        return vif;
}

void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
        unsigned long flags;
        struct xen_netbk *netbk = vif->netbk;

        if (__on_net_schedule_list(vif))
                goto kick;

        spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
        if (!__on_net_schedule_list(vif) &&
            likely(xenvif_schedulable(vif))) {
                list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
                xenvif_get(vif);
        }
        spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);

kick:
        smp_mb();
        if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
            !list_empty(&netbk->net_schedule_list))
                xen_netbk_kick_thread(netbk);
}

void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
        struct xen_netbk *netbk = vif->netbk;
        spin_lock_irq(&netbk->net_schedule_list_lock);
        remove_from_net_schedule_list(vif);
        spin_unlock_irq(&netbk->net_schedule_list_lock);
}

void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
        int more_to_do;

        RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

        if (more_to_do)
                xen_netbk_schedule_xenvif(vif);
}

static void tx_add_credit(struct xenvif *vif)
{
        unsigned long max_burst, max_credit;

        /*
         * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
         * Otherwise the interface can seize up due to insufficient credit.
         */
        max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
        max_burst = min(max_burst, 131072UL);
        max_burst = max(max_burst, vif->credit_bytes);

        /* Take care that adding a new chunk of credit doesn't wrap to zero. */
        max_credit = vif->remaining_credit + vif->credit_bytes;
        if (max_credit < vif->remaining_credit)
                max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

        vif->remaining_credit = min(max_credit, max_burst);
}
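
/*
 * Example: with credit_bytes == 100000 and 30000 bytes of credit left,
 * a replenish computes max_credit == 130000; if the next queued request
 * is 2000 bytes then max_burst == max(min(2000, 128kB), 100000) ==
 * 100000, so remaining_credit becomes min(130000, 100000) == 100000:
 * one full credit window, never more.
 */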

static void tx_credit_callback(unsigned long data)
{
        struct xenvif *vif = (struct xenvif *)data;
        tx_add_credit(vif);
        xen_netbk_check_rx_xenvif(vif);
}

static void netbk_tx_err(struct xenvif *vif,
                         struct xen_netif_tx_request *txp, RING_IDX end)
{
        RING_IDX cons = vif->tx.req_cons;

        do {
                make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
                if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&vif->tx, cons++);
        } while (1);
        vif->tx.req_cons = cons;
        xen_netbk_check_rx_xenvif(vif);
        xenvif_put(vif);
}

static void netbk_fatal_tx_err(struct xenvif *vif)
{
        netdev_err(vif->dev, "fatal error; disabling device\n");
        xenvif_carrier_off(vif);
        xenvif_put(vif);
}

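/*
 * Count the extra slots (beyond the first request) that a packet's tx
 * request chain occupies. Returns that count on success; returns a
 * negative errno after calling netbk_fatal_tx_err() when the frontend
 * is broken or malicious, or -E2BIG after netbk_tx_err() when the
 * packet merely uses more than XEN_NETIF_NR_SLOTS_MIN slots and is
 * dropped.
 */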
static int netbk_count_requests(struct xenvif *vif,
                                struct xen_netif_tx_request *first,
                                RING_IDX first_idx,
                                struct xen_netif_tx_request *txp,
                                int work_to_do)
{
        RING_IDX cons = vif->tx.req_cons;
        int slots = 0;
        int drop_err = 0;

        if (!(first->flags & XEN_NETTXF_more_data))
                return 0;

        do {
                if (slots >= work_to_do) {
                        netdev_err(vif->dev,
                                   "Asked for %d slots but exceeds this limit\n",
                                   work_to_do);
                        netbk_fatal_tx_err(vif);
                        return -ENODATA;
                }

                /* This guest is really using too many slots and is
                 * considered malicious.
                 */
                if (unlikely(slots >= max_skb_slots)) {
                        netdev_err(vif->dev,
                                   "Malicious frontend using %d slots, threshold %u\n",
                                   slots, max_skb_slots);
                        netbk_fatal_tx_err(vif);
                        return -E2BIG;
                }

                /* The Xen network protocol had an implicit dependency on
                 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
                 * historical MAX_SKB_FRAGS value 18 to honor the same
                 * behavior as before. Any packet using more than 18
                 * slots but less than max_skb_slots slots is dropped.
                 */
                if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
                        if (net_ratelimit())
                                netdev_dbg(vif->dev,
                                           "Too many slots (%d) exceeding limit (%d), dropping packet\n",
                                           slots, XEN_NETIF_NR_SLOTS_MIN);
                        drop_err = -E2BIG;
                }

                memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
                       sizeof(*txp));
                if (txp->size > first->size) {
                        netdev_err(vif->dev,
                                   "Invalid tx request, slot size %u > remaining size %u\n",
                                   txp->size, first->size);
                        netbk_fatal_tx_err(vif);
                        return -EIO;
                }

                first->size -= txp->size;
                slots++;

                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
                        netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
                                   txp->offset, txp->size);
                        netbk_fatal_tx_err(vif);
                        return -EINVAL;
                }
        } while ((txp++)->flags & XEN_NETTXF_more_data);

        if (drop_err) {
                netbk_tx_err(vif, first, first_idx + slots);
                return drop_err;
        }

        return slots;
}

static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
                                         u16 pending_idx)
{
        struct page *page;
        page = alloc_page(GFP_KERNEL|__GFP_COLD);
        if (!page)
                return NULL;
        set_page_ext(page, netbk, pending_idx);
        netbk->mmap_pages[pending_idx] = page;
        return page;
}

static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                                                  struct xenvif *vif,
                                                  struct sk_buff *skb,
                                                  struct xen_netif_tx_request *txp,
                                                  struct gnttab_copy *gop)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
        u16 pending_idx = *((u16 *)skb->data);
        u16 head_idx = 0;
        int slot, start;
        struct page *page;
        pending_ring_idx_t index, start_idx = 0;
        uint16_t dst_offset;
        unsigned int nr_slots;
        struct pending_tx_info *first = NULL;

        /* At this point shinfo->nr_frags is in fact the number of
         * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
         */
        nr_slots = shinfo->nr_frags;

        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

        /* Coalesce tx requests, at this point the packet passed in
         * should be <= 64K. Any packets larger than 64K have been
         * handled in netbk_count_requests().
         */
        for (shinfo->nr_frags = slot = start; slot < nr_slots;
             shinfo->nr_frags++) {
                struct pending_tx_info *pending_tx_info =
                        netbk->pending_tx_info;

                page = alloc_page(GFP_KERNEL|__GFP_COLD);
                if (!page)
                        goto err;

                dst_offset = 0;
                first = NULL;
                while (dst_offset < PAGE_SIZE && slot < nr_slots) {
                        gop->flags = GNTCOPY_source_gref;

                        gop->source.u.ref = txp->gref;
                        gop->source.domid = vif->domid;
                        gop->source.offset = txp->offset;

                        gop->dest.domid = DOMID_SELF;

                        gop->dest.offset = dst_offset;
                        gop->dest.u.gmfn = virt_to_mfn(page_address(page));

                        if (dst_offset + txp->size > PAGE_SIZE) {
                                /* This page can only merge a portion
                                 * of tx request. Do not increment any
                                 * pointer / counter here. The txp
                                 * will be dealt with in future
                                 * rounds, eventually hitting the
                                 * `else` branch.
                                 */
                                gop->len = PAGE_SIZE - dst_offset;
                                txp->offset += gop->len;
                                txp->size -= gop->len;
                                dst_offset += gop->len; /* quit loop */
                        } else {
                                /* This tx request can be merged in the page */
                                gop->len = txp->size;
                                dst_offset += gop->len;

                                index = pending_index(netbk->pending_cons++);

                                pending_idx = netbk->pending_ring[index];

                                memcpy(&pending_tx_info[pending_idx].req, txp,
                                       sizeof(*txp));
                                xenvif_get(vif);

                                pending_tx_info[pending_idx].vif = vif;

                                /* Poison these fields, corresponding
                                 * fields for head tx req will be set
                                 * to correct values after the loop.
                                 */
                                netbk->mmap_pages[pending_idx] = (void *)(~0UL);
                                pending_tx_info[pending_idx].head =
                                        INVALID_PENDING_RING_IDX;

                                if (!first) {
                                        first = &pending_tx_info[pending_idx];
                                        start_idx = index;
                                        head_idx = pending_idx;
                                }

                                txp++;
                                slot++;
                        }

                        gop++;
                }

                first->req.offset = 0;
                first->req.size = dst_offset;
                first->head = start_idx;
                set_page_ext(page, netbk, head_idx);
                netbk->mmap_pages[head_idx] = page;
                frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
        }

        BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

        return gop;
err:
        /* Unwind, freeing all pages and sending error responses. */
        while (shinfo->nr_frags-- > start) {
                xen_netbk_idx_release(netbk,
                                frag_get_pending_idx(&frags[shinfo->nr_frags]),
                                XEN_NETIF_RSP_ERROR);
        }
        /* The head too, if necessary. */
        if (start)
                xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

        return NULL;
}

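/*
 * Check the grant copy results for one skb. The header copy is checked
 * first; then, because a single frag may have been coalesced from
 * several tx requests, every copy op belonging to a frag is walked
 * (following the head/continuation markers in the pending ring) before
 * that frag is treated as successful.
 */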
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                                  struct sk_buff *skb,
                                  struct gnttab_copy **gopp)
{
        struct gnttab_copy *gop = *gopp;
        u16 pending_idx = *((u16 *)skb->data);
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        struct pending_tx_info *tx_info;
        int nr_frags = shinfo->nr_frags;
        int i, err, start;
        u16 peek; /* peek into next tx request */

        /* Check status of header. */
        err = gop->status;
        if (unlikely(err))
                xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

        for (i = start; i < nr_frags; i++) {
                int j, newerr;
                pending_ring_idx_t head;

                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
                tx_info = &netbk->pending_tx_info[pending_idx];
                head = tx_info->head;

                /* Check error status: if okay then remember grant handle. */
                do {
                        newerr = (++gop)->status;
                        if (newerr)
                                break;
                        peek = netbk->pending_ring[pending_index(++head)];
                } while (!pending_tx_is_head(netbk, peek));

                if (likely(!newerr)) {
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
                                xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                        continue;
                }

                /* Error on this fragment: respond to client with an error. */
                xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;

                /* First error: invalidate header and preceding fragments. */
                pending_idx = *((u16 *)skb->data);
                xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                for (j = start; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }

                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }

        *gopp = gop + 1;
        return err;
}

static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i;

        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = shinfo->frags + i;
                struct xen_netif_tx_request *txp;
                struct page *page;
                u16 pending_idx;

                pending_idx = frag_get_pending_idx(frag);

                txp = &netbk->pending_tx_info[pending_idx].req;
                page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
                skb->len += txp->size;
                skb->data_len += txp->size;
                skb->truesize += txp->size;

                /* Take an extra reference to offset xen_netbk_idx_release */
                get_page(netbk->mmap_pages[pending_idx]);
                xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
        }
}

static int xen_netbk_get_extras(struct xenvif *vif,
                                struct xen_netif_extra_info *extras,
                                int work_to_do)
{
        struct xen_netif_extra_info extra;
        RING_IDX cons = vif->tx.req_cons;

        do {
                if (unlikely(work_to_do-- <= 0)) {
                        netdev_err(vif->dev, "Missing extra info\n");
                        netbk_fatal_tx_err(vif);
                        return -EBADR;
                }

                memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
                       sizeof(extra));
                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        vif->tx.req_cons = ++cons;
                        netdev_err(vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
                        netbk_fatal_tx_err(vif);
                        return -EINVAL;
                }

                memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
                vif->tx.req_cons = ++cons;
        } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

        return work_to_do;
}

static int netbk_set_skb_gso(struct xenvif *vif,
                             struct sk_buff *skb,
                             struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                netdev_err(vif->dev, "GSO size must not be zero.\n");
                netbk_fatal_tx_err(vif);
                return -EINVAL;
        }

        /* Currently only TCPv4 S.O. is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
                netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
                netbk_fatal_tx_err(vif);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

        /* Header must be checked, and gso_segs computed. */
        skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
        skb_shinfo(skb)->gso_segs = 0;

        return 0;
}

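/*
 * For a CHECKSUM_PARTIAL skb the stack expects skb->csum_start to point
 * at the transport header (relative to skb->head) and skb->csum_offset
 * to locate the checksum field inside it; for TCP, for example, that
 * field sits at offsetof(struct tcphdr, check) == 16 bytes into the
 * header.
 */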
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
        struct iphdr *iph;
        unsigned char *th;
        int err = -EPROTO;
        int recalculate_partial_csum = 0;

        /*
         * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
         * peers can fail to set NETRXF_csum_blank when sending a GSO
         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
         * recalculate the partial checksum.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
                vif->rx_gso_checksum_fixup++;
                skb->ip_summed = CHECKSUM_PARTIAL;
                recalculate_partial_csum = 1;
        }

        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (skb->protocol != htons(ETH_P_IP))
                goto out;

        iph = (void *)skb->data;
        th = skb->data + 4 * iph->ihl;
        if (th >= skb_tail_pointer(skb))
                goto out;

        skb->csum_start = th - skb->head;
        switch (iph->protocol) {
        case IPPROTO_TCP:
                skb->csum_offset = offsetof(struct tcphdr, check);

                if (recalculate_partial_csum) {
                        struct tcphdr *tcph = (struct tcphdr *)th;
                        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                         skb->len - iph->ihl*4,
                                                         IPPROTO_TCP, 0);
                }
                break;
        case IPPROTO_UDP:
                skb->csum_offset = offsetof(struct udphdr, check);

                if (recalculate_partial_csum) {
                        struct udphdr *udph = (struct udphdr *)th;
                        udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                         skb->len - iph->ihl*4,
                                                         IPPROTO_UDP, 0);
                }
                break;
        default:
                if (net_ratelimit())
                        netdev_err(vif->dev,
                                   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
                                   iph->protocol);
                goto out;
        }

        if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
                goto out;

        err = 0;

out:
        return err;
}

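/*
 * Example: with credit_bytes == 1000000 and credit_usec == 100000 a
 * vif may send roughly 1MB per 100ms window. Once a request would
 * exceed remaining_credit, the timer is armed for the start of the
 * next window and the packet waits until tx_credit_callback()
 * replenishes the credit.
 */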
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
        u64 now = get_jiffies_64();
        u64 next_credit = vif->credit_window_start +
                msecs_to_jiffies(vif->credit_usec / 1000);

        /* Timer could already be pending in rare cases. */
        if (timer_pending(&vif->credit_timeout))
                return true;

        /* Passed the point where we can replenish credit? */
        if (time_after_eq64(now, next_credit)) {
                vif->credit_window_start = now;
                tx_add_credit(vif);
        }

        /* Still too big to send right now? Set a callback. */
        if (size > vif->remaining_credit) {
                vif->credit_timeout.data = (unsigned long)vif;
                vif->credit_timeout.function = tx_credit_callback;
                mod_timer(&vif->credit_timeout, next_credit);
                vif->credit_window_start = next_credit;

                return true;
        }

        return false;
}

static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
        struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
        struct sk_buff *skb;
        int ret;

        while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
                < MAX_PENDING_REQS) &&
                !list_empty(&netbk->net_schedule_list)) {
                struct xenvif *vif;
                struct xen_netif_tx_request txreq;
                struct xen_netif_tx_request txfrags[max_skb_slots];
                struct page *page;
                struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
                u16 pending_idx;
                RING_IDX idx;
                int work_to_do;
                unsigned int data_len;
                pending_ring_idx_t index;

                /* Get a netif from the list with work to do. */
                vif = poll_net_schedule_list(netbk);
                /* This can sometimes happen because the test of
                 * list_empty(net_schedule_list) at the top of the
                 * loop is unlocked. Just go back and have another
                 * look.
                 */
                if (!vif)
                        continue;

                if (vif->tx.sring->req_prod - vif->tx.req_cons >
                    XEN_NETIF_TX_RING_SIZE) {
                        netdev_err(vif->dev,
                                   "Impossible number of requests. "
                                   "req_prod %d, req_cons %d, size %ld\n",
                                   vif->tx.sring->req_prod, vif->tx.req_cons,
                                   XEN_NETIF_TX_RING_SIZE);
                        netbk_fatal_tx_err(vif);
                        continue;
                }

                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
                if (!work_to_do) {
                        xenvif_put(vif);
                        continue;
                }

                idx = vif->tx.req_cons;
                rmb(); /* Ensure that we see the request before we copy it. */
                memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

                /* Credit-based scheduling. */
                if (txreq.size > vif->remaining_credit &&
                    tx_credit_exceeded(vif, txreq.size)) {
                        xenvif_put(vif);
                        continue;
                }

                vif->remaining_credit -= txreq.size;

                work_to_do--;
                vif->tx.req_cons = ++idx;

                memset(extras, 0, sizeof(extras));
                if (txreq.flags & XEN_NETTXF_extra_info) {
                        work_to_do = xen_netbk_get_extras(vif, extras,
                                                          work_to_do);
                        idx = vif->tx.req_cons;
                        if (unlikely(work_to_do < 0))
                                continue;
                }

                ret = netbk_count_requests(vif, &txreq, idx,
                                           txfrags, work_to_do);
                if (unlikely(ret < 0))
                        continue;

                idx += ret;

                if (unlikely(txreq.size < ETH_HLEN)) {
                        netdev_dbg(vif->dev,
                                   "Bad packet size: %d\n", txreq.size);
                        netbk_tx_err(vif, &txreq, idx);
                        continue;
                }

                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
                        netdev_err(vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (txreq.offset&~PAGE_MASK) + txreq.size);
                        netbk_fatal_tx_err(vif);
                        continue;
                }

                index = pending_index(netbk->pending_cons);
                pending_idx = netbk->pending_ring[index];

                data_len = (txreq.size > PKT_PROT_LEN &&
                            ret < XEN_NETIF_NR_SLOTS_MIN) ?
                        PKT_PROT_LEN : txreq.size;

                skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
                                GFP_ATOMIC | __GFP_NOWARN);
                if (unlikely(skb == NULL)) {
                        netdev_dbg(vif->dev,
                                   "Can't allocate a skb in start_xmit.\n");
                        netbk_tx_err(vif, &txreq, idx);
                        break;
                }

                /* Packets passed to netif_rx() must have some headroom. */
                skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                        if (netbk_set_skb_gso(vif, skb, gso)) {
                                /* Failure in netbk_set_skb_gso is fatal. */
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* XXX could copy straight to head */
                page = xen_netbk_alloc_page(netbk, pending_idx);
                if (!page) {
                        kfree_skb(skb);
                        netbk_tx_err(vif, &txreq, idx);
                        continue;
                }

                gop->source.u.ref = txreq.gref;
                gop->source.domid = vif->domid;
                gop->source.offset = txreq.offset;

                gop->dest.u.gmfn = virt_to_mfn(page_address(page));
                gop->dest.domid = DOMID_SELF;
                gop->dest.offset = txreq.offset;

                gop->len = txreq.size;
                gop->flags = GNTCOPY_source_gref;

                gop++;

                memcpy(&netbk->pending_tx_info[pending_idx].req,
                       &txreq, sizeof(txreq));
                netbk->pending_tx_info[pending_idx].vif = vif;
                netbk->pending_tx_info[pending_idx].head = index;
                *((u16 *)skb->data) = pending_idx;

                __skb_put(skb, data_len);

                skb_shinfo(skb)->nr_frags = ret;
                if (data_len < txreq.size) {
                        skb_shinfo(skb)->nr_frags++;
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                             pending_idx);
                } else {
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                             INVALID_PENDING_IDX);
                }

                __skb_queue_tail(&netbk->tx_queue, skb);

                netbk->pending_cons++;

                request_gop = xen_netbk_get_requests(netbk, vif,
                                                     skb, txfrags, gop);
                if (request_gop == NULL) {
                        kfree_skb(skb);
                        netbk_tx_err(vif, &txreq, idx);
                        continue;
                }
                gop = request_gop;

                vif->tx.req_cons = idx;
                xen_netbk_check_rx_xenvif(vif);

                if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
                        break;
        }

        return gop - netbk->tx_copy_ops;
}

static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
        struct gnttab_copy *gop = netbk->tx_copy_ops;
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
                struct xen_netif_tx_request *txp;
                struct xenvif *vif;
                u16 pending_idx;
                unsigned data_len;

                pending_idx = *((u16 *)skb->data);
                vif = netbk->pending_tx_info[pending_idx].vif;
                txp = &netbk->pending_tx_info[pending_idx].req;

                /* Check the remap error code. */
                if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
                        netdev_dbg(vif->dev, "netback grant failed.\n");
                        skb_shinfo(skb)->nr_frags = 0;
                        kfree_skb(skb);
                        continue;
                }

                data_len = skb->len;
                memcpy(skb->data,
                       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
                       data_len);
                if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
                        txp->offset += data_len;
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
                        xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }

                if (txp->flags & XEN_NETTXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (txp->flags & XEN_NETTXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                xen_netbk_fill_frags(netbk, skb);

                /*
                 * If the initial fragment was < PKT_PROT_LEN then
                 * pull through some bytes from the other fragments to
                 * increase the linear region to PKT_PROT_LEN bytes.
                 */
                if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
                        int target = min_t(int, skb->len, PKT_PROT_LEN);
                        __pskb_pull_tail(skb, target - skb_headlen(skb));
                }

                skb->dev = vif->dev;
                skb->protocol = eth_type_trans(skb, skb->dev);

                if (checksum_setup(vif, skb)) {
                        netdev_dbg(vif->dev,
                                   "Can't setup checksum in net_tx_action\n");
                        kfree_skb(skb);
                        continue;
                }

                vif->dev->stats.rx_bytes += skb->len;
                vif->dev->stats.rx_packets++;

                xenvif_receive_skb(vif, skb);
        }
}

/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
        unsigned nr_gops;
        int ret;

        nr_gops = xen_netbk_tx_build_gops(netbk);

        if (nr_gops == 0)
                return;
        ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
                                        netbk->tx_copy_ops, nr_gops);
        BUG_ON(ret);

        xen_netbk_tx_submit(netbk);
}

static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
                                  u8 status)
{
        struct xenvif *vif;
        struct pending_tx_info *pending_tx_info;
        pending_ring_idx_t head;
        u16 peek; /* peek into next tx request */

        BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));

        /* Already complete? */
        if (netbk->mmap_pages[pending_idx] == NULL)
                return;

        pending_tx_info = &netbk->pending_tx_info[pending_idx];

        vif = pending_tx_info->vif;
        head = pending_tx_info->head;

        BUG_ON(!pending_tx_is_head(netbk, head));
        BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);

        do {
                pending_ring_idx_t index;
                pending_ring_idx_t idx = pending_index(head);
                u16 info_idx = netbk->pending_ring[idx];

                pending_tx_info = &netbk->pending_tx_info[info_idx];
                make_tx_response(vif, &pending_tx_info->req, status);

                /* Setting any number other than
                 * INVALID_PENDING_RING_IDX indicates this slot is
                 * starting a new packet / ending a previous packet.
                 */
                pending_tx_info->head = 0;

                index = pending_index(netbk->pending_prod++);
                netbk->pending_ring[index] = netbk->pending_ring[info_idx];

                xenvif_put(vif);

                peek = netbk->pending_ring[pending_index(++head)];
        } while (!pending_tx_is_head(netbk, peek));

        netbk->mmap_pages[pending_idx]->mapping = 0;
        put_page(netbk->mmap_pages[pending_idx]);
        netbk->mmap_pages[pending_idx] = NULL;
}

static void make_tx_response(struct xenvif *vif,
                             struct xen_netif_tx_request *txp,
                             s8 st)
{
        RING_IDX i = vif->tx.rsp_prod_pvt;
        struct xen_netif_tx_response *resp;
        int notify;

        resp = RING_GET_RESPONSE(&vif->tx, i);
        resp->id = txp->id;
        resp->status = st;

        if (txp->flags & XEN_NETTXF_extra_info)
                RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

        vif->tx.rsp_prod_pvt = ++i;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
        if (notify)
                notify_remote_via_irq(vif->irq);
}

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
                                                      u16 id,
                                                      s8 st,
                                                      u16 offset,
                                                      u16 size,
                                                      u16 flags)
{
        RING_IDX i = vif->rx.rsp_prod_pvt;
        struct xen_netif_rx_response *resp;

        resp = RING_GET_RESPONSE(&vif->rx, i);
        resp->offset = offset;
        resp->flags = flags;
        resp->id = id;
        resp->status = (s16)size;
        if (st < 0)
                resp->status = (s16)st;

        vif->rx.rsp_prod_pvt = ++i;

        return resp;
}

static inline int rx_work_todo(struct xen_netbk *netbk)
{
        return !skb_queue_empty(&netbk->rx_queue);
}

static inline int tx_work_todo(struct xen_netbk *netbk)
{
        if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
             < MAX_PENDING_REQS) &&
            !list_empty(&netbk->net_schedule_list))
                return 1;

        return 0;
}

static int xen_netbk_kthread(void *data)
{
        struct xen_netbk *netbk = data;
        while (!kthread_should_stop()) {
                wait_event_interruptible(netbk->wq,
                                rx_work_todo(netbk) ||
                                tx_work_todo(netbk) ||
                                kthread_should_stop());
                cond_resched();

                if (kthread_should_stop())
                        break;

                if (rx_work_todo(netbk))
                        xen_netbk_rx_action(netbk);

                if (tx_work_todo(netbk))
                        xen_netbk_tx_action(netbk);
        }

        return 0;
}

void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
        if (vif->tx.sring)
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                        vif->tx.sring);
        if (vif->rx.sring)
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                        vif->rx.sring);
}

int xen_netbk_map_frontend_rings(struct xenvif *vif,
                                 grant_ref_t tx_ring_ref,
                                 grant_ref_t rx_ring_ref)
{
        void *addr;
        struct xen_netif_tx_sring *txs;
        struct xen_netif_rx_sring *rxs;

        int err = -ENOMEM;

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
                                     tx_ring_ref, &addr);
        if (err)
                goto err;

        txs = (struct xen_netif_tx_sring *)addr;
        BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
                                     rx_ring_ref, &addr);
        if (err)
                goto err;

        rxs = (struct xen_netif_rx_sring *)addr;
        BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

        vif->rx_req_cons_peek = 0;

        return 0;

err:
        xen_netbk_unmap_frontend_rings(vif);
        return err;
}

static int __init netback_init(void)
{
        int i;
        int rc = 0;
        int group;

        if (!xen_domain())
                return -ENODEV;

        if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
                printk(KERN_INFO
                       "xen-netback: max_skb_slots too small (%d), bumping it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
                       max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
                max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
        }

        xen_netbk_group_nr = num_online_cpus();
        xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
        if (!xen_netbk)
                return -ENOMEM;

        for (group = 0; group < xen_netbk_group_nr; group++) {
                struct xen_netbk *netbk = &xen_netbk[group];
                skb_queue_head_init(&netbk->rx_queue);
                skb_queue_head_init(&netbk->tx_queue);

                init_timer(&netbk->net_timer);
                netbk->net_timer.data = (unsigned long)netbk;
                netbk->net_timer.function = xen_netbk_alarm;

                netbk->pending_cons = 0;
                netbk->pending_prod = MAX_PENDING_REQS;
                for (i = 0; i < MAX_PENDING_REQS; i++)
                        netbk->pending_ring[i] = i;

                init_waitqueue_head(&netbk->wq);
                netbk->task = kthread_create(xen_netbk_kthread,
                                             (void *)netbk,
                                             "netback/%u", group);

                if (IS_ERR(netbk->task)) {
                        printk(KERN_ALERT "xen-netback: kthread_create() failed\n");
                        del_timer(&netbk->net_timer);
                        rc = PTR_ERR(netbk->task);
                        goto failed_init;
                }

                kthread_bind(netbk->task, group);

                INIT_LIST_HEAD(&netbk->net_schedule_list);

                spin_lock_init(&netbk->net_schedule_list_lock);

                atomic_set(&netbk->netfront_count, 0);

                wake_up_process(netbk->task);
        }

        rc = xenvif_xenbus_init();
        if (rc)
                goto failed_init;

        return 0;

failed_init:
        while (--group >= 0) {
                struct xen_netbk *netbk = &xen_netbk[group];
                for (i = 0; i < MAX_PENDING_REQS; i++) {
                        if (netbk->mmap_pages[i])
                                __free_page(netbk->mmap_pages[i]);
                }
                del_timer(&netbk->net_timer);
                kthread_stop(netbk->task);
        }
        vfree(xen_netbk);
        return rc;
}

module_init(netback_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");