Ian Campbellf942dc22011-03-15 00:06:18 +00001/*
2 * Back-end of the driver for virtual network devices. This portion of the
3 * driver exports a 'unified' network-device interface that can be accessed
4 * by any operating system that implements a compatible front end. A
5 * reference front-end implementation can be found in:
6 * drivers/net/xen-netfront.c
7 *
8 * Copyright (c) 2002-2005, K A Fraser
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include "common.h"
36
37#include <linux/kthread.h>
38#include <linux/if_vlan.h>
39#include <linux/udp.h>
40
41#include <net/tcp.h>
42
Stefano Stabellinica981632012-08-08 17:21:23 +000043#include <xen/xen.h>
Ian Campbellf942dc22011-03-15 00:06:18 +000044#include <xen/events.h>
45#include <xen/interface/memory.h>
46
47#include <asm/xen/hypercall.h>
48#include <asm/xen/page.h>
49
Wei Liue1f00a692013-05-22 06:34:45 +000050/* Provide an option to disable split event channels at load time as
 51 * event channels are a limited resource. Split event channels are
52 * enabled by default.
53 */
 54bool separate_tx_rx_irq = true;
55module_param(separate_tx_rx_irq, bool, 0644);
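/* Illustrative usage (a sketch, assuming this file is built into the usual
 * xen-netback module): the parameter can be given at load time, e.g.
 *
 *   modprobe xen-netback separate_tx_rx_irq=0
 *
 * and, since it is 0644, inspected or changed afterwards via
 * /sys/module/xen_netback/parameters/separate_tx_rx_irq.
 */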
56
Wei Liu2810e5b2013-04-22 02:20:42 +000057/*
 58 * This is the maximum number of slots an skb can have. If a guest sends an skb
59 * which exceeds this limit it is considered malicious.
60 */
Wei Liu37641492013-05-02 00:43:59 +000061#define FATAL_SKB_SLOTS_DEFAULT 20
62static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
63module_param(fatal_skb_slots, uint, 0444);
64
65/*
66 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 67 * the maximum number of slots a valid packet can use. Currently this value
 68 * is defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
 69 * supported by all backends.
70 */
71#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
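/* Relationship between the two limits (a sketch, assuming
 * XEN_NETIF_NR_SLOTS_MIN == 18 and the default fatal_skb_slots == 20), as
 * enforced by netbk_count_requests() below:
 *
 *   - while the slot count stays below XEN_NETBK_LEGACY_SLOTS_MAX the
 *     packet is processed normally;
 *   - once it reaches XEN_NETBK_LEGACY_SLOTS_MAX the packet is dropped,
 *     but its slots are still consumed and error responses are sent;
 *   - once it reaches fatal_skb_slots the frontend is treated as
 *     malicious and the vif is disabled via netbk_fatal_tx_err().
 */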
Wei Liu2810e5b2013-04-22 02:20:42 +000072
Ian Campbellf942dc22011-03-15 00:06:18 +000073typedef unsigned int pending_ring_idx_t;
Wei Liu2810e5b2013-04-22 02:20:42 +000074#define INVALID_PENDING_RING_IDX (~0U)
75
76struct pending_tx_info {
77 struct xen_netif_tx_request req; /* coalesced tx request */
78 struct xenvif *vif;
79 pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
80 * if it is head of one or more tx
81 * reqs
82 */
83};
Ian Campbellf942dc22011-03-15 00:06:18 +000084
85struct netbk_rx_meta {
86 int id;
87 int size;
88 int gso_size;
89};
90
91#define MAX_PENDING_REQS 256
92
Ian Campbellea066ad2011-10-05 00:28:46 +000093/* Discriminate from any valid pending_idx value. */
94#define INVALID_PENDING_IDX 0xFFFF
95
Ian Campbellf942dc22011-03-15 00:06:18 +000096#define MAX_BUFFER_OFFSET PAGE_SIZE
97
Ian Campbellf942dc22011-03-15 00:06:18 +000098struct xen_netbk {
99 wait_queue_head_t wq;
100 struct task_struct *task;
101
102 struct sk_buff_head rx_queue;
103 struct sk_buff_head tx_queue;
104
105 struct timer_list net_timer;
106
107 struct page *mmap_pages[MAX_PENDING_REQS];
108
109 pending_ring_idx_t pending_prod;
110 pending_ring_idx_t pending_cons;
111 struct list_head net_schedule_list;
112
113 /* Protect the net_schedule_list in netif. */
114 spinlock_t net_schedule_list_lock;
115
116 atomic_t netfront_count;
117
118 struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
Wei Liu2810e5b2013-04-22 02:20:42 +0000119	/* Coalescing tx requests before copying makes the number of grant
 120	 * copy ops greater than or equal to the number of slots required. In
 121	 * the worst case a tx request consumes 2 gnttab_copy operations.
122 */
123 struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
Ian Campbellf942dc22011-03-15 00:06:18 +0000124
125 u16 pending_ring[MAX_PENDING_REQS];
126
127 /*
128 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
129 * head/fragment page uses 2 copy operations because it
130 * straddles two buffers in the frontend.
131 */
132 struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
133 struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
134};
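/* Rough sizing of the arrays above (a sketch, assuming 4 KiB pages, for
 * which XEN_NETIF_RX_RING_SIZE is typically 256, and MAX_PENDING_REQS of
 * 256):
 *
 *   tx_copy_ops   : 2 * 256 = 512, as a coalesced tx request may straddle
 *                   a destination page boundary and need two copies
 *   grant_copy_op : 2 * 256 = 512, as each head/fragment page may straddle
 *                   two frontend buffers
 *   meta          : 2 * 256 = 512, sized to match grant_copy_op as an
 *                   upper bound
 */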
135
136static struct xen_netbk *xen_netbk;
137static int xen_netbk_group_nr;
138
Wei Liu2810e5b2013-04-22 02:20:42 +0000139/*
140 * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
141 * one or more merged tx requests, otherwise it is the continuation of
142 * previous tx request.
143 */
144static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
145{
146 return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
147}
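/* Illustrative layout (a sketch): three tx requests coalesced into one
 * destination page consume three pending slots, but only the slot for the
 * first request of that page is a "head":
 *
 *   pending_tx_info[a].head = <pending_ring index at which a was consumed>
 *   pending_tx_info[b].head = INVALID_PENDING_RING_IDX   (continuation)
 *   pending_tx_info[c].head = INVALID_PENDING_RING_IDX   (continuation)
 *
 * xen_netbk_idx_release() later walks from the head until it meets the
 * next head, issuing one tx response per coalesced request.
 */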
148
Ian Campbellf942dc22011-03-15 00:06:18 +0000149void xen_netbk_add_xenvif(struct xenvif *vif)
150{
151 int i;
152 int min_netfront_count;
153 int min_group = 0;
154 struct xen_netbk *netbk;
155
156 min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
157 for (i = 0; i < xen_netbk_group_nr; i++) {
158 int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
159 if (netfront_count < min_netfront_count) {
160 min_group = i;
161 min_netfront_count = netfront_count;
162 }
163 }
164
165 netbk = &xen_netbk[min_group];
166
167 vif->netbk = netbk;
168 atomic_inc(&netbk->netfront_count);
169}
170
171void xen_netbk_remove_xenvif(struct xenvif *vif)
172{
173 struct xen_netbk *netbk = vif->netbk;
174 vif->netbk = NULL;
175 atomic_dec(&netbk->netfront_count);
176}
177
Matthew Daley7d5145d2013-02-06 23:41:36 +0000178static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
179 u8 status);
Ian Campbellf942dc22011-03-15 00:06:18 +0000180static void make_tx_response(struct xenvif *vif,
181 struct xen_netif_tx_request *txp,
182 s8 st);
183static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
184 u16 id,
185 s8 st,
186 u16 offset,
187 u16 size,
188 u16 flags);
189
190static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
Ian Campbellea066ad2011-10-05 00:28:46 +0000191 u16 idx)
Ian Campbellf942dc22011-03-15 00:06:18 +0000192{
193 return page_to_pfn(netbk->mmap_pages[idx]);
194}
195
196static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
Ian Campbellea066ad2011-10-05 00:28:46 +0000197 u16 idx)
Ian Campbellf942dc22011-03-15 00:06:18 +0000198{
199 return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
200}
201
Ian Campbellf942dc22011-03-15 00:06:18 +0000202/*
 203 * This is the amount of the packet we copy rather than map, so that the
204 * guest can't fiddle with the contents of the headers while we do
205 * packet processing on them (netfilter, routing, etc).
206 */
207#define PKT_PROT_LEN (ETH_HLEN + \
208 VLAN_HLEN + \
209 sizeof(struct iphdr) + MAX_IPOPTLEN + \
210 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
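/* Worked example (a sketch, using the usual values ETH_HLEN = 14,
 * VLAN_HLEN = 4, sizeof(struct iphdr) = 20, MAX_IPOPTLEN = 40,
 * sizeof(struct tcphdr) = 20, MAX_TCP_OPTION_SPACE = 40):
 *
 *   PKT_PROT_LEN = 14 + 4 + 20 + 40 + 20 + 40 = 138 bytes
 *
 * i.e. enough linear room for the largest Ethernet + IPv4 + TCP header,
 * so the protocol headers always end up in the skb's linear area.
 */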
211
Ian Campbellea066ad2011-10-05 00:28:46 +0000212static u16 frag_get_pending_idx(skb_frag_t *frag)
213{
214 return (u16)frag->page_offset;
215}
216
217static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
218{
219 frag->page_offset = pending_idx;
220}
221
Ian Campbellf942dc22011-03-15 00:06:18 +0000222static inline pending_ring_idx_t pending_index(unsigned i)
223{
224 return i & (MAX_PENDING_REQS-1);
225}
226
227static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
228{
229 return MAX_PENDING_REQS -
230 netbk->pending_prod + netbk->pending_cons;
231}
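/* Worked example (a sketch): pending_prod and pending_cons are free-running
 * counters over the 256-entry pending ring, so pending_index() simply masks
 * with MAX_PENDING_REQS - 1, e.g. pending_index(258) == 2.  Requests in
 * flight are everything not currently free in the ring:
 *
 *   pending_prod = 300, pending_cons = 290
 *   free entries = 300 - 290 = 10
 *   in flight    = 256 - 10  = 246 == nr_pending_reqs()
 */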
232
233static void xen_netbk_kick_thread(struct xen_netbk *netbk)
234{
235 wake_up(&netbk->wq);
236}
237
238static int max_required_rx_slots(struct xenvif *vif)
239{
240 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
241
Wei Liu2810e5b2013-04-22 02:20:42 +0000242 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
Ian Campbellf942dc22011-03-15 00:06:18 +0000243 if (vif->can_sg || vif->gso || vif->gso_prefix)
244 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
245
246 return max;
247}
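/* Worked example (a sketch, assuming 4 KiB pages and MAX_SKB_FRAGS == 17):
 * an MTU of 1500 needs DIV_ROUND_UP(1500, 4096) = 1 slot; if the vif
 * supports SG or GSO we add MAX_SKB_FRAGS + 1 = 18 slots for the frags
 * plus the extra_info descriptor, giving a worst case of 19 slots.
 */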
248
249int xen_netbk_rx_ring_full(struct xenvif *vif)
250{
251 RING_IDX peek = vif->rx_req_cons_peek;
252 RING_IDX needed = max_required_rx_slots(vif);
253
254 return ((vif->rx.sring->req_prod - peek) < needed) ||
255 ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
256}
257
258int xen_netbk_must_stop_queue(struct xenvif *vif)
259{
260 if (!xen_netbk_rx_ring_full(vif))
261 return 0;
262
263 vif->rx.sring->req_event = vif->rx_req_cons_peek +
264 max_required_rx_slots(vif);
265 mb(); /* request notification /then/ check the queue */
266
267 return xen_netbk_rx_ring_full(vif);
268}
269
270/*
271 * Returns true if we should start a new receive buffer instead of
272 * adding 'size' bytes to a buffer which currently contains 'offset'
273 * bytes.
274 */
275static bool start_new_rx_buffer(int offset, unsigned long size, int head)
276{
277 /* simple case: we have completely filled the current buffer. */
278 if (offset == MAX_BUFFER_OFFSET)
279 return true;
280
281 /*
282 * complex case: start a fresh buffer if the current frag
283 * would overflow the current buffer but only if:
284 * (i) this frag would fit completely in the next buffer
285 * and (ii) there is already some data in the current buffer
286 * and (iii) this is not the head buffer.
287 *
288 * Where:
289 * - (i) stops us splitting a frag into two copies
290 * unless the frag is too large for a single buffer.
291 * - (ii) stops us from leaving a buffer pointlessly empty.
292 * - (iii) stops us leaving the first buffer
293 * empty. Strictly speaking this is already covered
294 * by (ii) but is explicitly checked because
295 * netfront relies on the first buffer being
296 * non-empty and can crash otherwise.
297 *
 298	 * This means we will effectively linearise small
 299	 * frags, but we do not needlessly split large buffers
 300	 * into multiple copies: large frags tend to get their
 301	 * own buffers, as before.
302 */
303 if ((offset + size > MAX_BUFFER_OFFSET) &&
304 (size <= MAX_BUFFER_OFFSET) && offset && !head)
305 return true;
306
307 return false;
308}
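/* Worked examples (a sketch, with MAX_BUFFER_OFFSET == 4096):
 *
 *   offset 4096, any size, any head    -> true  (buffer completely full)
 *   offset 2000, size 3000, head == 0  -> true  (frag fits whole in next)
 *   offset 2000, size 5000, head == 0  -> false (frag must be split anyway)
 *   offset    0, size 3000, head == 1  -> false (head buffer must not be
 *                                                left empty)
 */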
309
310/*
311 * Figure out how many ring slots we're going to need to send @skb to
312 * the guest. This function is essentially a dry run of
313 * netbk_gop_frag_copy.
314 */
315unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
316{
317 unsigned int count;
318 int i, copy_off;
319
Simon Grahame26b2032012-05-24 06:26:07 +0000320 count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
Ian Campbellf942dc22011-03-15 00:06:18 +0000321
322 copy_off = skb_headlen(skb) % PAGE_SIZE;
323
324 if (skb_shinfo(skb)->gso_size)
325 count++;
326
327 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000328 unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
Ian Campbell6a8ed462012-10-10 03:48:42 +0000329 unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
Ian Campbellf942dc22011-03-15 00:06:18 +0000330 unsigned long bytes;
Ian Campbell6a8ed462012-10-10 03:48:42 +0000331
332 offset &= ~PAGE_MASK;
333
Ian Campbellf942dc22011-03-15 00:06:18 +0000334 while (size > 0) {
Ian Campbell6a8ed462012-10-10 03:48:42 +0000335 BUG_ON(offset >= PAGE_SIZE);
Ian Campbellf942dc22011-03-15 00:06:18 +0000336 BUG_ON(copy_off > MAX_BUFFER_OFFSET);
337
Ian Campbell6a8ed462012-10-10 03:48:42 +0000338 bytes = PAGE_SIZE - offset;
339
340 if (bytes > size)
341 bytes = size;
342
343 if (start_new_rx_buffer(copy_off, bytes, 0)) {
Ian Campbellf942dc22011-03-15 00:06:18 +0000344 count++;
345 copy_off = 0;
346 }
347
Ian Campbellf942dc22011-03-15 00:06:18 +0000348 if (copy_off + bytes > MAX_BUFFER_OFFSET)
349 bytes = MAX_BUFFER_OFFSET - copy_off;
350
351 copy_off += bytes;
Ian Campbell6a8ed462012-10-10 03:48:42 +0000352
353 offset += bytes;
Ian Campbellf942dc22011-03-15 00:06:18 +0000354 size -= bytes;
Ian Campbell6a8ed462012-10-10 03:48:42 +0000355
356 if (offset == PAGE_SIZE)
357 offset = 0;
Ian Campbellf942dc22011-03-15 00:06:18 +0000358 }
359 }
360 return count;
361}
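/* Worked example (a sketch, 4 KiB pages): an skb with a 2000 byte linear
 * area and a single 3000 byte frag starting at page offset 0:
 *
 *   head : DIV_ROUND_UP(2000, 4096) = 1 slot, copy_off = 2000
 *   frag : start_new_rx_buffer(2000, 3000, 0) is true, so a second slot
 *          is opened and the whole frag is placed there
 *   total: 2 slots, plus 1 if the skb is GSO (for the extra_info slot)
 */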
362
363struct netrx_pending_operations {
364 unsigned copy_prod, copy_cons;
365 unsigned meta_prod, meta_cons;
366 struct gnttab_copy *copy;
367 struct netbk_rx_meta *meta;
368 int copy_off;
369 grant_ref_t copy_gref;
370};
371
372static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
373 struct netrx_pending_operations *npo)
374{
375 struct netbk_rx_meta *meta;
376 struct xen_netif_rx_request *req;
377
378 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
379
380 meta = npo->meta + npo->meta_prod++;
381 meta->gso_size = 0;
382 meta->size = 0;
383 meta->id = req->id;
384
385 npo->copy_off = 0;
386 npo->copy_gref = req->gref;
387
388 return meta;
389}
390
391/*
 392 * Set up the grant copy operations for this fragment. The data is
 393 * copied into buffers granted by the frontend; nothing is mapped.
394 */
395static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
396 struct netrx_pending_operations *npo,
397 struct page *page, unsigned long size,
398 unsigned long offset, int *head)
399{
400 struct gnttab_copy *copy_gop;
401 struct netbk_rx_meta *meta;
Ian Campbellf942dc22011-03-15 00:06:18 +0000402 unsigned long bytes;
403
404 /* Data must not cross a page boundary. */
Ian Campbell6a8ed462012-10-10 03:48:42 +0000405 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
Ian Campbellf942dc22011-03-15 00:06:18 +0000406
407 meta = npo->meta + npo->meta_prod - 1;
408
Ian Campbell6a8ed462012-10-10 03:48:42 +0000409	/* Skip unused page frames at the start of a compound page */
410 page += offset >> PAGE_SHIFT;
411 offset &= ~PAGE_MASK;
412
Ian Campbellf942dc22011-03-15 00:06:18 +0000413 while (size > 0) {
Ian Campbell6a8ed462012-10-10 03:48:42 +0000414 BUG_ON(offset >= PAGE_SIZE);
Ian Campbellf942dc22011-03-15 00:06:18 +0000415 BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
416
Ian Campbell6a8ed462012-10-10 03:48:42 +0000417 bytes = PAGE_SIZE - offset;
418
419 if (bytes > size)
420 bytes = size;
421
422 if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
Ian Campbellf942dc22011-03-15 00:06:18 +0000423 /*
424 * Netfront requires there to be some data in the head
425 * buffer.
426 */
427 BUG_ON(*head);
428
429 meta = get_next_rx_buffer(vif, npo);
430 }
431
Ian Campbellf942dc22011-03-15 00:06:18 +0000432 if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
433 bytes = MAX_BUFFER_OFFSET - npo->copy_off;
434
435 copy_gop = npo->copy + npo->copy_prod++;
436 copy_gop->flags = GNTCOPY_dest_gref;
Wei Liu43e9d192013-08-26 12:59:37 +0100437 copy_gop->source.domid = DOMID_SELF;
438 copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
Ian Campbellf942dc22011-03-15 00:06:18 +0000439
Ian Campbellf942dc22011-03-15 00:06:18 +0000440 copy_gop->source.offset = offset;
441 copy_gop->dest.domid = vif->domid;
442
443 copy_gop->dest.offset = npo->copy_off;
444 copy_gop->dest.u.ref = npo->copy_gref;
445 copy_gop->len = bytes;
446
447 npo->copy_off += bytes;
448 meta->size += bytes;
449
450 offset += bytes;
451 size -= bytes;
452
Ian Campbell6a8ed462012-10-10 03:48:42 +0000453 /* Next frame */
454 if (offset == PAGE_SIZE && size) {
455 BUG_ON(!PageCompound(page));
456 page++;
457 offset = 0;
458 }
459
Ian Campbellf942dc22011-03-15 00:06:18 +0000460 /* Leave a gap for the GSO descriptor. */
461 if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
462 vif->rx.req_cons++;
463
464 *head = 0; /* There must be something in this buffer now. */
465
466 }
467}
468
469/*
470 * Prepare an SKB to be transmitted to the frontend.
471 *
472 * This function is responsible for allocating grant operations, meta
473 * structures, etc.
474 *
475 * It returns the number of meta structures consumed. The number of
476 * ring slots used is always equal to the number of meta slots used
477 * plus the number of GSO descriptors used. Currently, we use either
478 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
479 * frontend-side LRO).
480 */
481static int netbk_gop_skb(struct sk_buff *skb,
482 struct netrx_pending_operations *npo)
483{
484 struct xenvif *vif = netdev_priv(skb->dev);
485 int nr_frags = skb_shinfo(skb)->nr_frags;
486 int i;
487 struct xen_netif_rx_request *req;
488 struct netbk_rx_meta *meta;
489 unsigned char *data;
490 int head = 1;
491 int old_meta_prod;
492
493 old_meta_prod = npo->meta_prod;
494
495 /* Set up a GSO prefix descriptor, if necessary */
496 if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
497 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
498 meta = npo->meta + npo->meta_prod++;
499 meta->gso_size = skb_shinfo(skb)->gso_size;
500 meta->size = 0;
501 meta->id = req->id;
502 }
503
504 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
505 meta = npo->meta + npo->meta_prod++;
506
507 if (!vif->gso_prefix)
508 meta->gso_size = skb_shinfo(skb)->gso_size;
509 else
510 meta->gso_size = 0;
511
512 meta->size = 0;
513 meta->id = req->id;
514 npo->copy_off = 0;
515 npo->copy_gref = req->gref;
516
517 data = skb->data;
518 while (data < skb_tail_pointer(skb)) {
519 unsigned int offset = offset_in_page(data);
520 unsigned int len = PAGE_SIZE - offset;
521
522 if (data + len > skb_tail_pointer(skb))
523 len = skb_tail_pointer(skb) - data;
524
525 netbk_gop_frag_copy(vif, skb, npo,
526 virt_to_page(data), len, offset, &head);
527 data += len;
528 }
529
530 for (i = 0; i < nr_frags; i++) {
531 netbk_gop_frag_copy(vif, skb, npo,
Ian Campbellea066ad2011-10-05 00:28:46 +0000532 skb_frag_page(&skb_shinfo(skb)->frags[i]),
Eric Dumazet9e903e02011-10-18 21:00:24 +0000533 skb_frag_size(&skb_shinfo(skb)->frags[i]),
Ian Campbellf942dc22011-03-15 00:06:18 +0000534 skb_shinfo(skb)->frags[i].page_offset,
535 &head);
536 }
537
538 return npo->meta_prod - old_meta_prod;
539}
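/* Illustrative accounting (a sketch): for a GSO skb whose data spans two
 * frontend buffers on a vif without gso_prefix, netbk_gop_skb() consumes
 * 2 meta slots while the rx ring gives up 2 request slots for data plus 1
 * reserved for the GSO extra_info descriptor (see netbk_gop_frag_copy()).
 * With gso_prefix an extra meta slot and ring slot are used for the prefix
 * descriptor instead of the reserved gap.
 */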
540
541/*
542 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
543 * used to set up the operations on the top of
544 * netrx_pending_operations, which have since been done. Check that
545 * they didn't give any errors and advance over them.
546 */
547static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
548 struct netrx_pending_operations *npo)
549{
550 struct gnttab_copy *copy_op;
551 int status = XEN_NETIF_RSP_OKAY;
552 int i;
553
554 for (i = 0; i < nr_meta_slots; i++) {
555 copy_op = npo->copy + npo->copy_cons++;
556 if (copy_op->status != GNTST_okay) {
557 netdev_dbg(vif->dev,
558 "Bad status %d from copy to DOM%d.\n",
559 copy_op->status, vif->domid);
560 status = XEN_NETIF_RSP_ERROR;
561 }
562 }
563
564 return status;
565}
566
567static void netbk_add_frag_responses(struct xenvif *vif, int status,
568 struct netbk_rx_meta *meta,
569 int nr_meta_slots)
570{
571 int i;
572 unsigned long offset;
573
574 /* No fragments used */
575 if (nr_meta_slots <= 1)
576 return;
577
578 nr_meta_slots--;
579
580 for (i = 0; i < nr_meta_slots; i++) {
581 int flags;
582 if (i == nr_meta_slots - 1)
583 flags = 0;
584 else
585 flags = XEN_NETRXF_more_data;
586
587 offset = 0;
588 make_rx_response(vif, meta[i].id, status, offset,
589 meta[i].size, flags);
590 }
591}
592
593struct skb_cb_overlay {
594 int meta_slots_used;
595};
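/* This overlays the 48-byte skb->cb scratch area.  A sketch of the usage
 * pattern in xen_netbk_rx_action() below:
 *
 *   struct skb_cb_overlay *sco = (struct skb_cb_overlay *)skb->cb;
 *   sco->meta_slots_used = netbk_gop_skb(skb, &npo);
 *
 * so the number of meta slots a queued skb consumed travels with the skb
 * between the two passes over the rx queue.
 */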
596
597static void xen_netbk_rx_action(struct xen_netbk *netbk)
598{
599 struct xenvif *vif = NULL, *tmp;
600 s8 status;
Wei Liue1f00a692013-05-22 06:34:45 +0000601 u16 flags;
Ian Campbellf942dc22011-03-15 00:06:18 +0000602 struct xen_netif_rx_response *resp;
603 struct sk_buff_head rxq;
604 struct sk_buff *skb;
605 LIST_HEAD(notify);
606 int ret;
607 int nr_frags;
608 int count;
609 unsigned long offset;
610 struct skb_cb_overlay *sco;
611
612 struct netrx_pending_operations npo = {
613 .copy = netbk->grant_copy_op,
614 .meta = netbk->meta,
615 };
616
617 skb_queue_head_init(&rxq);
618
619 count = 0;
620
621 while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
622 vif = netdev_priv(skb->dev);
623 nr_frags = skb_shinfo(skb)->nr_frags;
624
625 sco = (struct skb_cb_overlay *)skb->cb;
626 sco->meta_slots_used = netbk_gop_skb(skb, &npo);
627
628 count += nr_frags + 1;
629
630 __skb_queue_tail(&rxq, skb);
631
632 /* Filled the batch queue? */
Wei Liu2810e5b2013-04-22 02:20:42 +0000633 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
Ian Campbellf942dc22011-03-15 00:06:18 +0000634 if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
635 break;
636 }
637
638 BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
639
640 if (!npo.copy_prod)
641 return;
642
643 BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
Andres Lagar-Cavillac5718982012-09-14 14:26:59 +0000644 gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
Ian Campbellf942dc22011-03-15 00:06:18 +0000645
646 while ((skb = __skb_dequeue(&rxq)) != NULL) {
647 sco = (struct skb_cb_overlay *)skb->cb;
648
649 vif = netdev_priv(skb->dev);
650
651 if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
652 resp = RING_GET_RESPONSE(&vif->rx,
653 vif->rx.rsp_prod_pvt++);
654
655 resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
656
657 resp->offset = netbk->meta[npo.meta_cons].gso_size;
658 resp->id = netbk->meta[npo.meta_cons].id;
659 resp->status = sco->meta_slots_used;
660
661 npo.meta_cons++;
662 sco->meta_slots_used--;
663 }
664
665
666 vif->dev->stats.tx_bytes += skb->len;
667 vif->dev->stats.tx_packets++;
668
669 status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
670
671 if (sco->meta_slots_used == 1)
672 flags = 0;
673 else
674 flags = XEN_NETRXF_more_data;
675
676 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
677 flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
678 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
679 /* remote but checksummed. */
680 flags |= XEN_NETRXF_data_validated;
681
682 offset = 0;
683 resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
684 status, offset,
685 netbk->meta[npo.meta_cons].size,
686 flags);
687
688 if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
689 struct xen_netif_extra_info *gso =
690 (struct xen_netif_extra_info *)
691 RING_GET_RESPONSE(&vif->rx,
692 vif->rx.rsp_prod_pvt++);
693
694 resp->flags |= XEN_NETRXF_extra_info;
695
696 gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
697 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
698 gso->u.gso.pad = 0;
699 gso->u.gso.features = 0;
700
701 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
702 gso->flags = 0;
703 }
704
705 netbk_add_frag_responses(vif, status,
706 netbk->meta + npo.meta_cons + 1,
707 sco->meta_slots_used);
708
709 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
Ian Campbellf942dc22011-03-15 00:06:18 +0000710
711 xenvif_notify_tx_completion(vif);
712
Jan Beulich94f950c2013-06-11 11:00:34 +0100713 if (ret && list_empty(&vif->notify_list))
714 list_add_tail(&vif->notify_list, &notify);
715 else
716 xenvif_put(vif);
Ian Campbellf942dc22011-03-15 00:06:18 +0000717 npo.meta_cons += sco->meta_slots_used;
718 dev_kfree_skb(skb);
719 }
720
721 list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
Wei Liue1f00a692013-05-22 06:34:45 +0000722 notify_remote_via_irq(vif->rx_irq);
Ian Campbellf942dc22011-03-15 00:06:18 +0000723 list_del_init(&vif->notify_list);
Jan Beulich94f950c2013-06-11 11:00:34 +0100724 xenvif_put(vif);
Ian Campbellf942dc22011-03-15 00:06:18 +0000725 }
726
727 /* More work to do? */
728 if (!skb_queue_empty(&netbk->rx_queue) &&
729 !timer_pending(&netbk->net_timer))
730 xen_netbk_kick_thread(netbk);
731}
732
733void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
734{
735 struct xen_netbk *netbk = vif->netbk;
736
737 skb_queue_tail(&netbk->rx_queue, skb);
738
739 xen_netbk_kick_thread(netbk);
740}
741
742static void xen_netbk_alarm(unsigned long data)
743{
744 struct xen_netbk *netbk = (struct xen_netbk *)data;
745 xen_netbk_kick_thread(netbk);
746}
747
748static int __on_net_schedule_list(struct xenvif *vif)
749{
750 return !list_empty(&vif->schedule_list);
751}
752
753/* Must be called with net_schedule_list_lock held */
754static void remove_from_net_schedule_list(struct xenvif *vif)
755{
756 if (likely(__on_net_schedule_list(vif))) {
757 list_del_init(&vif->schedule_list);
758 xenvif_put(vif);
759 }
760}
761
762static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
763{
764 struct xenvif *vif = NULL;
765
766 spin_lock_irq(&netbk->net_schedule_list_lock);
767 if (list_empty(&netbk->net_schedule_list))
768 goto out;
769
770 vif = list_first_entry(&netbk->net_schedule_list,
771 struct xenvif, schedule_list);
772 if (!vif)
773 goto out;
774
775 xenvif_get(vif);
776
777 remove_from_net_schedule_list(vif);
778out:
779 spin_unlock_irq(&netbk->net_schedule_list_lock);
780 return vif;
781}
782
783void xen_netbk_schedule_xenvif(struct xenvif *vif)
784{
785 unsigned long flags;
786 struct xen_netbk *netbk = vif->netbk;
787
788 if (__on_net_schedule_list(vif))
789 goto kick;
790
791 spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
792 if (!__on_net_schedule_list(vif) &&
793 likely(xenvif_schedulable(vif))) {
794 list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
795 xenvif_get(vif);
796 }
797 spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
798
799kick:
800 smp_mb();
801 if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
802 !list_empty(&netbk->net_schedule_list))
803 xen_netbk_kick_thread(netbk);
804}
805
806void xen_netbk_deschedule_xenvif(struct xenvif *vif)
807{
808 struct xen_netbk *netbk = vif->netbk;
809 spin_lock_irq(&netbk->net_schedule_list_lock);
810 remove_from_net_schedule_list(vif);
811 spin_unlock_irq(&netbk->net_schedule_list_lock);
812}
813
814void xen_netbk_check_rx_xenvif(struct xenvif *vif)
815{
816 int more_to_do;
817
818 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
819
820 if (more_to_do)
821 xen_netbk_schedule_xenvif(vif);
822}
823
824static void tx_add_credit(struct xenvif *vif)
825{
826 unsigned long max_burst, max_credit;
827
828 /*
829 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
830 * Otherwise the interface can seize up due to insufficient credit.
831 */
832 max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
833 max_burst = min(max_burst, 131072UL);
834 max_burst = max(max_burst, vif->credit_bytes);
835
836 /* Take care that adding a new chunk of credit doesn't wrap to zero. */
837 max_credit = vif->remaining_credit + vif->credit_bytes;
838 if (max_credit < vif->remaining_credit)
839 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
840
841 vif->remaining_credit = min(max_credit, max_burst);
842}
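/* Worked example (a sketch): with credit_bytes = 1000000 and a 1500 byte
 * request at the ring head:
 *
 *   max_burst        = max(min(1500, 131072), 1000000) = 1000000
 *   max_credit       = remaining_credit + 1000000 (ULONG_MAX on wrap)
 *   remaining_credit = min(max_credit, max_burst)   = 1000000
 *
 * i.e. each replenish adds credit_bytes, but the running total is capped
 * at max_burst, which is never below credit_bytes nor below the (128 kB
 * clamped) size of the request at the ring head.
 */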
843
844static void tx_credit_callback(unsigned long data)
845{
846 struct xenvif *vif = (struct xenvif *)data;
847 tx_add_credit(vif);
848 xen_netbk_check_rx_xenvif(vif);
849}
850
851static void netbk_tx_err(struct xenvif *vif,
852 struct xen_netif_tx_request *txp, RING_IDX end)
853{
854 RING_IDX cons = vif->tx.req_cons;
855
856 do {
857 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
Ian Campbellb9149722013-02-06 23:41:38 +0000858 if (cons == end)
Ian Campbellf942dc22011-03-15 00:06:18 +0000859 break;
860 txp = RING_GET_REQUEST(&vif->tx, cons++);
861 } while (1);
862 vif->tx.req_cons = cons;
863 xen_netbk_check_rx_xenvif(vif);
864 xenvif_put(vif);
865}
866
Ian Campbell488562862013-02-06 23:41:35 +0000867static void netbk_fatal_tx_err(struct xenvif *vif)
868{
869 netdev_err(vif->dev, "fatal error; disabling device\n");
870 xenvif_carrier_off(vif);
David S. Miller629821d2013-02-19 13:04:34 -0500871 xenvif_put(vif);
Ian Campbell488562862013-02-06 23:41:35 +0000872}
873
Ian Campbellf942dc22011-03-15 00:06:18 +0000874static int netbk_count_requests(struct xenvif *vif,
875 struct xen_netif_tx_request *first,
876 struct xen_netif_tx_request *txp,
877 int work_to_do)
878{
879 RING_IDX cons = vif->tx.req_cons;
Wei Liu2810e5b2013-04-22 02:20:42 +0000880 int slots = 0;
881 int drop_err = 0;
Wei Liu59ccb4e2013-05-02 00:43:58 +0000882 int more_data;
Ian Campbellf942dc22011-03-15 00:06:18 +0000883
884 if (!(first->flags & XEN_NETTXF_more_data))
885 return 0;
886
887 do {
Wei Liu59ccb4e2013-05-02 00:43:58 +0000888 struct xen_netif_tx_request dropped_tx = { 0 };
889
Wei Liu2810e5b2013-04-22 02:20:42 +0000890 if (slots >= work_to_do) {
891 netdev_err(vif->dev,
892 "Asked for %d slots but exceeds this limit\n",
893 work_to_do);
Ian Campbell488562862013-02-06 23:41:35 +0000894 netbk_fatal_tx_err(vif);
David Vrabel35876b52013-02-14 03:18:57 +0000895 return -ENODATA;
Ian Campbellf942dc22011-03-15 00:06:18 +0000896 }
897
Wei Liu2810e5b2013-04-22 02:20:42 +0000898 /* This guest is really using too many slots and
 899	 * is considered malicious.
900 */
Wei Liu37641492013-05-02 00:43:59 +0000901 if (unlikely(slots >= fatal_skb_slots)) {
Wei Liu2810e5b2013-04-22 02:20:42 +0000902 netdev_err(vif->dev,
903 "Malicious frontend using %d slots, threshold %u\n",
Wei Liu37641492013-05-02 00:43:59 +0000904 slots, fatal_skb_slots);
Ian Campbell488562862013-02-06 23:41:35 +0000905 netbk_fatal_tx_err(vif);
David Vrabel35876b52013-02-14 03:18:57 +0000906 return -E2BIG;
Ian Campbellf942dc22011-03-15 00:06:18 +0000907 }
908
Wei Liu2810e5b2013-04-22 02:20:42 +0000909		/* The Xen network protocol had an implicit dependency on
Wei Liu37641492013-05-02 00:43:59 +0000910 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
911 * the historical MAX_SKB_FRAGS value 18 to honor the
912 * same behavior as before. Any packet using more than
913 * 18 slots but less than fatal_skb_slots slots is
 914		 * dropped.
Wei Liu2810e5b2013-04-22 02:20:42 +0000915 */
Wei Liu37641492013-05-02 00:43:59 +0000916 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
Wei Liu2810e5b2013-04-22 02:20:42 +0000917 if (net_ratelimit())
918 netdev_dbg(vif->dev,
919 "Too many slots (%d) exceeding limit (%d), dropping packet\n",
Wei Liu37641492013-05-02 00:43:59 +0000920 slots, XEN_NETBK_LEGACY_SLOTS_MAX);
Wei Liu2810e5b2013-04-22 02:20:42 +0000921 drop_err = -E2BIG;
922 }
923
Wei Liu59ccb4e2013-05-02 00:43:58 +0000924 if (drop_err)
925 txp = &dropped_tx;
926
Wei Liu2810e5b2013-04-22 02:20:42 +0000927 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
Ian Campbellf942dc22011-03-15 00:06:18 +0000928 sizeof(*txp));
Wei Liu03393fd2013-04-22 02:20:43 +0000929
930 /* If the guest submitted a frame >= 64 KiB then
931 * first->size overflowed and following slots will
932 * appear to be larger than the frame.
933 *
 934		 * This cannot be a fatal error as there are buggy
935 * frontends that do this.
936 *
937 * Consume all slots and drop the packet.
938 */
939 if (!drop_err && txp->size > first->size) {
940 if (net_ratelimit())
941 netdev_dbg(vif->dev,
942 "Invalid tx request, slot size %u > remaining size %u\n",
943 txp->size, first->size);
944 drop_err = -EIO;
Ian Campbellf942dc22011-03-15 00:06:18 +0000945 }
946
947 first->size -= txp->size;
Wei Liu2810e5b2013-04-22 02:20:42 +0000948 slots++;
Ian Campbellf942dc22011-03-15 00:06:18 +0000949
950 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
Wei Liu2810e5b2013-04-22 02:20:42 +0000951 netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
Ian Campbellf942dc22011-03-15 00:06:18 +0000952 txp->offset, txp->size);
Ian Campbell488562862013-02-06 23:41:35 +0000953 netbk_fatal_tx_err(vif);
David Vrabel35876b52013-02-14 03:18:57 +0000954 return -EINVAL;
Ian Campbellf942dc22011-03-15 00:06:18 +0000955 }
Wei Liu59ccb4e2013-05-02 00:43:58 +0000956
957 more_data = txp->flags & XEN_NETTXF_more_data;
958
959 if (!drop_err)
960 txp++;
961
962 } while (more_data);
Wei Liu2810e5b2013-04-22 02:20:42 +0000963
964 if (drop_err) {
Wei Liuac69c262013-05-02 00:43:57 +0000965 netbk_tx_err(vif, first, cons + slots);
Wei Liu2810e5b2013-04-22 02:20:42 +0000966 return drop_err;
967 }
968
969 return slots;
Ian Campbellf942dc22011-03-15 00:06:18 +0000970}
971
972static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
Ian Campbellea066ad2011-10-05 00:28:46 +0000973 u16 pending_idx)
Ian Campbellf942dc22011-03-15 00:06:18 +0000974{
975 struct page *page;
976 page = alloc_page(GFP_KERNEL|__GFP_COLD);
977 if (!page)
978 return NULL;
Ian Campbellf942dc22011-03-15 00:06:18 +0000979 netbk->mmap_pages[pending_idx] = page;
980 return page;
981}
982
983static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
984 struct xenvif *vif,
985 struct sk_buff *skb,
986 struct xen_netif_tx_request *txp,
987 struct gnttab_copy *gop)
988{
989 struct skb_shared_info *shinfo = skb_shinfo(skb);
990 skb_frag_t *frags = shinfo->frags;
Ian Campbellea066ad2011-10-05 00:28:46 +0000991 u16 pending_idx = *((u16 *)skb->data);
Wei Liu2810e5b2013-04-22 02:20:42 +0000992 u16 head_idx = 0;
993 int slot, start;
994 struct page *page;
995 pending_ring_idx_t index, start_idx = 0;
996 uint16_t dst_offset;
997 unsigned int nr_slots;
998 struct pending_tx_info *first = NULL;
999
1000 /* At this point shinfo->nr_frags is in fact the number of
Wei Liu37641492013-05-02 00:43:59 +00001001 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
Wei Liu2810e5b2013-04-22 02:20:42 +00001002 */
1003 nr_slots = shinfo->nr_frags;
Ian Campbellf942dc22011-03-15 00:06:18 +00001004
1005 /* Skip first skb fragment if it is on same page as header fragment. */
Ian Campbellea066ad2011-10-05 00:28:46 +00001006 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
Ian Campbellf942dc22011-03-15 00:06:18 +00001007
Wei Liu2810e5b2013-04-22 02:20:42 +00001008 /* Coalesce tx requests, at this point the packet passed in
1009 * should be <= 64K. Any packets larger than 64K have been
1010 * handled in netbk_count_requests().
1011 */
1012 for (shinfo->nr_frags = slot = start; slot < nr_slots;
1013 shinfo->nr_frags++) {
Ian Campbellf942dc22011-03-15 00:06:18 +00001014 struct pending_tx_info *pending_tx_info =
1015 netbk->pending_tx_info;
1016
Wei Liu2810e5b2013-04-22 02:20:42 +00001017 page = alloc_page(GFP_KERNEL|__GFP_COLD);
Ian Campbellf942dc22011-03-15 00:06:18 +00001018 if (!page)
Ian Campbell4cc7c1c2013-02-06 23:41:37 +00001019 goto err;
Ian Campbellf942dc22011-03-15 00:06:18 +00001020
Wei Liu2810e5b2013-04-22 02:20:42 +00001021 dst_offset = 0;
1022 first = NULL;
1023 while (dst_offset < PAGE_SIZE && slot < nr_slots) {
1024 gop->flags = GNTCOPY_source_gref;
Ian Campbellf942dc22011-03-15 00:06:18 +00001025
Wei Liu2810e5b2013-04-22 02:20:42 +00001026 gop->source.u.ref = txp->gref;
1027 gop->source.domid = vif->domid;
1028 gop->source.offset = txp->offset;
Ian Campbellf942dc22011-03-15 00:06:18 +00001029
Wei Liu2810e5b2013-04-22 02:20:42 +00001030 gop->dest.domid = DOMID_SELF;
Ian Campbellf942dc22011-03-15 00:06:18 +00001031
Wei Liu2810e5b2013-04-22 02:20:42 +00001032 gop->dest.offset = dst_offset;
1033 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
Ian Campbellf942dc22011-03-15 00:06:18 +00001034
Wei Liu2810e5b2013-04-22 02:20:42 +00001035 if (dst_offset + txp->size > PAGE_SIZE) {
1036 /* This page can only merge a portion
 1037			 * of the tx request. Do not increment any
1038 * pointer / counter here. The txp
1039 * will be dealt with in future
1040 * rounds, eventually hitting the
1041 * `else` branch.
1042 */
1043 gop->len = PAGE_SIZE - dst_offset;
1044 txp->offset += gop->len;
1045 txp->size -= gop->len;
1046 dst_offset += gop->len; /* quit loop */
1047 } else {
1048 /* This tx request can be merged in the page */
1049 gop->len = txp->size;
1050 dst_offset += gop->len;
1051
1052 index = pending_index(netbk->pending_cons++);
1053
1054 pending_idx = netbk->pending_ring[index];
1055
1056 memcpy(&pending_tx_info[pending_idx].req, txp,
1057 sizeof(*txp));
1058 xenvif_get(vif);
1059
1060 pending_tx_info[pending_idx].vif = vif;
1061
 1062				/* Poison these fields; the corresponding
 1063				 * fields for the head tx req will be set
 1064				 * to correct values after the loop.
1065 */
1066 netbk->mmap_pages[pending_idx] = (void *)(~0UL);
1067 pending_tx_info[pending_idx].head =
1068 INVALID_PENDING_RING_IDX;
1069
1070 if (!first) {
1071 first = &pending_tx_info[pending_idx];
1072 start_idx = index;
1073 head_idx = pending_idx;
1074 }
1075
1076 txp++;
1077 slot++;
1078 }
1079
1080 gop++;
1081 }
1082
1083 first->req.offset = 0;
1084 first->req.size = dst_offset;
1085 first->head = start_idx;
Wei Liu2810e5b2013-04-22 02:20:42 +00001086 netbk->mmap_pages[head_idx] = page;
1087 frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
Ian Campbellf942dc22011-03-15 00:06:18 +00001088 }
1089
Wei Liu2810e5b2013-04-22 02:20:42 +00001090 BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
1091
Ian Campbellf942dc22011-03-15 00:06:18 +00001092 return gop;
Ian Campbell4cc7c1c2013-02-06 23:41:37 +00001093err:
1094 /* Unwind, freeing all pages and sending error responses. */
Wei Liu2810e5b2013-04-22 02:20:42 +00001095 while (shinfo->nr_frags-- > start) {
1096 xen_netbk_idx_release(netbk,
1097 frag_get_pending_idx(&frags[shinfo->nr_frags]),
1098 XEN_NETIF_RSP_ERROR);
Ian Campbell4cc7c1c2013-02-06 23:41:37 +00001099 }
1100 /* The head too, if necessary. */
1101 if (start)
1102 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
1103
1104 return NULL;
Ian Campbellf942dc22011-03-15 00:06:18 +00001105}
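/* Illustrative coalescing example (a sketch, 4 KiB pages): three follow-on
 * tx requests of 1000, 1000 and 2000 bytes are merged into one destination
 * page by the loop above:
 *
 *   gop[0]: copy 1000 bytes to page offset 0
 *   gop[1]: copy 1000 bytes to page offset 1000
 *   gop[2]: copy 2000 bytes to page offset 2000
 *
 * The three requests consume three pending slots but only one skb frag;
 * the first slot becomes the head and its req.size is rewritten to the
 * merged total of 4000 bytes so that xen_netbk_fill_frags() can attach
 * the whole page as a single fragment.
 */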
1106
1107static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
1108 struct sk_buff *skb,
1109 struct gnttab_copy **gopp)
1110{
1111 struct gnttab_copy *gop = *gopp;
Ian Campbellea066ad2011-10-05 00:28:46 +00001112 u16 pending_idx = *((u16 *)skb->data);
Ian Campbellf942dc22011-03-15 00:06:18 +00001113 struct skb_shared_info *shinfo = skb_shinfo(skb);
Wei Liu2810e5b2013-04-22 02:20:42 +00001114 struct pending_tx_info *tx_info;
Ian Campbellf942dc22011-03-15 00:06:18 +00001115 int nr_frags = shinfo->nr_frags;
1116 int i, err, start;
Wei Liu2810e5b2013-04-22 02:20:42 +00001117 u16 peek; /* peek into next tx request */
Ian Campbellf942dc22011-03-15 00:06:18 +00001118
1119 /* Check status of header. */
1120 err = gop->status;
Matthew Daley7d5145d2013-02-06 23:41:36 +00001121 if (unlikely(err))
1122 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
Ian Campbellf942dc22011-03-15 00:06:18 +00001123
1124 /* Skip first skb fragment if it is on same page as header fragment. */
Ian Campbellea066ad2011-10-05 00:28:46 +00001125 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
Ian Campbellf942dc22011-03-15 00:06:18 +00001126
1127 for (i = start; i < nr_frags; i++) {
1128 int j, newerr;
Wei Liu2810e5b2013-04-22 02:20:42 +00001129 pending_ring_idx_t head;
Ian Campbellf942dc22011-03-15 00:06:18 +00001130
Ian Campbellea066ad2011-10-05 00:28:46 +00001131 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
Wei Liu2810e5b2013-04-22 02:20:42 +00001132 tx_info = &netbk->pending_tx_info[pending_idx];
1133 head = tx_info->head;
Ian Campbellf942dc22011-03-15 00:06:18 +00001134
1135 /* Check error status: if okay then remember grant handle. */
Wei Liu2810e5b2013-04-22 02:20:42 +00001136 do {
1137 newerr = (++gop)->status;
1138 if (newerr)
1139 break;
1140 peek = netbk->pending_ring[pending_index(++head)];
1141 } while (!pending_tx_is_head(netbk, peek));
1142
Ian Campbellf942dc22011-03-15 00:06:18 +00001143 if (likely(!newerr)) {
1144 /* Had a previous error? Invalidate this fragment. */
1145 if (unlikely(err))
Matthew Daley7d5145d2013-02-06 23:41:36 +00001146 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
Ian Campbellf942dc22011-03-15 00:06:18 +00001147 continue;
1148 }
1149
1150 /* Error on this fragment: respond to client with an error. */
Matthew Daley7d5145d2013-02-06 23:41:36 +00001151 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
Ian Campbellf942dc22011-03-15 00:06:18 +00001152
1153 /* Not the first error? Preceding frags already invalidated. */
1154 if (err)
1155 continue;
1156
1157 /* First error: invalidate header and preceding fragments. */
1158 pending_idx = *((u16 *)skb->data);
Matthew Daley7d5145d2013-02-06 23:41:36 +00001159 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
Ian Campbellf942dc22011-03-15 00:06:18 +00001160 for (j = start; j < i; j++) {
Jan Beulich5ccb3ea2011-11-18 05:42:05 +00001161 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
Matthew Daley7d5145d2013-02-06 23:41:36 +00001162 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
Ian Campbellf942dc22011-03-15 00:06:18 +00001163 }
1164
1165 /* Remember the error: invalidate all subsequent fragments. */
1166 err = newerr;
1167 }
1168
1169 *gopp = gop + 1;
1170 return err;
1171}
1172
1173static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1174{
1175 struct skb_shared_info *shinfo = skb_shinfo(skb);
1176 int nr_frags = shinfo->nr_frags;
1177 int i;
1178
1179 for (i = 0; i < nr_frags; i++) {
1180 skb_frag_t *frag = shinfo->frags + i;
1181 struct xen_netif_tx_request *txp;
Ian Campbellea066ad2011-10-05 00:28:46 +00001182 struct page *page;
1183 u16 pending_idx;
Ian Campbellf942dc22011-03-15 00:06:18 +00001184
Ian Campbellea066ad2011-10-05 00:28:46 +00001185 pending_idx = frag_get_pending_idx(frag);
Ian Campbellf942dc22011-03-15 00:06:18 +00001186
1187 txp = &netbk->pending_tx_info[pending_idx].req;
Ian Campbellea066ad2011-10-05 00:28:46 +00001188 page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
1189 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
Ian Campbellf942dc22011-03-15 00:06:18 +00001190 skb->len += txp->size;
1191 skb->data_len += txp->size;
1192 skb->truesize += txp->size;
1193
1194 /* Take an extra reference to offset xen_netbk_idx_release */
1195 get_page(netbk->mmap_pages[pending_idx]);
Matthew Daley7d5145d2013-02-06 23:41:36 +00001196 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
Ian Campbellf942dc22011-03-15 00:06:18 +00001197 }
1198}
1199
1200static int xen_netbk_get_extras(struct xenvif *vif,
1201 struct xen_netif_extra_info *extras,
1202 int work_to_do)
1203{
1204 struct xen_netif_extra_info extra;
1205 RING_IDX cons = vif->tx.req_cons;
1206
1207 do {
1208 if (unlikely(work_to_do-- <= 0)) {
Ian Campbell488562862013-02-06 23:41:35 +00001209 netdev_err(vif->dev, "Missing extra info\n");
1210 netbk_fatal_tx_err(vif);
Ian Campbellf942dc22011-03-15 00:06:18 +00001211 return -EBADR;
1212 }
1213
1214 memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1215 sizeof(extra));
1216 if (unlikely(!extra.type ||
1217 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1218 vif->tx.req_cons = ++cons;
Ian Campbell488562862013-02-06 23:41:35 +00001219 netdev_err(vif->dev,
Ian Campbellf942dc22011-03-15 00:06:18 +00001220 "Invalid extra type: %d\n", extra.type);
Ian Campbell488562862013-02-06 23:41:35 +00001221 netbk_fatal_tx_err(vif);
Ian Campbellf942dc22011-03-15 00:06:18 +00001222 return -EINVAL;
1223 }
1224
1225 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1226 vif->tx.req_cons = ++cons;
1227 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1228
1229 return work_to_do;
1230}
1231
1232static int netbk_set_skb_gso(struct xenvif *vif,
1233 struct sk_buff *skb,
1234 struct xen_netif_extra_info *gso)
1235{
1236 if (!gso->u.gso.size) {
Ian Campbell488562862013-02-06 23:41:35 +00001237 netdev_err(vif->dev, "GSO size must not be zero.\n");
1238 netbk_fatal_tx_err(vif);
Ian Campbellf942dc22011-03-15 00:06:18 +00001239 return -EINVAL;
1240 }
1241
1242 /* Currently only TCPv4 S.O. is supported. */
1243 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
Ian Campbell488562862013-02-06 23:41:35 +00001244 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1245 netbk_fatal_tx_err(vif);
Ian Campbellf942dc22011-03-15 00:06:18 +00001246 return -EINVAL;
1247 }
1248
1249 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1250 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1251
1252 /* Header must be checked, and gso_segs computed. */
1253 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1254 skb_shinfo(skb)->gso_segs = 0;
1255
1256 return 0;
1257}
1258
1259static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1260{
1261 struct iphdr *iph;
Ian Campbellf942dc22011-03-15 00:06:18 +00001262 int err = -EPROTO;
1263 int recalculate_partial_csum = 0;
1264
1265 /*
1266 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1267 * peers can fail to set NETRXF_csum_blank when sending a GSO
1268 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1269 * recalculate the partial checksum.
1270 */
1271 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1272 vif->rx_gso_checksum_fixup++;
1273 skb->ip_summed = CHECKSUM_PARTIAL;
1274 recalculate_partial_csum = 1;
1275 }
1276
1277 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1278 if (skb->ip_summed != CHECKSUM_PARTIAL)
1279 return 0;
1280
1281 if (skb->protocol != htons(ETH_P_IP))
1282 goto out;
1283
1284 iph = (void *)skb->data;
Ian Campbellf942dc22011-03-15 00:06:18 +00001285 switch (iph->protocol) {
1286 case IPPROTO_TCP:
Jason Wangbea89332013-04-10 20:35:29 +00001287 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1288 offsetof(struct tcphdr, check)))
1289 goto out;
Ian Campbellf942dc22011-03-15 00:06:18 +00001290
1291 if (recalculate_partial_csum) {
Jason Wangbea89332013-04-10 20:35:29 +00001292 struct tcphdr *tcph = tcp_hdr(skb);
Ian Campbellf942dc22011-03-15 00:06:18 +00001293 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1294 skb->len - iph->ihl*4,
1295 IPPROTO_TCP, 0);
1296 }
1297 break;
1298 case IPPROTO_UDP:
Jason Wangbea89332013-04-10 20:35:29 +00001299 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
1300 offsetof(struct udphdr, check)))
1301 goto out;
Ian Campbellf942dc22011-03-15 00:06:18 +00001302
1303 if (recalculate_partial_csum) {
Jason Wangbea89332013-04-10 20:35:29 +00001304 struct udphdr *udph = udp_hdr(skb);
Ian Campbellf942dc22011-03-15 00:06:18 +00001305 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1306 skb->len - iph->ihl*4,
1307 IPPROTO_UDP, 0);
1308 }
1309 break;
1310 default:
1311 if (net_ratelimit())
1312 netdev_err(vif->dev,
1313 "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
1314 iph->protocol);
1315 goto out;
1316 }
1317
Ian Campbellf942dc22011-03-15 00:06:18 +00001318 err = 0;
1319
1320out:
1321 return err;
1322}
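/* Illustrative numbers (a sketch): for a TCP packet whose IP total length
 * is 1500 bytes with a 20 byte IP header, the recalculation above seeds
 *
 *   tcph->check = ~csum_tcpudp_magic(saddr, daddr, 1480, IPPROTO_TCP, 0);
 *
 * i.e. the pseudo-header sum over 1480 bytes of TCP header plus payload.
 * The payload checksum itself is folded in later, by hardware offload or
 * skb_checksum_help(), once the skb leaves the backend.
 */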
1323
1324static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1325{
1326 unsigned long now = jiffies;
1327 unsigned long next_credit =
1328 vif->credit_timeout.expires +
1329 msecs_to_jiffies(vif->credit_usec / 1000);
1330
1331 /* Timer could already be pending in rare cases. */
1332 if (timer_pending(&vif->credit_timeout))
1333 return true;
1334
1335 /* Passed the point where we can replenish credit? */
1336 if (time_after_eq(now, next_credit)) {
1337 vif->credit_timeout.expires = now;
1338 tx_add_credit(vif);
1339 }
1340
1341 /* Still too big to send right now? Set a callback. */
1342 if (size > vif->remaining_credit) {
1343 vif->credit_timeout.data =
1344 (unsigned long)vif;
1345 vif->credit_timeout.function =
1346 tx_credit_callback;
1347 mod_timer(&vif->credit_timeout,
1348 next_credit);
1349
1350 return true;
1351 }
1352
1353 return false;
1354}
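/* Worked example (a sketch): with credit_bytes = 100000 and
 * credit_usec = 1000000 the vif is limited to roughly 100 kB/s.  If a
 * 1500 byte request arrives while remaining_credit == 0 and the replenish
 * point has not been reached, the request is left on the ring, the credit
 * timer is armed for next_credit, and tx_credit_callback() later tops the
 * credit back up and re-schedules the vif.
 */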
1355
1356static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1357{
1358 struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
1359 struct sk_buff *skb;
1360 int ret;
1361
Wei Liu37641492013-05-02 00:43:59 +00001362 while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
Wei Liu2810e5b2013-04-22 02:20:42 +00001363 < MAX_PENDING_REQS) &&
Ian Campbellf942dc22011-03-15 00:06:18 +00001364 !list_empty(&netbk->net_schedule_list)) {
1365 struct xenvif *vif;
1366 struct xen_netif_tx_request txreq;
Wei Liu37641492013-05-02 00:43:59 +00001367 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
Ian Campbellf942dc22011-03-15 00:06:18 +00001368 struct page *page;
1369 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1370 u16 pending_idx;
1371 RING_IDX idx;
1372 int work_to_do;
1373 unsigned int data_len;
1374 pending_ring_idx_t index;
1375
1376 /* Get a netif from the list with work to do. */
1377 vif = poll_net_schedule_list(netbk);
Ian Campbell488562862013-02-06 23:41:35 +00001378 /* This can sometimes happen because the test of
1379 * list_empty(net_schedule_list) at the top of the
1380 * loop is unlocked. Just go back and have another
1381 * look.
1382 */
Ian Campbellf942dc22011-03-15 00:06:18 +00001383 if (!vif)
1384 continue;
1385
Ian Campbell488562862013-02-06 23:41:35 +00001386 if (vif->tx.sring->req_prod - vif->tx.req_cons >
1387 XEN_NETIF_TX_RING_SIZE) {
1388 netdev_err(vif->dev,
1389 "Impossible number of requests. "
1390 "req_prod %d, req_cons %d, size %ld\n",
1391 vif->tx.sring->req_prod, vif->tx.req_cons,
1392 XEN_NETIF_TX_RING_SIZE);
1393 netbk_fatal_tx_err(vif);
1394 continue;
1395 }
1396
Ian Campbellf942dc22011-03-15 00:06:18 +00001397 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1398 if (!work_to_do) {
1399 xenvif_put(vif);
1400 continue;
1401 }
1402
1403 idx = vif->tx.req_cons;
1404 rmb(); /* Ensure that we see the request before we copy it. */
1405 memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1406
1407 /* Credit-based scheduling. */
1408 if (txreq.size > vif->remaining_credit &&
1409 tx_credit_exceeded(vif, txreq.size)) {
1410 xenvif_put(vif);
1411 continue;
1412 }
1413
1414 vif->remaining_credit -= txreq.size;
1415
1416 work_to_do--;
1417 vif->tx.req_cons = ++idx;
1418
1419 memset(extras, 0, sizeof(extras));
1420 if (txreq.flags & XEN_NETTXF_extra_info) {
1421 work_to_do = xen_netbk_get_extras(vif, extras,
1422 work_to_do);
1423 idx = vif->tx.req_cons;
Ian Campbell488562862013-02-06 23:41:35 +00001424 if (unlikely(work_to_do < 0))
Ian Campbellf942dc22011-03-15 00:06:18 +00001425 continue;
Ian Campbellf942dc22011-03-15 00:06:18 +00001426 }
1427
Wei Liuac69c262013-05-02 00:43:57 +00001428 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
Ian Campbell488562862013-02-06 23:41:35 +00001429 if (unlikely(ret < 0))
Ian Campbellf942dc22011-03-15 00:06:18 +00001430 continue;
Ian Campbell488562862013-02-06 23:41:35 +00001431
Ian Campbellf942dc22011-03-15 00:06:18 +00001432 idx += ret;
1433
1434 if (unlikely(txreq.size < ETH_HLEN)) {
1435 netdev_dbg(vif->dev,
1436 "Bad packet size: %d\n", txreq.size);
1437 netbk_tx_err(vif, &txreq, idx);
1438 continue;
1439 }
1440
1441 /* No crossing a page as the payload mustn't fragment. */
1442 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
Ian Campbell488562862013-02-06 23:41:35 +00001443 netdev_err(vif->dev,
Ian Campbellf942dc22011-03-15 00:06:18 +00001444 "txreq.offset: %x, size: %u, end: %lu\n",
1445 txreq.offset, txreq.size,
1446 (txreq.offset&~PAGE_MASK) + txreq.size);
Ian Campbell488562862013-02-06 23:41:35 +00001447 netbk_fatal_tx_err(vif);
Ian Campbellf942dc22011-03-15 00:06:18 +00001448 continue;
1449 }
1450
1451 index = pending_index(netbk->pending_cons);
1452 pending_idx = netbk->pending_ring[index];
1453
1454 data_len = (txreq.size > PKT_PROT_LEN &&
Wei Liu37641492013-05-02 00:43:59 +00001455 ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
Ian Campbellf942dc22011-03-15 00:06:18 +00001456 PKT_PROT_LEN : txreq.size;
1457
1458 skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1459 GFP_ATOMIC | __GFP_NOWARN);
1460 if (unlikely(skb == NULL)) {
1461 netdev_dbg(vif->dev,
1462 "Can't allocate a skb in start_xmit.\n");
1463 netbk_tx_err(vif, &txreq, idx);
1464 break;
1465 }
1466
1467 /* Packets passed to netif_rx() must have some headroom. */
1468 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1469
1470 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1471 struct xen_netif_extra_info *gso;
1472 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1473
1474 if (netbk_set_skb_gso(vif, skb, gso)) {
Ian Campbell488562862013-02-06 23:41:35 +00001475 /* Failure in netbk_set_skb_gso is fatal. */
Ian Campbellf942dc22011-03-15 00:06:18 +00001476 kfree_skb(skb);
Ian Campbellf942dc22011-03-15 00:06:18 +00001477 continue;
1478 }
1479 }
1480
1481 /* XXX could copy straight to head */
Wei Liu27f85222013-03-25 01:08:20 +00001482 page = xen_netbk_alloc_page(netbk, pending_idx);
Ian Campbellf942dc22011-03-15 00:06:18 +00001483 if (!page) {
1484 kfree_skb(skb);
1485 netbk_tx_err(vif, &txreq, idx);
1486 continue;
1487 }
1488
Ian Campbellf942dc22011-03-15 00:06:18 +00001489 gop->source.u.ref = txreq.gref;
1490 gop->source.domid = vif->domid;
1491 gop->source.offset = txreq.offset;
1492
1493 gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1494 gop->dest.domid = DOMID_SELF;
1495 gop->dest.offset = txreq.offset;
1496
1497 gop->len = txreq.size;
1498 gop->flags = GNTCOPY_source_gref;
1499
1500 gop++;
1501
1502 memcpy(&netbk->pending_tx_info[pending_idx].req,
1503 &txreq, sizeof(txreq));
1504 netbk->pending_tx_info[pending_idx].vif = vif;
Wei Liu2810e5b2013-04-22 02:20:42 +00001505 netbk->pending_tx_info[pending_idx].head = index;
Ian Campbellf942dc22011-03-15 00:06:18 +00001506 *((u16 *)skb->data) = pending_idx;
1507
1508 __skb_put(skb, data_len);
1509
1510 skb_shinfo(skb)->nr_frags = ret;
1511 if (data_len < txreq.size) {
1512 skb_shinfo(skb)->nr_frags++;
Ian Campbellea066ad2011-10-05 00:28:46 +00001513 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1514 pending_idx);
Ian Campbellf942dc22011-03-15 00:06:18 +00001515 } else {
Ian Campbellea066ad2011-10-05 00:28:46 +00001516 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1517 INVALID_PENDING_IDX);
Ian Campbellf942dc22011-03-15 00:06:18 +00001518 }
1519
Ian Campbellf942dc22011-03-15 00:06:18 +00001520 netbk->pending_cons++;
1521
1522 request_gop = xen_netbk_get_requests(netbk, vif,
1523 skb, txfrags, gop);
1524 if (request_gop == NULL) {
1525 kfree_skb(skb);
1526 netbk_tx_err(vif, &txreq, idx);
1527 continue;
1528 }
1529 gop = request_gop;
1530
Annie Li1e0b6ea2012-06-27 00:46:58 +00001531 __skb_queue_tail(&netbk->tx_queue, skb);
1532
Ian Campbellf942dc22011-03-15 00:06:18 +00001533 vif->tx.req_cons = idx;
1534 xen_netbk_check_rx_xenvif(vif);
1535
1536 if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
1537 break;
1538 }
1539
1540 return gop - netbk->tx_copy_ops;
1541}
1542
1543static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1544{
1545 struct gnttab_copy *gop = netbk->tx_copy_ops;
1546 struct sk_buff *skb;
1547
1548 while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
1549 struct xen_netif_tx_request *txp;
1550 struct xenvif *vif;
1551 u16 pending_idx;
1552 unsigned data_len;
1553
1554 pending_idx = *((u16 *)skb->data);
1555 vif = netbk->pending_tx_info[pending_idx].vif;
1556 txp = &netbk->pending_tx_info[pending_idx].req;
1557
1558 /* Check the remap error code. */
1559 if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
1560 netdev_dbg(vif->dev, "netback grant failed.\n");
1561 skb_shinfo(skb)->nr_frags = 0;
1562 kfree_skb(skb);
1563 continue;
1564 }
1565
1566 data_len = skb->len;
1567 memcpy(skb->data,
1568 (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
1569 data_len);
1570 if (data_len < txp->size) {
1571 /* Append the packet payload as a fragment. */
1572 txp->offset += data_len;
1573 txp->size -= data_len;
1574 } else {
1575 /* Schedule a response immediately. */
Matthew Daley7d5145d2013-02-06 23:41:36 +00001576 xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
Ian Campbellf942dc22011-03-15 00:06:18 +00001577 }
1578
1579 if (txp->flags & XEN_NETTXF_csum_blank)
1580 skb->ip_summed = CHECKSUM_PARTIAL;
1581 else if (txp->flags & XEN_NETTXF_data_validated)
1582 skb->ip_summed = CHECKSUM_UNNECESSARY;
1583
1584 xen_netbk_fill_frags(netbk, skb);
1585
1586 /*
1587 * If the initial fragment was < PKT_PROT_LEN then
1588 * pull through some bytes from the other fragments to
1589 * increase the linear region to PKT_PROT_LEN bytes.
1590 */
1591 if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
1592 int target = min_t(int, skb->len, PKT_PROT_LEN);
1593 __pskb_pull_tail(skb, target - skb_headlen(skb));
1594 }
1595
1596 skb->dev = vif->dev;
1597 skb->protocol = eth_type_trans(skb, skb->dev);
Jason Wangf9ca8f72013-03-25 20:19:58 +00001598 skb_reset_network_header(skb);
Ian Campbellf942dc22011-03-15 00:06:18 +00001599
1600 if (checksum_setup(vif, skb)) {
1601 netdev_dbg(vif->dev,
1602 "Can't setup checksum in net_tx_action\n");
1603 kfree_skb(skb);
1604 continue;
1605 }
1606
Jason Wang40893fd2013-03-26 23:11:22 +00001607 skb_probe_transport_header(skb, 0);
Jason Wangf9ca8f72013-03-25 20:19:58 +00001608
Ian Campbellf942dc22011-03-15 00:06:18 +00001609 vif->dev->stats.rx_bytes += skb->len;
1610 vif->dev->stats.rx_packets++;
1611
1612 xenvif_receive_skb(vif, skb);
1613 }
1614}
1615
1616/* Called after netfront has transmitted: build and run the grant copy batch for the pending tx requests, then submit the completed skbs. */
1617static void xen_netbk_tx_action(struct xen_netbk *netbk)
1618{
1619 unsigned nr_gops;
Ian Campbellf942dc22011-03-15 00:06:18 +00001620
1621 nr_gops = xen_netbk_tx_build_gops(netbk);
1622
1623 if (nr_gops == 0)
1624 return;
Andres Lagar-Cavillac5718982012-09-14 14:26:59 +00001625
1626 gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
Ian Campbellf942dc22011-03-15 00:06:18 +00001627
1628 xen_netbk_tx_submit(netbk);
Ian Campbellf942dc22011-03-15 00:06:18 +00001629}
1630
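/*
 * Complete the packet whose first slot is @pending_idx: send a tx
 * response for every coalesced request, return the slots to the
 * pending ring and release the page backing the data.
 */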
Matthew Daley7d5145d2013-02-06 23:41:36 +00001631static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
1632 u8 status)
Ian Campbellf942dc22011-03-15 00:06:18 +00001633{
1634 struct xenvif *vif;
1635 struct pending_tx_info *pending_tx_info;
Wei Liu2810e5b2013-04-22 02:20:42 +00001636 pending_ring_idx_t head;
1637 u16 peek; /* peek into next tx request */
1638
1639 BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
Ian Campbellf942dc22011-03-15 00:06:18 +00001640
1641 /* Already complete? */
1642 if (netbk->mmap_pages[pending_idx] == NULL)
1643 return;
1644
1645 pending_tx_info = &netbk->pending_tx_info[pending_idx];
1646
1647 vif = pending_tx_info->vif;
Wei Liu2810e5b2013-04-22 02:20:42 +00001648 head = pending_tx_info->head;
Ian Campbellf942dc22011-03-15 00:06:18 +00001649
Wei Liu2810e5b2013-04-22 02:20:42 +00001650 BUG_ON(!pending_tx_is_head(netbk, head));
1651 BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
Ian Campbellf942dc22011-03-15 00:06:18 +00001652
Wei Liu2810e5b2013-04-22 02:20:42 +00001653 do {
1654 pending_ring_idx_t index;
1655 pending_ring_idx_t idx = pending_index(head);
1656 u16 info_idx = netbk->pending_ring[idx];
Ian Campbellf942dc22011-03-15 00:06:18 +00001657
Wei Liu2810e5b2013-04-22 02:20:42 +00001658 pending_tx_info = &netbk->pending_tx_info[info_idx];
1659 make_tx_response(vif, &pending_tx_info->req, status);
Ian Campbellf942dc22011-03-15 00:06:18 +00001660
Wei Liu2810e5b2013-04-22 02:20:42 +00001661 /* Setting any number other than
1662 * INVALID_PENDING_RING_IDX indicates this slot is
1663 * starting a new packet / ending a previous packet.
1664 */
1665 pending_tx_info->head = 0;
1666
1667 index = pending_index(netbk->pending_prod++);
1668 netbk->pending_ring[index] = netbk->pending_ring[info_idx];
1669
1670 xenvif_put(vif);
1671
1672 peek = netbk->pending_ring[pending_index(++head)];
1673
1674 } while (!pending_tx_is_head(netbk, peek));
1675
1676 netbk->mmap_pages[pending_idx]->mapping = 0;
Ian Campbellf942dc22011-03-15 00:06:18 +00001677 put_page(netbk->mmap_pages[pending_idx]);
1678 netbk->mmap_pages[pending_idx] = NULL;
1679}
1680
Wei Liu2810e5b2013-04-22 02:20:42 +00001681
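/*
 * Queue a tx response (plus a null response for any extra-info slot)
 * on the shared ring and notify the frontend if required.
 */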
Ian Campbellf942dc22011-03-15 00:06:18 +00001682static void make_tx_response(struct xenvif *vif,
1683 struct xen_netif_tx_request *txp,
1684 s8 st)
1685{
1686 RING_IDX i = vif->tx.rsp_prod_pvt;
1687 struct xen_netif_tx_response *resp;
1688 int notify;
1689
1690 resp = RING_GET_RESPONSE(&vif->tx, i);
1691 resp->id = txp->id;
1692 resp->status = st;
1693
1694 if (txp->flags & XEN_NETTXF_extra_info)
1695 RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1696
1697 vif->tx.rsp_prod_pvt = ++i;
1698 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1699 if (notify)
Wei Liue1f00a692013-05-22 06:34:45 +00001700 notify_remote_via_irq(vif->tx_irq);
Ian Campbellf942dc22011-03-15 00:06:18 +00001701}
1702
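/*
 * Fill in the next rx response on the shared ring; a negative status
 * overrides the size in the status field.
 */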
1703static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1704 u16 id,
1705 s8 st,
1706 u16 offset,
1707 u16 size,
1708 u16 flags)
1709{
1710 RING_IDX i = vif->rx.rsp_prod_pvt;
1711 struct xen_netif_rx_response *resp;
1712
1713 resp = RING_GET_RESPONSE(&vif->rx, i);
1714 resp->offset = offset;
1715 resp->flags = flags;
1716 resp->id = id;
1717 resp->status = (s16)size;
1718 if (st < 0)
1719 resp->status = (s16)st;
1720
1721 vif->rx.rsp_prod_pvt = ++i;
1722
1723 return resp;
1724}
1725
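/* There is rx work whenever skbs are queued for delivery to a frontend. */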
1726static inline int rx_work_todo(struct xen_netbk *netbk)
1727{
1728 return !skb_queue_empty(&netbk->rx_queue);
1729}
1730
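/*
 * There is tx work when a vif is on the schedule list and at least
 * XEN_NETBK_LEGACY_SLOTS_MAX pending slots are free.
 */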
1731static inline int tx_work_todo(struct xen_netbk *netbk)
1732{
Wei Liu37641492013-05-02 00:43:59 +00001734 if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
Wei Liu2810e5b2013-04-22 02:20:42 +00001735 < MAX_PENDING_REQS) &&
1736 !list_empty(&netbk->net_schedule_list))
Ian Campbellf942dc22011-03-15 00:06:18 +00001737 return 1;
1738
1739 return 0;
1740}
1741
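/*
 * Main loop of a per-group worker thread: sleep until there is rx or
 * tx work (or a stop request), then run the corresponding action.
 */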
1742static int xen_netbk_kthread(void *data)
1743{
1744 struct xen_netbk *netbk = data;
1745 while (!kthread_should_stop()) {
1746 wait_event_interruptible(netbk->wq,
1747 rx_work_todo(netbk) ||
1748 tx_work_todo(netbk) ||
1749 kthread_should_stop());
1750 cond_resched();
1751
1752 if (kthread_should_stop())
1753 break;
1754
1755 if (rx_work_todo(netbk))
1756 xen_netbk_rx_action(netbk);
1757
1758 if (tx_work_todo(netbk))
1759 xen_netbk_tx_action(netbk);
1760 }
1761
1762 return 0;
1763}
1764
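/* Unmap whichever of the frontend's tx/rx shared rings are mapped. */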
1765void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1766{
David Vrabelc9d63692011-09-29 16:53:31 +01001767 if (vif->tx.sring)
1768 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1769 vif->tx.sring);
1770 if (vif->rx.sring)
1771 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1772 vif->rx.sring);
Ian Campbellf942dc22011-03-15 00:06:18 +00001773}
1774
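/*
 * Map the tx and rx shared rings granted by the frontend and initialise
 * the backend's view of them.  Any partial mapping is torn down on error.
 */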
1775int xen_netbk_map_frontend_rings(struct xenvif *vif,
1776 grant_ref_t tx_ring_ref,
1777 grant_ref_t rx_ring_ref)
1778{
David Vrabelc9d63692011-09-29 16:53:31 +01001779 void *addr;
Ian Campbellf942dc22011-03-15 00:06:18 +00001780 struct xen_netif_tx_sring *txs;
1781 struct xen_netif_rx_sring *rxs;
1782
1783 int err = -ENOMEM;
1784
David Vrabelc9d63692011-09-29 16:53:31 +01001785 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1786 tx_ring_ref, &addr);
1787 if (err)
Ian Campbellf942dc22011-03-15 00:06:18 +00001788 goto err;
1789
David Vrabelc9d63692011-09-29 16:53:31 +01001790 txs = (struct xen_netif_tx_sring *)addr;
Ian Campbellf942dc22011-03-15 00:06:18 +00001791 BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1792
David Vrabelc9d63692011-09-29 16:53:31 +01001793 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1794 rx_ring_ref, &addr);
1795 if (err)
Ian Campbellf942dc22011-03-15 00:06:18 +00001796 goto err;
Ian Campbellf942dc22011-03-15 00:06:18 +00001797
David Vrabelc9d63692011-09-29 16:53:31 +01001798 rxs = (struct xen_netif_rx_sring *)addr;
Ian Campbellf942dc22011-03-15 00:06:18 +00001799 BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1800
David Vrabelc9d63692011-09-29 16:53:31 +01001801 vif->rx_req_cons_peek = 0;
1802
Ian Campbellf942dc22011-03-15 00:06:18 +00001803 return 0;
1804
1805err:
1806 xen_netbk_unmap_frontend_rings(vif);
1807 return err;
1808}
1809
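/*
 * Module init: allocate one xen_netbk group per online CPU, start a
 * worker thread and timer for each, then register the xenbus backend.
 */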
1810static int __init netback_init(void)
1811{
1812 int i;
1813 int rc = 0;
1814 int group;
1815
Daniel De Graaf2a14b2442011-12-14 15:12:13 -05001816 if (!xen_domain())
Ian Campbellf942dc22011-03-15 00:06:18 +00001817 return -ENODEV;
1818
Wei Liu37641492013-05-02 00:43:59 +00001819 if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
Joe Perches383eda32013-06-27 21:57:49 -07001820 pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1821 fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
Wei Liu37641492013-05-02 00:43:59 +00001822 fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
Wei Liu2810e5b2013-04-22 02:20:42 +00001823 }
1824
Ian Campbellf942dc22011-03-15 00:06:18 +00001825 xen_netbk_group_nr = num_online_cpus();
1826 xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
Joe Perchese404dec2012-01-29 12:56:23 +00001827 if (!xen_netbk)
Ian Campbellf942dc22011-03-15 00:06:18 +00001828 return -ENOMEM;
Ian Campbellf942dc22011-03-15 00:06:18 +00001829
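	/* Set up each group: queues, timer, pending ring and worker thread. */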
1830 for (group = 0; group < xen_netbk_group_nr; group++) {
1831 struct xen_netbk *netbk = &xen_netbk[group];
1832 skb_queue_head_init(&netbk->rx_queue);
1833 skb_queue_head_init(&netbk->tx_queue);
1834
1835 init_timer(&netbk->net_timer);
1836 netbk->net_timer.data = (unsigned long)netbk;
1837 netbk->net_timer.function = xen_netbk_alarm;
1838
1839 netbk->pending_cons = 0;
1840 netbk->pending_prod = MAX_PENDING_REQS;
1841 for (i = 0; i < MAX_PENDING_REQS; i++)
1842 netbk->pending_ring[i] = i;
1843
1844 init_waitqueue_head(&netbk->wq);
1845 netbk->task = kthread_create(xen_netbk_kthread,
1846 (void *)netbk,
1847 "netback/%u", group);
1848
1849 if (IS_ERR(netbk->task)) {
Joe Perches383eda32013-06-27 21:57:49 -07001850			pr_alert("kthread_create() failed at netback\n");
Ian Campbellf942dc22011-03-15 00:06:18 +00001851 del_timer(&netbk->net_timer);
1852 rc = PTR_ERR(netbk->task);
1853 goto failed_init;
1854 }
1855
1856 kthread_bind(netbk->task, group);
1857
1858 INIT_LIST_HEAD(&netbk->net_schedule_list);
1859
1860 spin_lock_init(&netbk->net_schedule_list_lock);
1861
1862 atomic_set(&netbk->netfront_count, 0);
1863
1864 wake_up_process(netbk->task);
1865 }
1866
1867 rc = xenvif_xenbus_init();
1868 if (rc)
1869 goto failed_init;
1870
1871 return 0;
1872
1873failed_init:
1874 while (--group >= 0) {
1875 struct xen_netbk *netbk = &xen_netbk[group];
Ian Campbellf942dc22011-03-15 00:06:18 +00001876 del_timer(&netbk->net_timer);
1877 kthread_stop(netbk->task);
1878 }
1879 vfree(xen_netbk);
1880 return rc;
1881
1882}
1883
1884module_init(netback_init);
1885
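/*
 * Module exit: unregister from xenbus, stop each group's worker thread
 * and timer, and free any pages still held in mmap_pages.
 */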
Wei Liub103f352013-05-16 23:26:11 +00001886static void __exit netback_fini(void)
1887{
1888 int i, j;
1889
1890 xenvif_xenbus_fini();
1891
1892 for (i = 0; i < xen_netbk_group_nr; i++) {
1893 struct xen_netbk *netbk = &xen_netbk[i];
1894 del_timer_sync(&netbk->net_timer);
1895 kthread_stop(netbk->task);
1896 for (j = 0; j < MAX_PENDING_REQS; j++) {
Dan Carpenter07cc61b2013-06-21 09:20:08 +03001897 if (netbk->mmap_pages[j])
1898 __free_page(netbk->mmap_pages[j]);
Wei Liub103f352013-05-16 23:26:11 +00001899 }
1900 }
1901
1902 vfree(xen_netbk);
1903}
1904module_exit(netback_fini);
1905
Ian Campbellf942dc22011-03-15 00:06:18 +00001906MODULE_LICENSE("Dual BSD/GPL");
Bastian Blankf984cec2011-06-30 11:19:09 -07001907MODULE_ALIAS("xen-backend:vif");