/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */

#define UETH__VERSION	"29-May-2008"

struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	unsigned		tx_qlen;
/* Minimum number of TX USB requests queued to UDC */
#define TX_REQ_THRESHOLD	5
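	/* state for aggregating several packets into one USB request when
	 * the bound function driver enables multi_pkt_xfer
	 */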
	int			no_tx_req_used;
	int			tx_skb_hold_count;
	u32			tx_req_bufsize;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */


#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 10;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
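
/* With the default qmult of 10 and DEFAULT_QLEN of 2, qlen() below
 * preallocates 20 requests per direction at high/super speed; full
 * speed always uses DEFAULT_QLEN.
 */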

#else	/* full speed (low speed doesn't do bulk) */
#define qmult		1
#endif

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, UETH__VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	struct net_device *net = dev->net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		if (!req->zero)
			dev->net->stats.tx_bytes += req->length-1;
		else
			dev->net->stats.tx_bytes += req->length;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add_tail(&req->list, &dev->tx_reqs);

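	/* For aggregated (multi_pkt_xfer) transfers the request buffer is
	 * owned by this driver rather than by an skb, so instead of freeing
	 * an skb we try to queue the next partially-filled request, if any.
	 */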
	if (dev->port_usb->multi_pkt_xfer) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		if (!list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize */
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding. and some hardware
				 * doesn't like to write zlps.
				 */
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				new_req->length = length;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					net->trans_start = jiffies;
				}
			} else {
				spin_lock(&dev->req_lock);
				list_add(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
			spin_unlock(&dev->req_lock);
		}
	} else {
		spin_unlock(&dev->req_lock);
		dev_kfree_skb_any(skb);
	}

	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

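/* Size and (re)allocate the per-request aggregation buffers.  Each buffer
 * must hold TX_SKB_HOLD_THRESHOLD frames; the 44 bytes per frame cover the
 * RNDIS packet header, and the extra 22 bytes look like additional headroom
 * (an assumption; the original code does not say what they are for).
 */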
static void alloc_tx_buffer(struct eth_dev *dev)
{
	struct list_head	*act;
	struct usb_request	*req;

	dev->tx_req_bufsize = (TX_SKB_HOLD_THRESHOLD *
				(dev->net->mtu
				+ sizeof(struct ethhdr)
				/* size of rndis_packet_msg_type */
				+ 44
				+ 22));

	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		if (!req->buf)
			req->buf = kmalloc(dev->tx_req_bufsize,
						GFP_ATOMIC);
	}
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Allocate memory for tx_reqs to support multi packet transfer */
	if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
		alloc_tx_buffer(dev);

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	dev->tx_skb_hold_count++;
	spin_unlock_irqrestore(&dev->req_lock, flags);

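	/* Aggregation path: copy the frame into the request's buffer and
	 * queue the request once TX_SKB_HOLD_THRESHOLD frames have
	 * accumulated, or sooner when TX_REQ_THRESHOLD or fewer requests
	 * are in flight, so the UDC is never left idle.
	 */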
	if (dev->port_usb->multi_pkt_xfer) {
		memcpy(req->buf + req->length, skb->data, skb->len);
		req->length = req->length + skb->len;
		length = req->length;
		dev_kfree_skb_any(skb);

		spin_lock_irqsave(&dev->req_lock, flags);
		if (dev->tx_skb_hold_count < TX_SKB_HOLD_THRESHOLD) {
			if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
				list_add(&req->list, &dev->tx_reqs);
				spin_unlock_irqrestore(&dev->req_lock, flags);
				goto success;
			}
		}

		dev->no_tx_req_used++;
		spin_unlock_irqrestore(&dev->req_lock, flags);

		spin_lock_irqsave(&dev->lock, flags);
		dev->tx_skb_hold_count = 0;
		spin_unlock_irqrestore(&dev->lock, flags);
	} else {
		length = skb->len;
		req->buf = skb->data;
		req->context = skb;
	}

	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
		req->zero = 0;
		length++;
	}

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget) &&
			(dev->gadget->speed == USB_SPEED_HIGH)) {
		dev->tx_qlen++;
		if (dev->tx_qlen == (qmult/2)) {
			req->no_interrupt = 0;
			dev->tx_qlen = 0;
		} else {
			req->no_interrupt = 1;
		}
	} else {
		req->no_interrupt = 0;
	}

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
	}

	if (retval) {
		if (!dev->port_usb->multi_pkt_xfer)
			dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
success:
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	dev->tx_qlen = 0;
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
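		/* if the host is still using the endpoints, re-select
		 * descriptors for the current speed and re-enable them
		 */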
		if (netif_carrier_ok(net)) {
			if (config_ep_by_speed(dev->gadget, &link->func,
						link->in_ep) ||
			    config_ep_by_speed(dev->gadget, &link->func,
						link->out_ep)) {
				link->in_ep->desc = NULL;
				link->out_ep->desc = NULL;
				spin_unlock_irqrestore(&dev->lock, flags);
				return -EINVAL;
			}
			DBG(dev, "host still using in/out endpoints\n");
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	random_ether_addr(dev_addr);
	return 1;
}

static struct eth_dev	*the_dev;

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	return gether_setup_name(g, ethaddr, "usb");
}

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
		const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_carrier_off(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;
	}

	return status;
}

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	flush_work_sync(&the_dev->work);
	free_netdev(the_dev->net);

	the_dev = NULL;
}


/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->tx_skb_hold_count = 0;
		dev->no_tx_req_used = 0;
		dev->tx_req_bufsize = 0;
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
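		/* aggregation buffers were kmalloc'd by alloc_tx_buffer();
		 * non-aggregated requests borrowed the skb's data instead
		 */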
		if (link->multi_pkt_xfer)
			kfree(req->buf);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}