/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
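
/*
 * Typical call flow (a sketch, not a complete function driver): a
 * gadget driver calls gether_setup() once at bind time to register
 * the net_device; the function's set_alt() handler calls
 * gether_connect() when the host selects the data interface, and
 * gether_disconnect() when it is deselected; gether_cleanup() runs
 * at unbind.  All of these are defined later in this file.
 */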

#define UETH__VERSION	"29-May-2008"

static struct workqueue_struct	*uether_wq;

struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	unsigned		tx_qlen;
/* Minimum number of TX USB requests queued to UDC */
#define TX_REQ_THRESHOLD	5
	int			no_tx_req_used;
	int			tx_skb_hold_count;
	u32			tx_req_bufsize;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;
	struct work_struct	rx_work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 10;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");

#else	/* full speed (low speed doesn't do bulk) */
#define qmult		1
#endif

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
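
/*
 * Example: with the default qmult of 10 on a dual-speed UDC running
 * at high or super speed, qlen() returns 10 * DEFAULT_QLEN = 20, so
 * alloc_requests() preallocates 20 requests per direction; at full
 * speed it returns just the double-buffering minimum of 2.
 */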

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef DEBUG
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, UETH__VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

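	/*
	 * Worked example (illustrative; real numbers depend on the
	 * function driver and UDC): with an MTU of 1500, an RNDIS-style
	 * header_len of 44, and a 512-byte high-speed bulk maxpacket,
	 * size = 14 + 1500 + 20 + 44 = 1578, which the rounding above
	 * pads up to 2048, i.e. four maxpacket-sized DMA chunks.
	 */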
	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = false;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
			queue = true;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = true;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;
	int			req_cnt = 0;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		/* break the nexus of continuous completion and re-submission */
		if (++req_cnt > qlen(dev->gadget))
			break;

		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			spin_lock_irqsave(&dev->req_lock, flags);
			list_add(&req->list, &dev->rx_reqs);
			spin_unlock_irqrestore(&dev->req_lock, flags);
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void process_rx_w(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
	struct sk_buff	*skb;
	int		status = 0;

	if (!dev->port_usb)
		return;

	while ((skb = skb_dequeue(&dev->rx_frames))) {
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			dev_kfree_skb_any(skb);
			continue;
		}
		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		status = netif_rx_ni(skb);
	}

	if (netif_running(dev->net))
		rx_fill(dev, GFP_KERNEL);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	struct net_device *net = dev->net;
	struct usb_request *new_req;
	struct usb_ep *in;
	int length;
	int retval;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		if (!req->zero)
			dev->net->stats.tx_bytes += req->length - 1;
		else
			dev->net->stats.tx_bytes += req->length;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add_tail(&req->list, &dev->tx_reqs);

	if (dev->port_usb->multi_pkt_xfer) {
		dev->no_tx_req_used--;
		req->length = 0;
		in = dev->port_usb->in_ep;

		if (!list_empty(&dev->tx_reqs)) {
			new_req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
			list_del(&new_req->list);
			spin_unlock(&dev->req_lock);
			if (new_req->length > 0) {
				length = new_req->length;

				/* NCM requires no zlp if transfer is
				 * dwNtbInMaxSize */
				if (dev->port_usb->is_fixed &&
					length == dev->port_usb->fixed_in_len &&
					(length % in->maxpacket) == 0)
					new_req->zero = 0;
				else
					new_req->zero = 1;

				/* use zlp framing on tx for strict CDC-Ether
				 * conformance, though any robust network rx
				 * path ignores extra padding. and some hardware
				 * doesn't like to write zlps.
				 */
				if (new_req->zero && !dev->zlp &&
						(length % in->maxpacket) == 0) {
					new_req->zero = 0;
					length++;
				}

				new_req->length = length;
				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
				switch (retval) {
				default:
					DBG(dev, "tx queue err %d\n", retval);
					new_req->length = 0;
					spin_lock(&dev->req_lock);
					list_add_tail(&new_req->list,
							&dev->tx_reqs);
					spin_unlock(&dev->req_lock);
					break;
				case 0:
					spin_lock(&dev->req_lock);
					dev->no_tx_req_used++;
					spin_unlock(&dev->req_lock);
					net->trans_start = jiffies;
				}
			} else {
				spin_lock(&dev->req_lock);
				/*
				 * Put the idle request at the back of the
				 * queue. The xmit function will put the
				 * unfinished request at the beginning of the
				 * queue.
				 */
				list_add_tail(&new_req->list, &dev->tx_reqs);
				spin_unlock(&dev->req_lock);
			}
		} else {
			spin_unlock(&dev->req_lock);
		}
	} else {
		spin_unlock(&dev->req_lock);
		dev_kfree_skb_any(skb);
	}

	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static int alloc_tx_buffer(struct eth_dev *dev)
{
	struct list_head	*act;
	struct usb_request	*req;

	dev->tx_req_bufsize = (TX_SKB_HOLD_THRESHOLD *
				(dev->net->mtu
				+ sizeof(struct ethhdr)
				/* size of rndis_packet_msg_type */
				+ 44
				+ 22));

	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		if (!req->buf)
			req->buf = kmalloc(dev->tx_req_bufsize,
						GFP_ATOMIC);
		if (!req->buf)
			goto free_buf;
	}
	return 0;

free_buf:
	/* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */
	dev->tx_req_bufsize = 0;
	list_for_each(act, &dev->tx_reqs) {
		req = container_of(act, struct usb_request, list);
		kfree(req->buf);
		/* forget the freed buffer so a retry reallocates it */
		req->buf = NULL;
	}
	return -ENOMEM;
}
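
/*
 * Sizing sketch (illustrative): each tx request buffer must hold up
 * to TX_SKB_HOLD_THRESHOLD aggregated frames, each with worst-case
 * RNDIS framing.  Assuming a threshold of 3 (the value lives in
 * u_ether.h) and an MTU of 1500, that is
 * 3 * (1500 + 14 + 44 + 22) = 4740 bytes per request.
 */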

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;
	bool			multi_pkt_xfer = false;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
		multi_pkt_xfer = dev->port_usb->multi_pkt_xfer;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Allocate memory for tx_reqs to support multi packet transfer */
	if (multi_pkt_xfer && !dev->tx_req_bufsize) {
		retval = alloc_tx_buffer(dev);
		if (retval < 0) {
			/* ndo_start_xmit must not return a raw errno;
			 * drop the packet and report it handled
			 */
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	dev->tx_skb_hold_count++;
	spin_unlock_irqrestore(&dev->req_lock, flags);

	if (multi_pkt_xfer) {
		memcpy(req->buf + req->length, skb->data, skb->len);
		req->length = req->length + skb->len;
		length = req->length;
		dev_kfree_skb_any(skb);

		spin_lock_irqsave(&dev->req_lock, flags);
		if (dev->tx_skb_hold_count < TX_SKB_HOLD_THRESHOLD) {
			if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
				list_add(&req->list, &dev->tx_reqs);
				spin_unlock_irqrestore(&dev->req_lock, flags);
				goto success;
			}
		}

		dev->no_tx_req_used++;
		spin_unlock_irqrestore(&dev->req_lock, flags);

		spin_lock_irqsave(&dev->lock, flags);
		dev->tx_skb_hold_count = 0;
		spin_unlock_irqrestore(&dev->lock, flags);
	} else {
		length = skb->len;
		req->buf = skb->data;
		req->context = skb;
	}

	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
		req->zero = 0;
		length++;
	}

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget) &&
			(dev->gadget->speed == USB_SPEED_HIGH)) {
		dev->tx_qlen++;
		if (dev->tx_qlen == (qmult/2)) {
			req->no_interrupt = 0;
			dev->tx_qlen = 0;
		} else {
			req->no_interrupt = 1;
		}
	} else {
		req->no_interrupt = 0;
	}

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
	}

	if (retval) {
		if (!multi_pkt_xfer)
			dev_kfree_skb_any(skb);
		else
			req->length = 0;
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
success:
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	dev->tx_qlen = 0;
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			if (config_ep_by_speed(dev->gadget, &link->func,
					       link->in_ep) ||
			    config_ep_by_speed(dev->gadget, &link->func,
					       link->out_ep)) {
				link->in_ep->desc = NULL;
				link->out_ep->desc = NULL;
				/* don't leak the lock on the error path */
				spin_unlock_irqrestore(&dev->lock, flags);
				return -EINVAL;
			}
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static u8 host_ethaddr[ETH_ALEN];

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	random_ether_addr(dev_addr);
	return 1;
}
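
/*
 * For example, loading the module with dev_addr=8a:bc:de:f0:12:34
 * (dots are also accepted as separators) sets the device-side MAC;
 * otherwise a random, locally administered address is generated, and
 * the nonzero return lets callers log that a random address is in use.
 */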

static int get_host_ether_addr(u8 *str, u8 *dev_addr)
{
	memcpy(dev_addr, str, ETH_ALEN);
	if (is_valid_ether_addr(dev_addr))
		return 0;

	random_ether_addr(dev_addr);
	memcpy(str, dev_addr, ETH_ALEN);
	return 1;
}

static struct eth_dev *the_dev;

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to be associated with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	return gether_setup_name(g, ethaddr, "usb");
}

/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to be associated with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
		const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_WORK(&dev->rx_work, process_rx_w);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");

	if (get_host_ether_addr(host_ethaddr, dev->host_mac))
		dev_warn(&g->dev, "using random %s ethernet address\n", "host");
	else
		dev_warn(&g->dev, "using previous %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}
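
/*
 * Example (a sketch; cdev and host_mac are illustrative, not defined
 * in this file): a composite gadget's bind callback might do
 *
 *	static u8 host_mac[ETH_ALEN];
 *
 *	status = gether_setup(cdev->gadget, host_mac);
 *	if (status < 0)
 *		return status;
 *
 * after which host_mac holds the host-side address, e.g. for a CDC
 * ECM iMACAddress string descriptor.
 */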

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	flush_work_sync(&the_dev->work);
	free_netdev(the_dev->net);

	the_dev = NULL;
}


/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->tx_skb_hold_count = 0;
		dev->no_tx_req_used = 0;
		dev->tx_req_bufsize = 0;
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
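
/*
 * Example (a sketch; "port" stands for the function's struct gether):
 * a function driver's set_alt() handler typically configures
 * port.in_ep / port.out_ep descriptors for the current speed, then
 *
 *	net = gether_connect(&port);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *
 * and later calls gether_disconnect(&port) from its disable() path.
 */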

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;
	struct sk_buff		*skb;

	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		if (link->multi_pkt_xfer)
			kfree(req->buf);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);

	spin_lock(&dev->rx_frames.lock);
	while ((skb = __skb_dequeue(&dev->rx_frames)))
		dev_kfree_skb_any(skb);
	spin_unlock(&dev->rx_frames.lock);

	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}

static int __init gether_init(void)
{
	uether_wq = create_singlethread_workqueue("uether");
	if (!uether_wq) {
		pr_err("%s: Unable to create workqueue: uether\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
module_init(gether_init);

static void __exit gether_exit(void)
{
	destroy_workqueue(uether_wq);
}
module_exit(gether_exit);
MODULE_DESCRIPTION("ethernet over USB driver");
MODULE_LICENSE("GPL v2");