/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * RMNET SDIO Module.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/sdio_dmux.h>

/* Debug message support */
static int msm_rmnet_sdio_debug_mask;
module_param_named(debug_enable, msm_rmnet_sdio_debug_mask,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {                          \
        if (msm_rmnet_sdio_debug_mask & m)         \
                pr_info(x);                        \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define DEVICE_ID_INVALID   -1

#define DEVICE_INACTIVE      0
#define DEVICE_ACTIVE        1

#define HEADROOM_FOR_SDIO    8 /* for mux header */
#define HEADROOM_FOR_QOS     8
#define TAILROOM             8 /* for padding by mux layer */

struct rmnet_private {
        struct net_device_stats stats;
        uint32_t ch_id;
#ifdef CONFIG_MSM_RMNET_DEBUG
        ktime_t last_packet;
        unsigned long wakeups_xmit;
        unsigned long wakeups_rcv;
        unsigned long timeout_us;
#endif
        struct sk_buff *skb;
        spinlock_t lock;
        spinlock_t tx_queue_lock;
        struct tasklet_struct tsklt;
        u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
        uint8_t device_up;
        uint8_t in_reset;
};

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values,
 * screen on (default), and screen is off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
                                     struct device_attribute *attr,
                                     const char *buf, size_t n)
{
        timeout_suspend_us = simple_strtoul(buf, NULL, 10);
        return n;
}

static ssize_t timeout_suspend_show(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
                   timeout_suspend_store);

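/* Early-suspend hook: switch rmnet0 to the screen-off inactivity timeout. */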
static void rmnet_early_suspend(struct early_suspend *handler)
{
        if (rmnet0) {
                struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
                p->timeout_us = timeout_suspend_us;
        }
}

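/* Late-resume hook: restore the default (screen-on) timeout for rmnet0. */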
static void rmnet_late_resume(struct early_suspend *handler)
{
        if (rmnet0) {
                struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
                p->timeout_us = timeout_us;
        }
}

static struct early_suspend rmnet_power_suspend = {
        .suspend = rmnet_early_suspend,
        .resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
        register_early_suspend(&rmnet_power_suspend);
        return 0;
}

late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
        int ret = 0;
        ktime_t now;
        if (p->timeout_us == 0) /* Check if disabled */
                return 0;

        /* Use real (wall) time. */
        now = ktime_get_real();

        if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
                ret = 1;

        p->last_packet = now;
        return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rmnet_private *p = netdev_priv(to_net_dev(d));
        return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
                                char *buf)
{
        struct rmnet_private *p = netdev_priv(to_net_dev(d));
        return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
        struct rmnet_private *p = netdev_priv(to_net_dev(d));
        p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
#else
/* If using early suspend/resume hooks do not write the value on store. */
        timeout_us = simple_strtoul(buf, NULL, 10);
#endif
        return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
#endif


/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

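/* Determine the L3 protocol of a raw-IP frame from its IP version nibble. */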
static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        __be16 protocol = 0;

        skb->dev = dev;

        /* Determine L3 protocol */
        switch (skb->data[0] & 0xf0) {
        case 0x40:
                protocol = htons(ETH_P_IP);
                break;
        case 0x60:
                protocol = htons(ETH_P_IPV6);
                break;
        default:
                pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
                       dev->name, skb->data[0] & 0xf0);
                /* skb will be dropped in upper layer for unknown protocol */
        }
        return protocol;
}

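/* Do not count ARP frames in the interface statistics. */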
static int count_this_packet(void *_hdr, int len)
{
        struct ethhdr *hdr = _hdr;

        if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
                return 0;

        return 1;
}

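/*
 * Sync the carrier state with the SDIO DMUX channel reset state.
 * Returns 1 if the reset state changed, 0 otherwise.
 */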
static int sdio_update_reset_state(struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        int new_state;

        new_state = msm_sdio_is_channel_in_reset(p->ch_id);

        if (p->in_reset != new_state) {
                p->in_reset = (uint8_t)new_state;

                if (p->in_reset)
                        netif_carrier_off(dev);
                else
                        netif_carrier_on(dev);
                return 1;
        }
        return 0;
}

/* Rx Callback, Called in Work Queue context */
static void sdio_recv_notify(void *dev, struct sk_buff *skb)
{
        struct rmnet_private *p = netdev_priv(dev);
        unsigned long flags;
        u32 opmode;

        if (skb) {
                skb->dev = dev;
                /* Handle Rx frame format */
                spin_lock_irqsave(&p->lock, flags);
                opmode = p->operation_mode;
                spin_unlock_irqrestore(&p->lock, flags);

                if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol = rmnet_ip_type_trans(skb, dev);
                } else {
                        /* Driver in Ethernet mode */
                        skb->protocol = eth_type_trans(skb, dev);
                }
                if (RMNET_IS_MODE_IP(opmode) ||
                    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                }
                DBG1("[%s] Rx packet #%lu len=%d\n",
                     ((struct net_device *)dev)->name,
                     p->stats.rx_packets, skb->len);

                /* Deliver to network stack */
                netif_rx(skb);
        } else {
                spin_lock_irqsave(&p->lock, flags);
                if (!sdio_update_reset_state((struct net_device *)dev))
                        pr_err("[%s] %s: No skb received",
                               ((struct net_device *)dev)->name, __func__);
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

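/*
 * Common transmit path: prepend the QMI QoS header when QoS mode is
 * enabled, then queue the skb on the SDIO DMUX channel. The skb is
 * freed here on error.
 */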
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        int sdio_ret;
        struct QMI_QOS_HDR_S *qmih;
        u32 opmode;
        unsigned long flags;

        if (!netif_carrier_ok(dev)) {
                pr_err("[%s] %s: channel in reset",
                       dev->name, __func__);
                goto xmit_out;
        }

        /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
        spin_lock_irqsave(&p->lock, flags);
        opmode = p->operation_mode;
        spin_unlock_irqrestore(&p->lock, flags);

        if (RMNET_IS_MODE_QOS(opmode)) {
                qmih = (struct QMI_QOS_HDR_S *)
                        skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
                qmih->version = 1;
                qmih->flags = 0;
                qmih->flow_id = skb->mark;
        }

        dev->trans_start = jiffies;
        sdio_ret = msm_sdio_dmux_write(p->ch_id, skb);

        if (sdio_ret != 0) {
                pr_err("[%s] %s: write returned error %d",
                       dev->name, __func__, sdio_ret);
                goto xmit_out;
        }

        if (count_this_packet(skb->data, skb->len)) {
                p->stats.tx_packets++;
                p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
                p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
        }
        DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
             dev->name, p->stats.tx_packets, skb->len, skb->mark);

        return 0;
xmit_out:
        dev_kfree_skb_any(skb);
        p->stats.tx_errors++;
        return 0;
}

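/*
 * Tx-complete callback from the SDIO DMUX: free the transmitted skb and
 * restart the netif queue once the channel drains below its low watermark.
 */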
static void sdio_write_done(void *dev, struct sk_buff *skb)
{
        struct rmnet_private *p = netdev_priv(dev);
        unsigned long flags;

        if (skb)
                dev_kfree_skb_any(skb);

        if (!p->in_reset) {
                DBG1("%s: write complete skb=%p\n", __func__, skb);

                spin_lock_irqsave(&p->tx_queue_lock, flags);
                if (netif_queue_stopped(dev) &&
                    msm_sdio_dmux_is_ch_low(p->ch_id)) {
                        DBG0("%s: Low WM hit, waking queue=%p\n",
                             __func__, skb);
                        netif_wake_queue(dev);
                }
                spin_unlock_irqrestore(&p->tx_queue_lock, flags);
        } else {
                DBG1("%s: write in reset skb=%p\n", __func__, skb);
        }
}

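/* Open the SDIO DMUX data channel (first open only) and mark the device active. */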
static int __rmnet_open(struct net_device *dev)
{
        int r;
        struct rmnet_private *p = netdev_priv(dev);

        DBG0("[%s] __rmnet_open()\n", dev->name);

        if (!p->device_up) {
                r = msm_sdio_dmux_open(p->ch_id, dev,
                                       sdio_recv_notify, sdio_write_done);

                if (r < 0)
                        return -ENODEV;
        }

        p->device_up = DEVICE_ACTIVE;
        return 0;
}

static int rmnet_open(struct net_device *dev)
{
        int rc = 0;

        DBG0("[%s] rmnet_open()\n", dev->name);

        rc = __rmnet_open(dev);

        if (rc == 0)
                netif_start_queue(dev);

        return rc;
}


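/* Mark the device inactive; the underlying DMUX channel is intentionally left open. */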
static int __rmnet_close(struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        int rc = 0;

        if (p->device_up) {
                /* do not close rmnet port once up, this causes
                   remote side to hang if tried to open again */
                /* rc = msm_sdio_dmux_close(p->ch_id); */
                p->device_up = DEVICE_INACTIVE;
                return rc;
        } else
                return -EBADF;
}


static int rmnet_stop(struct net_device *dev)
{
        DBG0("[%s] rmnet_stop()\n", dev->name);

        __rmnet_close(dev);
        netif_stop_queue(dev);

        return 0;
}

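/* Allow MTU changes only within [0, RMNET_DATA_LEN]. */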
static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
                return -EINVAL;

        DBG0("[%s] MTU change: old=%d new=%d\n",
             dev->name, dev->mtu, new_mtu);
        dev->mtu = new_mtu;

        return 0;
}

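/*
 * ndo_start_xmit: hand the frame to the common Tx path, then stop the
 * netif queue if the DMUX channel has reached its high watermark.
 */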
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        unsigned long flags;

        if (netif_queue_stopped(dev)) {
                pr_err("[%s]fatal: rmnet_xmit called when "
                       "netif_queue is stopped", dev->name);
                return 0;
        }

        _rmnet_xmit(skb, dev);

        spin_lock_irqsave(&p->tx_queue_lock, flags);
        if (msm_sdio_dmux_is_ch_full(p->ch_id)) {
                netif_stop_queue(dev);
                DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
        }
        spin_unlock_irqrestore(&p->tx_queue_lock, flags);

        return 0;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
        pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
        .ndo_open = rmnet_open,
        .ndo_stop = rmnet_stop,
        .ndo_start_xmit = rmnet_xmit,
        .ndo_get_stats = rmnet_get_stats,
        .ndo_set_multicast_list = rmnet_set_multicast_list,
        .ndo_tx_timeout = rmnet_tx_timeout,
        .ndo_do_ioctl = rmnet_ioctl,
        .ndo_change_mtu = rmnet_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
        .ndo_open = rmnet_open,
        .ndo_stop = rmnet_stop,
        .ndo_start_xmit = rmnet_xmit,
        .ndo_get_stats = rmnet_get_stats,
        .ndo_set_multicast_list = rmnet_set_multicast_list,
        .ndo_tx_timeout = rmnet_tx_timeout,
        .ndo_do_ioctl = rmnet_ioctl,
        .ndo_change_mtu = rmnet_change_mtu,
        .ndo_set_mac_address = 0,
        .ndo_validate_addr = 0,
};

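/*
 * Handle rmnet-private ioctls: switch between Ethernet and raw-IP link
 * protocols, toggle the QMI QoS header, query the operating mode, and
 * open/close the transport port.
 */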
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct rmnet_private *p = netdev_priv(dev);
        u32 old_opmode = p->operation_mode;
        unsigned long flags;
        int prev_mtu = dev->mtu;
        int rc = 0;

        /* Process IOCTL command */
        switch (cmd) {
        case RMNET_IOCTL_SET_LLP_ETHERNET:      /* Set Ethernet protocol */
                /* Perform Ethernet config only if in IP mode currently */
                if (p->operation_mode & RMNET_MODE_LLP_IP) {
                        ether_setup(dev);
                        random_ether_addr(dev->dev_addr);
                        dev->mtu = prev_mtu;

                        dev->netdev_ops = &rmnet_ops_ether;
                        spin_lock_irqsave(&p->lock, flags);
                        p->operation_mode &= ~RMNET_MODE_LLP_IP;
                        p->operation_mode |= RMNET_MODE_LLP_ETH;
                        spin_unlock_irqrestore(&p->lock, flags);
                        DBG0("[%s] rmnet_ioctl(): "
                             "set Ethernet protocol mode\n",
                             dev->name);
                }
                break;

        case RMNET_IOCTL_SET_LLP_IP:            /* Set RAWIP protocol */
                /* Perform IP config only if in Ethernet mode currently */
                if (p->operation_mode & RMNET_MODE_LLP_ETH) {

                        /* Undo config done in ether_setup() */
                        dev->header_ops = 0;  /* No header */
                        dev->type = ARPHRD_RAWIP;
                        dev->hard_header_len = 0;
                        dev->mtu = prev_mtu;
                        dev->addr_len = 0;
                        dev->flags &= ~(IFF_BROADCAST|
                                        IFF_MULTICAST);

                        dev->needed_headroom = HEADROOM_FOR_SDIO +
                                               HEADROOM_FOR_QOS;
                        dev->needed_tailroom = TAILROOM;
                        dev->netdev_ops = &rmnet_ops_ip;
                        spin_lock_irqsave(&p->lock, flags);
                        p->operation_mode &= ~RMNET_MODE_LLP_ETH;
                        p->operation_mode |= RMNET_MODE_LLP_IP;
                        spin_unlock_irqrestore(&p->lock, flags);
                        DBG0("[%s] rmnet_ioctl(): "
                             "set IP protocol mode\n",
                             dev->name);
                }
                break;

        case RMNET_IOCTL_GET_LLP:               /* Get link protocol state */
                ifr->ifr_ifru.ifru_data =
                        (void *)(p->operation_mode &
                                 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
                break;

        case RMNET_IOCTL_SET_QOS_ENABLE:        /* Set QoS header enabled */
                spin_lock_irqsave(&p->lock, flags);
                p->operation_mode |= RMNET_MODE_QOS;
                spin_unlock_irqrestore(&p->lock, flags);
                DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
                     dev->name);
                break;

        case RMNET_IOCTL_SET_QOS_DISABLE:       /* Set QoS header disabled */
                spin_lock_irqsave(&p->lock, flags);
                p->operation_mode &= ~RMNET_MODE_QOS;
                spin_unlock_irqrestore(&p->lock, flags);
                DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
                     dev->name);
                break;

        case RMNET_IOCTL_GET_QOS:               /* Get QoS header state */
                ifr->ifr_ifru.ifru_data =
                        (void *)(p->operation_mode & RMNET_MODE_QOS);
                break;

        case RMNET_IOCTL_GET_OPMODE:            /* Get operation mode */
                ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
                break;

        case RMNET_IOCTL_OPEN:                  /* Open transport port */
                rc = __rmnet_open(dev);
                DBG0("[%s] rmnet_ioctl(): open transport port\n",
                     dev->name);
                break;

        case RMNET_IOCTL_CLOSE:                 /* Close transport port */
                rc = __rmnet_close(dev);
                DBG0("[%s] rmnet_ioctl(): close transport port\n",
                     dev->name);
                break;

        default:
                pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]",
                       dev->name, cmd);
                return -EINVAL;
        }

        DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
             dev->name, __func__, cmd, old_opmode, p->operation_mode);
        return rc;
}

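/* netdev setup callback for alloc_netdev(); devices default to Ethernet mode. */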
static void __init rmnet_setup(struct net_device *dev)
{
        /* Using Ethernet mode by default */
        dev->netdev_ops = &rmnet_ops_ether;
        ether_setup(dev);

        /* set this after calling ether_setup */
        dev->mtu = RMNET_DATA_LEN;
        dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS;
        dev->needed_tailroom = TAILROOM;
        random_ether_addr(dev->dev_addr);

        dev->watchdog_timeo = 1000; /* 10 seconds? */
}


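/*
 * Module init: allocate and register RMNET_DEVICE_COUNT rmnet_sdio%d
 * network devices, one per SDIO DMUX data channel, and create their
 * debug sysfs attributes.
 */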
static int __init rmnet_init(void)
{
        int ret;
        struct device *d;
        struct net_device *dev;
        struct rmnet_private *p;
        unsigned n;

        pr_info("%s: SDIO devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
        timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
        timeout_suspend_us = 0;
#endif
#endif

        for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
                dev = alloc_netdev(sizeof(struct rmnet_private),
                                   "rmnet_sdio%d", rmnet_setup);

                if (!dev)
                        return -ENOMEM;

                d = &(dev->dev);
                p = netdev_priv(dev);
                /* Initial config uses Ethernet */
                p->operation_mode = RMNET_MODE_LLP_ETH;
                p->ch_id = n;
                spin_lock_init(&p->lock);
                spin_lock_init(&p->tx_queue_lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
                p->timeout_us = timeout_us;
                p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

                ret = register_netdev(dev);
                if (ret) {
                        free_netdev(dev);
                        return ret;
                }

#ifdef CONFIG_MSM_RMNET_DEBUG
                if (device_create_file(d, &dev_attr_timeout))
                        continue;
                if (device_create_file(d, &dev_attr_wakeups_xmit))
                        continue;
                if (device_create_file(d, &dev_attr_wakeups_rcv))
                        continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
                if (device_create_file(d, &dev_attr_timeout_suspend))
                        continue;

                /* Only care about rmnet0 for suspend/resume timeout hooks. */
                if (n == 0)
                        rmnet0 = d;
#endif
#endif
        }
        return 0;
}

module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET SDIO TRANSPORT");
MODULE_LICENSE("GPL v2");