/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * RMNET BAM Module.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/bam_dmux.h>

/* Debug message support */
static int msm_rmnet_bam_debug_mask;
module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {			\
	if (msm_rmnet_bam_debug_mask & m)	\
		pr_info(x);			\
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

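/*
 * The mask is also writable at runtime through the module parameter,
 * e.g. (path assumes this file builds as msm_rmnet_bam.ko):
 *	echo 7 > /sys/module/msm_rmnet_bam/parameters/debug_enable
 * enables all three debug levels; echo 0 silences them again.
 */
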
/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define DEVICE_ID_INVALID	-1

#define DEVICE_INACTIVE		0
#define DEVICE_ACTIVE		1

#define HEADROOM_FOR_BAM	8 /* for mux header */
#define HEADROOM_FOR_QOS	8
#define TAILROOM		8 /* for padding by mux layer */

struct rmnet_private {
	struct net_device_stats stats;
	uint32_t ch_id;
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;
	unsigned long wakeups_xmit;
	unsigned long wakeups_rcv;
	unsigned long timeout_us;
#endif
	struct sk_buff *skb;
	spinlock_t lock;
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	uint8_t device_up;
	uint8_t waiting_for_ul;
};

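/*
 * Set from bam_notify() on BAM_DMUX_UL_CONNECTED/UL_DISCONNECTED; shared
 * by all rmnet instances, since they ride the same BAM DMUX uplink.
 */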
static uint8_t ul_is_connected;

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values,
 * screen on (default), and screen is off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t n)
{
	/* strict_strtoul() returns an error code, not the parsed value;
	 * parse into timeout_suspend_us and reject malformed input. */
	if (strict_strtoul(buf, 10, &timeout_suspend_us))
		return -EINVAL;
	return n;
}

static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
		   timeout_suspend_store);

static void rmnet_early_suspend(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_suspend_us;
	}
}

static void rmnet_late_resume(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_us;
	}
}

static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));

	if (strict_strtoul(buf, 10, &timeout_us))
		return -EINVAL;
	p->timeout_us = timeout_us;
#else
/* If using early suspend/resume hooks do not write the value on store. */
	if (strict_strtoul(buf, 10, &timeout_us))
		return -EINVAL;
#endif
	return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	/* timeout_us is module-global; the netdev private data is not
	 * needed here. */
	return sprintf(buf, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
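
/*
 * Example (userspace, assuming the first instance registered as rmnet0):
 *	cat /sys/class/net/rmnet0/timeout
 *	echo 500000 > /sys/class/net/rmnet0/timeout
 * sets the wakeup-accounting timeout to 500 ms.
 */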
#endif


/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

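/*
 * In IP (raw) mode there is no link-layer header, so the L3 protocol is
 * inferred from the IP version nibble in the first payload byte
 * (0x4x -> IPv4, 0x6x -> IPv6).
 */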
static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = 0;

	skb->dev = dev;

	/* Determine L3 protocol */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x\n",
		       dev->name, skb->data[0] & 0xf0);
		/* skb will be dropped in upper layer for unknown protocol */
	}
	return protocol;
}

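/*
 * ARP frames are excluded from the interface packet/byte statistics; only
 * frames the link actually carries as data traffic are accounted.
 */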
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}

/* Rx Callback, Called in Work Queue context */
static void bam_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
		     ((struct net_device *)dev)->name,
		     p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		netif_rx(skb);
	} else {
		pr_err("[%s] %s: No skb received\n",
		       ((struct net_device *)dev)->name, __func__);
	}
}

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int bam_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	bam_ret = msm_bam_dmux_write(p->ch_id, skb);

	if (bam_ret != 0) {
		pr_err("[%s] %s: write returned error %d\n",
		       dev->name, __func__, bam_ret);
		goto xmit_out;
	}

	if (count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	     dev->name, p->stats.tx_packets, skb->len, skb->mark);

	return 0;
xmit_out:
	/* the mux did not accept the skb; drop the packet and free it here */
	dev_kfree_skb_any(skb);
	return 0;
}

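/*
 * TX-complete callback: the skb handed to msm_bam_dmux_write() stays owned
 * by the mux until this point, so it is freed here and the queue (stopped
 * in rmnet_xmit()) is restarted.
 */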
static void bam_write_done(void *dev, struct sk_buff *skb)
{
	DBG1("%s: write complete\n", __func__);
	dev_kfree_skb_any(skb);
	netif_wake_queue(dev);
}

static void bam_notify(void *dev, int event, unsigned long data)
{
	struct rmnet_private *p = netdev_priv(dev);

	switch (event) {
	case BAM_DMUX_RECEIVE:
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_UL_CONNECTED:
		ul_is_connected = 1;
		if (p->waiting_for_ul) {
			netif_wake_queue(dev);
			p->waiting_for_ul = 0;
		}
		break;
	case BAM_DMUX_UL_DISCONNECTED:
		ul_is_connected = 0;
		break;
	}
}

static int __rmnet_open(struct net_device *dev)
{
	int r;
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] __rmnet_open()\n", dev->name);

	if (!p->device_up) {
		r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);

		if (r < 0)
			return -ENODEV;
	}

	p->device_up = DEVICE_ACTIVE;
	return 0;
}

static int rmnet_open(struct net_device *dev)
{
	int rc = 0;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);

	if (rc == 0)
		netif_start_queue(dev);

	return rc;
}


static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int rc = 0;

	if (p->device_up) {
		/* do not close the rmnet port once it is up; doing so causes
		   the remote side to hang when the port is reopened */
		p->device_up = DEVICE_INACTIVE;
		return rc;
	} else
		return -EBADF;
}


static int rmnet_stop(struct net_device *dev)
{
	DBG0("[%s] rmnet_stop()\n", dev->name);

	__rmnet_close(dev);
	netif_stop_queue(dev);

	return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n",
	     dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}

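/*
 * Uplink flow control: if the BAM DMUX uplink is not yet connected, the
 * queue is stopped, a wakeup is kicked off, and NETDEV_TX_BUSY asks the
 * core to requeue the skb; BAM_DMUX_UL_CONNECTED in bam_notify() then
 * restarts the queue.
 */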
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped\n",
		       dev->name);
		/* skb is not consumed; the core will requeue it */
		return NETDEV_TX_BUSY;
	}

	netif_stop_queue(dev);
	if (!ul_is_connected) {
		p->waiting_for_ul = 1;
		msm_bam_dmux_kickoff_ul_wakeup();
		return NETDEV_TX_BUSY;
	}
	_rmnet_xmit(skb, dev);

	return NETDEV_TX_OK;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = NULL,
	.ndo_validate_addr = NULL,
};

static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;
	unsigned long flags;
	int prev_mtu = dev->mtu;
	int rc = 0;

	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol   */
		/* Perform Ethernet config only if in IP mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			dev->mtu = prev_mtu;

			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol      */
		/* Perform IP config only if in Ethernet mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {

			/* Undo config done in ether_setup() */
			dev->header_ops = NULL;  /* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST |
					IFF_MULTICAST);

			dev->needed_headroom = HEADROOM_FOR_BAM +
						HEADROOM_FOR_QOS;
			dev->needed_tailroom = TAILROOM;
			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP:		/* Get link protocol state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode &
				 (RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled  */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_GET_QOS:		/* Get QoS header state    */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode & RMNET_MODE_QOS);
		break;

	case RMNET_IOCTL_GET_OPMODE:		/* Get operation mode      */
		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
		break;

	case RMNET_IOCTL_OPEN:			/* Open transport port     */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
		     dev->name);
		break;

	case RMNET_IOCTL_CLOSE:			/* Close transport port    */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
		     dev->name);
		break;

	default:
		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
		       dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}

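/*
 * Illustrative userspace call sequence (a sketch; assumes the application
 * includes <linux/msm_rmnet.h> for the RMNET_IOCTL_* values):
 *
 *	struct ifreq ifr = { .ifr_name = "rmnet0" };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ioctl(fd, RMNET_IOCTL_SET_LLP_IP, &ifr);      - switch to raw IP mode
 *	ioctl(fd, RMNET_IOCTL_SET_QOS_ENABLE, &ifr);  - prepend QMI QoS headers
 */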
static void __init rmnet_setup(struct net_device *dev)
{
	/* Using Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
	dev->needed_tailroom = TAILROOM;
	random_ether_addr(dev->dev_addr);

	dev->watchdog_timeo = 1000; /* in jiffies: 10 s at HZ=100 */
}


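/*
 * Module init: registers RMNET_DEVICE_COUNT netdevs (rmnet0..rmnet7), each
 * bound to the BAM DMUX logical channel matching its index, starting out
 * in Ethernet mode.
 */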
static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;

	pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif

	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rmnet%d", rmnet_setup);

		if (!dev)
			return -ENOMEM;

		d = &(dev->dev);
		p = netdev_priv(dev);
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->ch_id = n;
		p->waiting_for_ul = 0;
		spin_lock_init(&p->lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

		ret = register_netdev(dev);
		if (ret) {
			free_netdev(dev);
			return ret;
		}

#ifdef CONFIG_MSM_RMNET_DEBUG
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;

		/* Only care about rmnet0 for suspend/resume timeout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
	}
	return 0;
}

module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
MODULE_LICENSE("GPL v2");