/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * RMNET BAM Module.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>
#include <linux/platform_device.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/bam_dmux.h>

/* Debug message support */
static int msm_rmnet_bam_debug_mask;
module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {                       \
        if (msm_rmnet_bam_debug_mask & m)       \
                pr_info(x);                     \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
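
/*
 * Usage sketch (hedged): assuming the module is named msm_rmnet_bam in
 * this tree, the mask is exposed as a writable module parameter, so the
 * LVL0 and LVL1 messages above could typically be enabled at runtime with
 *
 *   echo 3 > /sys/module/msm_rmnet_bam/parameters/debug_enable
 *
 * The exact sysfs path depends on how the kernel names this module.
 */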

/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define DEVICE_ID_INVALID   -1

#define DEVICE_INACTIVE      0
#define DEVICE_ACTIVE        1

#define HEADROOM_FOR_BAM     8 /* for mux header */
#define HEADROOM_FOR_QOS     8
#define TAILROOM             8 /* for padding by mux layer */

struct rmnet_private {
        struct net_device_stats stats;
        uint32_t ch_id;
#ifdef CONFIG_MSM_RMNET_DEBUG
        ktime_t last_packet;
        unsigned long wakeups_xmit;
        unsigned long wakeups_rcv;
        unsigned long timeout_us;
#endif
        struct sk_buff *skb;
        spinlock_t lock;
        struct tasklet_struct tsklt;
        u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
        uint8_t device_up;
        uint8_t waiting_for_ul;
        uint8_t in_reset;
};

static uint8_t ul_is_connected;

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values:
 * one for screen on (the default), and one for screen off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
                                     struct device_attribute *attr,
                                     const char *buf, size_t n)
{
        timeout_suspend_us = simple_strtoul(buf, NULL, 10);
        return n;
}

static ssize_t timeout_suspend_show(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
                   timeout_suspend_store);
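
/*
 * Usage sketch (hedged): device_create_file() in rmnet_init() attaches this
 * attribute to the net device, so the screen-off timeout would typically be
 * reachable through sysfs, e.g.
 *
 *   echo 1000000 > /sys/class/net/rmnet0/timeout_suspend   (1 s, in us)
 *
 * The exact path depends on this kernel's sysfs layout.
 */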

static void rmnet_early_suspend(struct early_suspend *handler)
{
        if (rmnet0) {
                struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
                p->timeout_us = timeout_suspend_us;
        }
}

static void rmnet_late_resume(struct early_suspend *handler)
{
        if (rmnet0) {
                struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
                p->timeout_us = timeout_us;
        }
}

static struct early_suspend rmnet_power_suspend = {
        .suspend = rmnet_early_suspend,
        .resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
        register_early_suspend(&rmnet_power_suspend);
        return 0;
}

late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
        int ret = 0;
        ktime_t now;

        if (p->timeout_us == 0) /* Check if disabled */
                return 0;

        /* Use real (wall) time. */
        now = ktime_get_real();

        if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
                ret = 1;

        p->last_packet = now;
        return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rmnet_private *p = netdev_priv(to_net_dev(d));
        return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
                                char *buf)
{
        struct rmnet_private *p = netdev_priv(to_net_dev(d));
        return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
        struct rmnet_private *p = netdev_priv(to_net_dev(d));
194 p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10);
195#else
196/* If using early suspend/resume hooks do not write the value on store. */
197 timeout_us = strict_strtoul(buf, NULL, 10);
198#endif
        return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
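
/*
 * Usage sketch (hedged): once registered on rmnet0, the idle timeout used
 * by rmnet_cause_wakeup() could be set and read back with, e.g.,
 *
 *   echo 500000 > /sys/class/net/rmnet0/timeout
 *   cat /sys/class/net/rmnet0/timeout
 *
 * (paths assume the usual /sys/class/net layout for this device).
 */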
#endif


/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        __be16 protocol = 0;

        skb->dev = dev;

        /* Determine L3 protocol */
        switch (skb->data[0] & 0xf0) {
        case 0x40:
                protocol = htons(ETH_P_IP);
                break;
        case 0x60:
                protocol = htons(ETH_P_IPV6);
                break;
        default:
                pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
                       dev->name, skb->data[0] & 0xf0);
                /* skb will be dropped in upper layer for unknown protocol */
        }
        return protocol;
}

static int count_this_packet(void *_hdr, int len)
{
        struct ethhdr *hdr = _hdr;

        if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
                return 0;

        return 1;
}

/* Rx Callback, called in Work Queue context */
static void bam_recv_notify(void *dev, struct sk_buff *skb)
{
        struct rmnet_private *p = netdev_priv(dev);
        unsigned long flags;
        u32 opmode;

        if (skb) {
                skb->dev = dev;
                /* Handle Rx frame format */
                spin_lock_irqsave(&p->lock, flags);
                opmode = p->operation_mode;
                spin_unlock_irqrestore(&p->lock, flags);

                if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol = rmnet_ip_type_trans(skb, dev);
                } else {
                        /* Driver in Ethernet mode */
                        skb->protocol = eth_type_trans(skb, dev);
                }
                if (RMNET_IS_MODE_IP(opmode) ||
                    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                }
                DBG1("[%s] Rx packet #%lu len=%d\n",
                     ((struct net_device *)dev)->name,
                     p->stats.rx_packets, skb->len);

                /* Deliver to network stack */
                netif_rx(skb);
        } else
                pr_err("[%s] %s: No skb received",
                       ((struct net_device *)dev)->name, __func__);
}

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        int bam_ret;
        struct QMI_QOS_HDR_S *qmih;
        u32 opmode;
        unsigned long flags;

        /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
        spin_lock_irqsave(&p->lock, flags);
        opmode = p->operation_mode;
        spin_unlock_irqrestore(&p->lock, flags);

        if (RMNET_IS_MODE_QOS(opmode)) {
                qmih = (struct QMI_QOS_HDR_S *)
                        skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
                qmih->version = 1;
                qmih->flags = 0;
                qmih->flow_id = skb->mark;
        }
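
        /*
         * Note (hedged): skb->mark is normally populated by userspace or
         * netfilter, e.g. via the SO_MARK socket option or an iptables
         * MARK target; this driver only copies it into the QoS header.
         */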

        dev->trans_start = jiffies;
        /* once the write succeeds, BAM DMUX owns the skb; do not touch it here */
        bam_ret = msm_bam_dmux_write(p->ch_id, skb);

        if (bam_ret != 0) {
                pr_err("[%s] %s: write returned error %d",
                       dev->name, __func__, bam_ret);
                goto xmit_out;
        }

        return 0;
xmit_out:
        /* the write failed, so ownership of the skb stays here; free it */
        dev_kfree_skb_any(skb);
        return 0;
}

static void bam_write_done(void *dev, struct sk_buff *skb)
{
        struct rmnet_private *p = netdev_priv(dev);
        u32 opmode = p->operation_mode;

        DBG1("%s: write complete\n", __func__);
        if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(skb->data, skb->len)) {
                p->stats.tx_packets++;
                p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
                p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
        }
        DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
             ((struct net_device *)(dev))->name, p->stats.tx_packets,
             skb->len, skb->mark);
        dev_kfree_skb_any(skb);
        if (netif_queue_stopped(dev) &&
            msm_bam_dmux_is_ch_low(p->ch_id)) {
                DBG0("%s: Low WM hit, waking queue=%p\n",
                     __func__, skb);
                netif_wake_queue(dev);
        }
}

static void bam_notify(void *dev, int event, unsigned long data)
{
        struct rmnet_private *p = netdev_priv(dev);

        switch (event) {
        case BAM_DMUX_RECEIVE:
                bam_recv_notify(dev, (struct sk_buff *)(data));
                break;
        case BAM_DMUX_WRITE_DONE:
                bam_write_done(dev, (struct sk_buff *)(data));
                break;
        case BAM_DMUX_UL_CONNECTED:
                ul_is_connected = 1;
                if (p->waiting_for_ul) {
                        netif_wake_queue(dev);
                        p->waiting_for_ul = 0;
                }
                break;
        case BAM_DMUX_UL_DISCONNECTED:
                ul_is_connected = 0;
                break;
        }
}

static int __rmnet_open(struct net_device *dev)
{
        int r;
        struct rmnet_private *p = netdev_priv(dev);

        DBG0("[%s] __rmnet_open()\n", dev->name);

        if (!p->device_up) {
                r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);

                if (r < 0) {
                        DBG0("%s: ch=%d failed with rc %d\n",
                             __func__, p->ch_id, r);
                        return -ENODEV;
                }
        }

        p->device_up = DEVICE_ACTIVE;
        return 0;
}

static int rmnet_open(struct net_device *dev)
{
        int rc = 0;

        DBG0("[%s] rmnet_open()\n", dev->name);

        rc = __rmnet_open(dev);

        if (rc == 0)
                netif_start_queue(dev);

        return rc;
}


static int __rmnet_close(struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        int rc = 0;

418 if (p->device_up) {
419 /* do not close rmnet port once up, this causes
420 remote side to hang if tried to open again */
421 p->device_up = DEVICE_INACTIVE;
422 return rc;
423 } else
424 return -EBADF;
425}


static int rmnet_stop(struct net_device *dev)
{
        DBG0("[%s] rmnet_stop()\n", dev->name);

        __rmnet_close(dev);
        netif_stop_queue(dev);

        return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
440 if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
                return -EINVAL;

        DBG0("[%s] MTU change: old=%d new=%d\n",
             dev->name, dev->mtu, new_mtu);
        dev->mtu = new_mtu;

        return 0;
}

static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);

        if (netif_queue_stopped(dev)) {
                pr_err("[%s] fatal: rmnet_xmit called when "
                       "netif_queue is stopped", dev->name);
                return 0;
        }

        if (!ul_is_connected) {
                p->waiting_for_ul = 1;
                msm_bam_dmux_kickoff_ul_wakeup();
                return NETDEV_TX_BUSY;
        }
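        /*
         * NETDEV_TX_BUSY above asks the core to requeue the skb; the queue
         * is woken again from bam_notify() once BAM_DMUX_UL_CONNECTED
         * arrives.
         */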
        _rmnet_xmit(skb, dev);

        if (msm_bam_dmux_is_ch_full(p->ch_id)) {
                netif_stop_queue(dev);
                DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
        }

        return 0;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
        pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
        .ndo_open = rmnet_open,
        .ndo_stop = rmnet_stop,
        .ndo_start_xmit = rmnet_xmit,
        .ndo_get_stats = rmnet_get_stats,
        .ndo_set_multicast_list = rmnet_set_multicast_list,
        .ndo_tx_timeout = rmnet_tx_timeout,
        .ndo_do_ioctl = rmnet_ioctl,
        .ndo_change_mtu = rmnet_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
        .ndo_open = rmnet_open,
        .ndo_stop = rmnet_stop,
        .ndo_start_xmit = rmnet_xmit,
        .ndo_get_stats = rmnet_get_stats,
        .ndo_set_multicast_list = rmnet_set_multicast_list,
        .ndo_tx_timeout = rmnet_tx_timeout,
        .ndo_do_ioctl = rmnet_ioctl,
        .ndo_change_mtu = rmnet_change_mtu,
        .ndo_set_mac_address = 0,
        .ndo_validate_addr = 0,
};

static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct rmnet_private *p = netdev_priv(dev);
        u32 old_opmode = p->operation_mode;
        unsigned long flags;
        int prev_mtu = dev->mtu;
        int rc = 0;

        /* Process IOCTL command */
        switch (cmd) {
        case RMNET_IOCTL_SET_LLP_ETHERNET:      /* Set Ethernet protocol */
                /* Perform Ethernet config only if in IP mode currently */
                if (p->operation_mode & RMNET_MODE_LLP_IP) {
                        ether_setup(dev);
                        random_ether_addr(dev->dev_addr);
                        dev->mtu = prev_mtu;

                        dev->netdev_ops = &rmnet_ops_ether;
                        spin_lock_irqsave(&p->lock, flags);
                        p->operation_mode &= ~RMNET_MODE_LLP_IP;
                        p->operation_mode |= RMNET_MODE_LLP_ETH;
                        spin_unlock_irqrestore(&p->lock, flags);
                        DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
                             dev->name);
                }
                break;

        case RMNET_IOCTL_SET_LLP_IP:            /* Set RAWIP protocol */
                /* Perform IP config only if in Ethernet mode currently */
                if (p->operation_mode & RMNET_MODE_LLP_ETH) {

                        /* Undo config done in ether_setup() */
                        dev->header_ops = 0;  /* No header */
                        dev->type = ARPHRD_RAWIP;
                        dev->hard_header_len = 0;
                        dev->mtu = prev_mtu;
                        dev->addr_len = 0;
                        dev->flags &= ~(IFF_BROADCAST|
                                        IFF_MULTICAST);

                        dev->needed_headroom = HEADROOM_FOR_BAM +
                                               HEADROOM_FOR_QOS;
                        dev->needed_tailroom = TAILROOM;
                        dev->netdev_ops = &rmnet_ops_ip;
                        spin_lock_irqsave(&p->lock, flags);
                        p->operation_mode &= ~RMNET_MODE_LLP_ETH;
                        p->operation_mode |= RMNET_MODE_LLP_IP;
                        spin_unlock_irqrestore(&p->lock, flags);
                        DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
                             dev->name);
                }
                break;

        case RMNET_IOCTL_GET_LLP:       /* Get link protocol state */
                ifr->ifr_ifru.ifru_data =
                        (void *)(p->operation_mode &
                                 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
                break;

        case RMNET_IOCTL_SET_QOS_ENABLE:        /* Set QoS header enabled */
                spin_lock_irqsave(&p->lock, flags);
                p->operation_mode |= RMNET_MODE_QOS;
                spin_unlock_irqrestore(&p->lock, flags);
                DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
                     dev->name);
                break;

        case RMNET_IOCTL_SET_QOS_DISABLE:       /* Set QoS header disabled */
                spin_lock_irqsave(&p->lock, flags);
                p->operation_mode &= ~RMNET_MODE_QOS;
                spin_unlock_irqrestore(&p->lock, flags);
                DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
                     dev->name);
                break;

        case RMNET_IOCTL_GET_QOS:       /* Get QoS header state */
                ifr->ifr_ifru.ifru_data =
                        (void *)(p->operation_mode & RMNET_MODE_QOS);
                break;

        case RMNET_IOCTL_GET_OPMODE:    /* Get operation mode */
                ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
                break;

        case RMNET_IOCTL_OPEN:          /* Open transport port */
                rc = __rmnet_open(dev);
                DBG0("[%s] rmnet_ioctl(): open transport port\n",
                     dev->name);
                break;

        case RMNET_IOCTL_CLOSE:         /* Close transport port */
                rc = __rmnet_close(dev);
                DBG0("[%s] rmnet_ioctl(): close transport port\n",
                     dev->name);
                break;

        default:
                pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]",
                       dev->name, cmd);
                return -EINVAL;
        }

        DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
             dev->name, __func__, cmd, old_opmode, p->operation_mode);
        return rc;
}

static void __init rmnet_setup(struct net_device *dev)
{
        /* Using Ethernet mode by default */
        dev->netdev_ops = &rmnet_ops_ether;
        ether_setup(dev);

        /* set this after calling ether_setup */
        dev->mtu = RMNET_DATA_LEN;
        dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
        dev->needed_tailroom = TAILROOM;
        random_ether_addr(dev->dev_addr);

637 dev->watchdog_timeo = 1000; /* 10 seconds? */
}

static struct net_device *netdevs[RMNET_DEVICE_COUNT];
static struct platform_driver bam_rmnet_drivers[RMNET_DEVICE_COUNT];

static int bam_rmnet_probe(struct platform_device *pdev)
{
        int i;
        char name[BAM_DMUX_CH_NAME_MAX_LEN];
        struct rmnet_private *p;

        for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
                scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
                if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
                        break;
        }

        if (i >= RMNET_DEVICE_COUNT) {
                /* should not happen: only bam_dmux_ch_%d names are registered */
                pr_err("%s: unknown device %s\n", __func__, pdev->name);
                return -ENODEV;
        }

        p = netdev_priv(netdevs[i]);
        if (p->in_reset) {
                p->in_reset = 0;
                msm_bam_dmux_open(p->ch_id, netdevs[i], bam_notify);
                netif_carrier_on(netdevs[i]);
                netif_start_queue(netdevs[i]);
        }

        return 0;
}

static int bam_rmnet_remove(struct platform_device *pdev)
{
        int i;
        char name[BAM_DMUX_CH_NAME_MAX_LEN];
        struct rmnet_private *p;

        for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
                scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
                if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
                        break;
        }

        if (i >= RMNET_DEVICE_COUNT) {
                /* should not happen: only bam_dmux_ch_%d names are registered */
                pr_err("%s: unknown device %s\n", __func__, pdev->name);
                return -ENODEV;
        }

        p = netdev_priv(netdevs[i]);
        p->in_reset = 1;
        msm_bam_dmux_close(p->ch_id);
        netif_carrier_off(netdevs[i]);
        netif_stop_queue(netdevs[i]);
        return 0;
}

static int __init rmnet_init(void)
{
        int ret;
        struct device *d;
        struct net_device *dev;
        struct rmnet_private *p;
        unsigned n;
        char *tempname;

        pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
        timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
        timeout_suspend_us = 0;
#endif
#endif

        for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
                dev = alloc_netdev(sizeof(struct rmnet_private),
                                   "rmnet%d", rmnet_setup);

                if (!dev) {
                        pr_err("%s: no memory for netdev %d\n", __func__, n);
                        return -ENOMEM;
                }

                netdevs[n] = dev;
                d = &(dev->dev);
                p = netdev_priv(dev);
                /* Initial config uses Ethernet */
                p->operation_mode = RMNET_MODE_LLP_ETH;
                p->ch_id = n;
                p->waiting_for_ul = 0;
                p->in_reset = 0;
                spin_lock_init(&p->lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
                p->timeout_us = timeout_us;
                p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

                ret = register_netdev(dev);
                if (ret) {
                        pr_err("%s: unable to register netdev"
                               " %d rc=%d\n", __func__, n, ret);
                        free_netdev(dev);
                        return ret;
                }

#ifdef CONFIG_MSM_RMNET_DEBUG
                if (device_create_file(d, &dev_attr_timeout))
                        continue;
                if (device_create_file(d, &dev_attr_wakeups_xmit))
                        continue;
                if (device_create_file(d, &dev_attr_wakeups_rcv))
                        continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
                if (device_create_file(d, &dev_attr_timeout_suspend))
                        continue;

                /* Only care about rmnet0 for suspend/resume timeout hooks. */
                if (n == 0)
                        rmnet0 = d;
#endif
#endif
                bam_rmnet_drivers[n].probe = bam_rmnet_probe;
                bam_rmnet_drivers[n].remove = bam_rmnet_remove;
                tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
                if (tempname == NULL)
                        return -ENOMEM;
                scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
                          n);
                bam_rmnet_drivers[n].driver.name = tempname;
                bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
                ret = platform_driver_register(&bam_rmnet_drivers[n]);
                if (ret) {
                        pr_err("%s: registration failed n=%d rc=%d\n",
                               __func__, n, ret);
                        return ret;
                }
        }
        return 0;
}

module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
MODULE_LICENSE("GPL v2");