/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * RMNET BAM Module.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>
#include <linux/platform_device.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/bam_dmux.h>

/* Debug message support */
static int msm_rmnet_bam_debug_mask;
module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
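
/*
 * Runtime control of the mask above (assuming the module is named
 * msm_rmnet_bam):
 *   echo 7 > /sys/module/msm_rmnet_bam/parameters/debug_enable
 * enables all three DBG levels defined below.
 */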

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {			   \
		if (msm_rmnet_bam_debug_mask & m)  \
			pr_info(x);		   \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define DEVICE_ID_INVALID   -1

#define DEVICE_INACTIVE      0
#define DEVICE_ACTIVE        1

#define HEADROOM_FOR_BAM    8 /* for mux header */
#define HEADROOM_FOR_QOS    8
#define TAILROOM            8 /* for padding by mux layer */

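/*
 * Per-device driver state. @waiting_for_ul_skb parks at most one TX skb
 * while the uplink is powering up; bam_notify() flushes it on
 * BAM_DMUX_UL_CONNECTED. @lock protects @operation_mode and
 * @waiting_for_ul_skb.
 */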
struct rmnet_private {
	struct net_device_stats stats;
	uint32_t ch_id;
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;
	unsigned long wakeups_xmit;
	unsigned long wakeups_rcv;
	unsigned long timeout_us;
#endif
	struct sk_buff *waiting_for_ul_skb;
	spinlock_t lock;
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	uint8_t device_up;
	uint8_t in_reset;
};

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled, we specify two timeout values: one for
 * screen on (the default) and one for screen off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t n)
{
	/* strict_strtoul() returns 0 on success and writes the result
	 * through its third argument */
	if (strict_strtoul(buf, 10, &timeout_suspend_us))
		return -EINVAL;
	return n;
}

static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
		   timeout_suspend_store);

static void rmnet_early_suspend(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));

		p->timeout_us = timeout_suspend_us;
	}
}

static void rmnet_late_resume(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));

		p->timeout_us = timeout_us;
	}
}

static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));

	return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

static DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));

	return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

static DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));

	if (strict_strtoul(buf, 10, &timeout_us))
		return -EINVAL;
	p->timeout_us = timeout_us;
#else
	/* If using early suspend/resume hooks, do not write the value
	 * to the device on store. */
	if (strict_strtoul(buf, 10, &timeout_us))
		return -EINVAL;
#endif
	return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%lu\n", timeout_us);
}

static DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
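
/*
 * Example use of the attributes above (assuming device rmnet0, with the
 * files created from rmnet_init()):
 *   cat /sys/class/net/rmnet0/wakeups_xmit
 *   echo 500000 > /sys/class/net/rmnet0/timeout
 */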
#endif


/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

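/*
 * In IP mode there is no link-layer header; infer the L3 protocol from
 * the IP version nibble of the first payload byte (0x4x -> IPv4,
 * 0x6x -> IPv6).
 */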
static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = 0;

	skb->dev = dev;

	/* Determine L3 protocol */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x\n",
		       dev->name, skb->data[0] & 0xf0);
		/* skb will be dropped in upper layer for unknown protocol */
	}
	return protocol;
}

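/* ARP frames are link maintenance, not data; keep them out of the stats. */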
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}

/* Rx callback, called in work queue context */
static void bam_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
		     ((struct net_device *)dev)->name,
		     p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		netif_rx(skb);
	} else {
		pr_err("[%s] %s: No skb received\n",
		       ((struct net_device *)dev)->name, __func__);
	}
}

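/*
 * Hand one skb to the BAM DMUX layer. Returns 0 on success, -EAGAIN if
 * the channel high watermark was hit, -EFAULT if the remote processor
 * is in reset, and -EPERM for any other write failure. On success the
 * mux owns the skb; it is freed in bam_write_done().
 */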
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int bam_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	/* if write() succeeds, skb access is unsafe in this process */
	bam_ret = msm_bam_dmux_write(p->ch_id, skb);

	if (bam_ret != 0 && bam_ret != -EAGAIN && bam_ret != -EFAULT) {
		pr_err("[%s] %s: write returned error %d\n",
		       dev->name, __func__, bam_ret);
		return -EPERM;
	}

	return bam_ret;
}

static void bam_write_done(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 opmode = p->operation_mode;

	DBG1("%s: write complete\n", __func__);
	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	     ((struct net_device *)(dev))->name, p->stats.tx_packets,
	     skb->len, skb->mark);
	dev_kfree_skb_any(skb);
	if (netif_queue_stopped(dev) &&
	    msm_bam_dmux_is_ch_low(p->ch_id)) {
		DBG0("%s: Low WM hit, waking queue=%p\n",
		     __func__, skb);
		netif_wake_queue(dev);
	}
}

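/*
 * Single event callback registered with msm_bam_dmux_open();
 * demultiplexes RX, write-done, and uplink power events.
 */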
static void bam_notify(void *dev, int event, unsigned long data)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;

	switch (event) {
	case BAM_DMUX_RECEIVE:
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_UL_CONNECTED:
		spin_lock_irqsave(&p->lock, flags);
		if (p->waiting_for_ul_skb != NULL) {
			struct sk_buff *skb;
			int ret;

			skb = p->waiting_for_ul_skb;
			p->waiting_for_ul_skb = NULL;
			spin_unlock_irqrestore(&p->lock, flags);
			ret = _rmnet_xmit(skb, dev);
			if (ret) {
				pr_err("%s: error %d dropping delayed TX SKB %p\n",
				       __func__, ret, skb);
				dev_kfree_skb_any(skb);
			}
			netif_wake_queue(dev);
		} else {
			spin_unlock_irqrestore(&p->lock, flags);
		}
		break;
	case BAM_DMUX_UL_DISCONNECTED:
		break;
	}
}

static int __rmnet_open(struct net_device *dev)
{
	int r;
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] __rmnet_open()\n", dev->name);

	if (!p->device_up) {
		r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);

		if (r < 0) {
			DBG0("%s: ch=%d failed with rc %d\n",
			     __func__, p->ch_id, r);
			return -ENODEV;
		}
	}

	p->device_up = DEVICE_ACTIVE;
	return 0;
}

static int rmnet_open(struct net_device *dev)
{
	int rc = 0;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);

	if (rc == 0)
		netif_start_queue(dev);

	return rc;
}


static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	if (p->device_up) {
		/*
		 * Do not close the rmnet port once it is up; doing so causes
		 * the remote side to hang if the port is opened again.
		 */
		p->device_up = DEVICE_INACTIVE;
		return 0;
	} else {
		return -EBADF;
	}
}


static int rmnet_stop(struct net_device *dev)
{
	DBG0("[%s] rmnet_stop()\n", dev->name);

	__rmnet_close(dev);
	netif_stop_queue(dev);

	return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n",
	     dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}

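/*
 * ndo_start_xmit: vote for uplink power first. If the link is not yet
 * awake, park the skb in waiting_for_ul_skb and stop the queue;
 * bam_notify() resumes transmission on BAM_DMUX_UL_CONNECTED.
 */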
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	int awake;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped\n",
		       dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
	awake = msm_bam_dmux_ul_power_vote();
	if (!awake) {
		/* send SKB once wakeup is complete */
		netif_stop_queue(dev);
		p->waiting_for_ul_skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		ret = 0;
		goto exit;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	ret = _rmnet_xmit(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	/*
	 * SSR was detected a bit early. Shut some things down now, and leave
	 * the rest to the main SSR handling code when that happens later.
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}

	if (ret == -EAGAIN) {
		/*
		 * This should not happen.
		 * EAGAIN means we attempted to overflow the high watermark.
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework. It will
		 * retry this packet when the queue is restarted, which
		 * happens in the write_done callback when the low watermark
		 * is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	if (msm_bam_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
	}

exit:
	msm_bam_dmux_ul_power_unvote();
	return ret;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = NULL,
	.ndo_validate_addr = NULL,
};

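/*
 * Handle the RMNET private ioctls: switch the link protocol between
 * Ethernet and raw IP, toggle the QMI QoS header, and open/close the
 * transport. Mode bits live in p->operation_mode, updated under p->lock.
 */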
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;
	unsigned long flags;
	int prev_mtu = dev->mtu;
	int rc = 0;

	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			dev->mtu = prev_mtu;

			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {

			/* Undo config done in ether_setup() */
			dev->header_ops = NULL;  /* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

			dev->needed_headroom = HEADROOM_FOR_BAM +
						HEADROOM_FOR_QOS;
			dev->needed_tailroom = TAILROOM;
			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP:		/* Get link protocol state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode &
				 (RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_GET_QOS:		/* Get QoS header state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode & RMNET_MODE_QOS);
		break;

	case RMNET_IOCTL_GET_OPMODE:		/* Get operation mode */
		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
		break;

	case RMNET_IOCTL_OPEN:			/* Open transport port */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
		     dev->name);
		break;

	case RMNET_IOCTL_CLOSE:			/* Close transport port */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
		     dev->name);
		break;

	default:
		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
		       dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}

static void __init rmnet_setup(struct net_device *dev)
{
	/* Use Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
	dev->needed_tailroom = TAILROOM;
	random_ether_addr(dev->dev_addr);

	dev->watchdog_timeo = 1000; /* 1000 jiffies: 10 seconds at HZ=100 */
}

static struct net_device *netdevs[RMNET_DEVICE_COUNT];
static struct platform_driver bam_rmnet_drivers[RMNET_DEVICE_COUNT];

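/*
 * One platform driver per mux channel; the platform device named
 * "bam_dmux_ch_%d" appears when the modem comes up. After an SSR the
 * device reappears: reopen the channel and restart the queue.
 */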
static int bam_rmnet_probe(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *p;

	for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}
	if (i >= RMNET_DEVICE_COUNT) {
		/* guard against indexing netdevs[] past the end on no match */
		pr_err("%s: no rmnet device for %s\n", __func__, pdev->name);
		return -ENODEV;
	}

	p = netdev_priv(netdevs[i]);
	if (p->in_reset) {
		p->in_reset = 0;
		msm_bam_dmux_open(p->ch_id, netdevs[i], bam_notify);
		netif_carrier_on(netdevs[i]);
		netif_start_queue(netdevs[i]);
	}

	return 0;
}

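/*
 * Modem going down (SSR): mark the device in reset, drop any parked
 * skb, close the mux channel, and stop the queue until probe() runs
 * again.
 */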
static int bam_rmnet_remove(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *p;

	for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}
	if (i >= RMNET_DEVICE_COUNT) {
		/* guard against indexing netdevs[] past the end on no match */
		pr_err("%s: no rmnet device for %s\n", __func__, pdev->name);
		return -ENODEV;
	}

	p = netdev_priv(netdevs[i]);
	p->in_reset = 1;
	if (p->waiting_for_ul_skb != NULL) {
		dev_kfree_skb_any(p->waiting_for_ul_skb);
		p->waiting_for_ul_skb = NULL;
	}
	msm_bam_dmux_close(p->ch_id);
	netif_carrier_off(netdevs[i]);
	netif_stop_queue(netdevs[i]);
	return 0;
}

static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;
	char *tempname;

	pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif

	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rmnet%d", rmnet_setup);

		if (!dev) {
			pr_err("%s: no memory for netdev %d\n", __func__, n);
			return -ENOMEM;
		}

		netdevs[n] = dev;
		d = &(dev->dev);
		p = netdev_priv(dev);
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->ch_id = n;
		p->waiting_for_ul_skb = NULL;
		p->in_reset = 0;
		spin_lock_init(&p->lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

		ret = register_netdev(dev);
		if (ret) {
			pr_err("%s: unable to register netdev %d rc=%d\n",
			       __func__, n, ret);
			free_netdev(dev);
			return ret;
		}

#ifdef CONFIG_MSM_RMNET_DEBUG
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;

		/* Only care about rmnet0 for suspend/resume timeout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
		bam_rmnet_drivers[n].probe = bam_rmnet_probe;
		bam_rmnet_drivers[n].remove = bam_rmnet_remove;
		tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
		if (tempname == NULL)
			return -ENOMEM;
		scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
			  n);
		bam_rmnet_drivers[n].driver.name = tempname;
		bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
		ret = platform_driver_register(&bam_rmnet_drivers[n]);
		if (ret) {
			pr_err("%s: registration failed n=%d rc=%d\n",
			       __func__, n, ret);
			return ret;
		}
	}
	return 0;
}

module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
MODULE_LICENSE("GPL v2");