/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * RMNET BAM Module.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/bam_dmux.h>

/* Debug message support */
static int msm_rmnet_bam_debug_mask;
module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {			\
	if (msm_rmnet_bam_debug_mask & m)	\
		pr_info(x);			\
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
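
/*
 * The mask can also be changed at runtime through the debug_enable module
 * parameter, e.g. (assuming the module is built as msm_rmnet_bam):
 *
 *   echo 7 > /sys/module/msm_rmnet_bam/parameters/debug_enable
 *
 * which enables all three debug levels.
 */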

/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define DEVICE_ID_INVALID -1

#define DEVICE_INACTIVE 0
#define DEVICE_ACTIVE 1

#define HEADROOM_FOR_BAM 8 /* for mux header */
#define HEADROOM_FOR_QOS 8
#define TAILROOM 8 /* for padding by mux layer */
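
/*
 * The headroom lets _rmnet_xmit() and the BAM DMUX layer prepend the QoS
 * and mux headers with skb_push() instead of reallocating each skb; the
 * tailroom covers the padding added by the mux layer.
 */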

struct rmnet_private {
	struct net_device_stats stats;
	uint32_t ch_id;
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;
	unsigned long wakeups_xmit;
	unsigned long wakeups_rcv;
	unsigned long timeout_us;
#endif
	struct sk_buff *skb;
	spinlock_t lock;
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	uint8_t device_up;
};

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values, one
 * for when the screen is on (the default) and one for when it is off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t n)
{
	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
	return n;
}

static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
		   timeout_suspend_store);

static void rmnet_early_suspend(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_suspend_us;
	}
}

static void rmnet_late_resume(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_us;
	}
}

static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));

	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
#else
	/* If using early suspend/resume hooks do not write the value on store. */
	timeout_us = simple_strtoul(buf, NULL, 10);
#endif
	return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
#endif

/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = 0;

	skb->dev = dev;

	/* Determine L3 protocol from the IP version nibble */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x\n",
		       dev->name, skb->data[0] & 0xf0);
		/* skb will be dropped in upper layer for unknown protocol */
	}
	return protocol;
}
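
/* ARP frames are not counted in the interface statistics. */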
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}

/* RX callback, called in work queue context */
static void bam_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle RX frame format */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
		     ((struct net_device *)dev)->name,
		     p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		netif_rx(skb);
	} else {
		pr_err("[%s] %s: No skb received\n",
		       ((struct net_device *)dev)->name, __func__);
	}
}
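
/*
 * Push the QMI QoS header (when QoS mode is enabled) and hand the skb to
 * the BAM DMUX layer.  On a successful write the skb is freed later by
 * bam_write_done(); on a failed write it is freed here.
 */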
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int bam_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	bam_ret = msm_bam_dmux_write(p->ch_id, skb);

	if (bam_ret != 0) {
		pr_err("[%s] %s: write returned error %d\n",
		       dev->name, __func__, bam_ret);
		goto xmit_out;
	}

	if (count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	     dev->name, p->stats.tx_packets, skb->len, skb->mark);

	return 0;
xmit_out:
	/* The write failed, so no completion will fire; free the skb here. */
	dev_kfree_skb_any(skb);
	return 0;
}
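
/*
 * TX completion callback from the BAM DMUX layer.  rmnet_xmit() stops the
 * queue before each write, so waking it here bounds the driver to one
 * in-flight skb per device.
 */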
static void bam_write_done(void *dev, struct sk_buff *skb)
{
	DBG1("%s: write complete\n", __func__);
	dev_kfree_skb_any(skb);
	netif_wake_queue(dev);
}

static void bam_notify(void *dev, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	}
}

static int __rmnet_open(struct net_device *dev)
{
	int r;
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] __rmnet_open()\n", dev->name);

	if (!p->device_up) {
		r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);

		if (r < 0)
			return -ENODEV;
	}

	p->device_up = DEVICE_ACTIVE;
	return 0;
}

static int rmnet_open(struct net_device *dev)
{
	int rc = 0;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);

	if (rc == 0)
		netif_start_queue(dev);

	return rc;
}

static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	if (p->device_up) {
		/*
		 * Do not close the rmnet port once it is up; doing so causes
		 * the remote side to hang if the port is opened again.
		 */
		p->device_up = DEVICE_INACTIVE;
		return 0;
	} else {
		return -EBADF;
	}
}

static int rmnet_stop(struct net_device *dev)
{
	DBG0("[%s] rmnet_stop()\n", dev->name);

	__rmnet_close(dev);
	netif_stop_queue(dev);

	return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n",
	     dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}

static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when "
		       "netif_queue is stopped\n", dev->name);
		return 0;
	}

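	/*
	 * Stop the queue until the BAM write completes;
	 * bam_write_done() will wake it.
	 */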
	netif_stop_queue(dev);
	_rmnet_xmit(skb, dev);

	return 0;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open		= rmnet_open,
	.ndo_stop		= rmnet_stop,
	.ndo_start_xmit		= rmnet_xmit,
	.ndo_get_stats		= rmnet_get_stats,
	.ndo_set_multicast_list	= rmnet_set_multicast_list,
	.ndo_tx_timeout		= rmnet_tx_timeout,
	.ndo_do_ioctl		= rmnet_ioctl,
	.ndo_change_mtu		= rmnet_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open		= rmnet_open,
	.ndo_stop		= rmnet_stop,
	.ndo_start_xmit		= rmnet_xmit,
	.ndo_get_stats		= rmnet_get_stats,
	.ndo_set_multicast_list	= rmnet_set_multicast_list,
	.ndo_tx_timeout		= rmnet_tx_timeout,
	.ndo_do_ioctl		= rmnet_ioctl,
	.ndo_change_mtu		= rmnet_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= NULL,
};
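
/*
 * Userspace drives these private ioctls on the rmnetN interface.  A
 * minimal sketch of a caller (assuming the RMNET_IOCTL_* values from
 * <linux/msm_rmnet.h>; error handling omitted):
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "rmnet0", IFNAMSIZ);
 *	ioctl(s, RMNET_IOCTL_SET_LLP_IP, &ifr);     -- switch to raw IP mode
 *	ioctl(s, RMNET_IOCTL_SET_QOS_ENABLE, &ifr); -- prepend QMI QoS headers
 */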
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;
	unsigned long flags;
	int prev_mtu = dev->mtu;
	int rc = 0;

	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			dev->mtu = prev_mtu;

			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): "
			     "set Ethernet protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {

			/* Undo config done in ether_setup() */
			dev->header_ops = NULL;	/* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST |
					IFF_MULTICAST);

			dev->needed_headroom = HEADROOM_FOR_BAM +
						HEADROOM_FOR_QOS;
			dev->needed_tailroom = TAILROOM;
			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): "
			     "set IP protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP:		/* Get link protocol state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode &
				 (RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_GET_QOS:		/* Get QoS header state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode & RMNET_MODE_QOS);
		break;

	case RMNET_IOCTL_GET_OPMODE:		/* Get operation mode */
		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
		break;

	case RMNET_IOCTL_OPEN:			/* Open transport port */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
		     dev->name);
		break;

	case RMNET_IOCTL_CLOSE:			/* Close transport port */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
		     dev->name);
		break;

	default:
		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
		       dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}

static void __init rmnet_setup(struct net_device *dev)
{
	/* Using Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
	dev->needed_tailroom = TAILROOM;
	random_ether_addr(dev->dev_addr);

	dev->watchdog_timeo = 1000;	/* 1000 jiffies: 10 s at HZ=100 */
}

static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;

	pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif

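	/* Each rmnet%d interface is bound 1:1 to BAM DMUX channel n. */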
	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rmnet%d", rmnet_setup);

		if (!dev)
			return -ENOMEM;

		d = &(dev->dev);
		p = netdev_priv(dev);
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->ch_id = n;
		spin_lock_init(&p->lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

		ret = register_netdev(dev);
		if (ret) {
			free_netdev(dev);
			return ret;
		}

#ifdef CONFIG_MSM_RMNET_DEBUG
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;

		/* Only care about rmnet0 for suspend/resume timeout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
	}
	return 0;
}

module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
MODULE_LICENSE("GPL v2");