blob: 883c649931bfac78733d59fc6a2a100a2d4eb6df [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14/*
15 * RMNET SDIO Module.
16 */
17
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/delay.h>
22#include <linux/errno.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/wakelock.h>
29#include <linux/if_arp.h>
30#include <linux/msm_rmnet.h>
31
32#ifdef CONFIG_HAS_EARLYSUSPEND
33#include <linux/earlysuspend.h>
34#endif
35
36#include <mach/sdio_dmux.h>
37
/* Debug message support */
static int msm_rmnet_sdio_debug_mask;
module_param_named(debug_enable, msm_rmnet_sdio_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Bits of msm_rmnet_sdio_debug_mask selecting which DBGn() levels print. */
#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

/* Print only when the corresponding level bit is set in the debug mask. */
#define DBG(m, x...) do {			   \
		if (msm_rmnet_sdio_debug_mask & m) \
			pr_info(x);		   \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define DEVICE_ID_INVALID   -1

/* Values for rmnet_private.device_up */
#define DEVICE_INACTIVE 0
#define DEVICE_ACTIVE 1

#define HEADROOM_FOR_SDIO   8 /* for mux header */
#define HEADROOM_FOR_QOS    8
#define TAILROOM            8 /* for padding by mux layer */
69
/* Per-netdev driver state; lives in netdev_priv(dev). */
struct rmnet_private {
	struct net_device_stats stats;	/* RX/TX counters reported to the stack */
	uint32_t ch_id;			/* SDIO DMUX logical channel number */
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;		/* wall-clock time of the previous packet */
	unsigned long wakeups_xmit;	/* TX packets that ended an idle period */
	unsigned long wakeups_rcv;	/* RX packets that ended an idle period */
	unsigned long timeout_us;	/* idle threshold; 0 disables accounting */
#endif
	struct sk_buff *skb;
	spinlock_t lock;		/* protects operation_mode updates */
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	uint8_t device_up;		/* DEVICE_ACTIVE / DEVICE_INACTIVE */
	uint8_t in_reset;		/* nonzero while SDIO channel is in reset */
};
86
87#ifdef CONFIG_MSM_RMNET_DEBUG
88static unsigned long timeout_us;
89
90#ifdef CONFIG_HAS_EARLYSUSPEND
91/*
92 * If early suspend is enabled then we specify two timeout values,
93 * screen on (default), and screen is off.
94 */
95static unsigned long timeout_suspend_us;
96static struct device *rmnet0;
97
98/* Set timeout in us when the screen is off. */
99static ssize_t timeout_suspend_store(struct device *d,
100 struct device_attribute *attr,
101 const char *buf, size_t n)
102{
103 timeout_suspend_us = strict_strtoul(buf, NULL, 10);
104 return n;
105}
106
107static ssize_t timeout_suspend_show(struct device *d,
108 struct device_attribute *attr,
109 char *buf)
110{
111 return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
112}
113
114static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
115 timeout_suspend_store);
116
117static void rmnet_early_suspend(struct early_suspend *handler)
118{
119 if (rmnet0) {
120 struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
121 p->timeout_us = timeout_suspend_us;
122 }
123}
124
125static void rmnet_late_resume(struct early_suspend *handler)
126{
127 if (rmnet0) {
128 struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
129 p->timeout_us = timeout_us;
130 }
131}
132
/* Early-suspend hooks that swap rmnet0's inactivity timeout between the
 * screen-on and screen-off values. */
static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

/* Register the early-suspend hooks late in boot, after the display
 * subsystem they depend on is available. */
static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
145#endif
146
147/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
148static int rmnet_cause_wakeup(struct rmnet_private *p)
149{
150 int ret = 0;
151 ktime_t now;
152 if (p->timeout_us == 0) /* Check if disabled */
153 return 0;
154
155 /* Use real (wall) time. */
156 now = ktime_get_real();
157
158 if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
159 ret = 1;
160
161 p->last_packet = now;
162 return ret;
163}
164
165static ssize_t wakeups_xmit_show(struct device *d,
166 struct device_attribute *attr,
167 char *buf)
168{
169 struct rmnet_private *p = netdev_priv(to_net_dev(d));
170 return sprintf(buf, "%lu\n", p->wakeups_xmit);
171}
172
173DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
174
175static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
176 char *buf)
177{
178 struct rmnet_private *p = netdev_priv(to_net_dev(d));
179 return sprintf(buf, "%lu\n", p->wakeups_rcv);
180}
181
182DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
183
184/* Set timeout in us. */
185static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
186 const char *buf, size_t n)
187{
188#ifndef CONFIG_HAS_EARLYSUSPEND
189 struct rmnet_private *p = netdev_priv(to_net_dev(d));
190 p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10);
191#else
192/* If using early suspend/resume hooks do not write the value on store. */
193 timeout_us = strict_strtoul(buf, NULL, 10);
194#endif
195 return n;
196}
197
198static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
199 char *buf)
200{
201 struct rmnet_private *p = netdev_priv(to_net_dev(d));
202 p = netdev_priv(to_net_dev(d));
203 return sprintf(buf, "%lu\n", timeout_us);
204}
205
206DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
207#endif
208
209
210/* Forward declaration */
211static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
212
213static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
214{
215 __be16 protocol = 0;
216
217 skb->dev = dev;
218
219 /* Determine L3 protocol */
220 switch (skb->data[0] & 0xf0) {
221 case 0x40:
222 protocol = htons(ETH_P_IP);
223 break;
224 case 0x60:
225 protocol = htons(ETH_P_IPV6);
226 break;
227 default:
228 pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
229 dev->name, skb->data[0] & 0xf0);
230 /* skb will be dropped in upper layer for unknown protocol */
231 }
232 return protocol;
233}
234
/* Decide whether a frame counts toward interface statistics: everything
 * except well-formed Ethernet ARP frames is counted. */
static int count_this_packet(void *_hdr, int len)
{
	const struct ethhdr *eth = _hdr;

	return (len >= ETH_HLEN && eth->h_proto == htons(ETH_P_ARP)) ? 0 : 1;
}
244
245static int sdio_update_reset_state(struct net_device *dev)
246{
247 struct rmnet_private *p = netdev_priv(dev);
248 int new_state;
249
250 new_state = msm_sdio_is_channel_in_reset(p->ch_id);
251
252 if (p->in_reset != new_state) {
253 p->in_reset = (uint8_t)new_state;
254
255 if (p->in_reset)
256 netif_carrier_off(dev);
257 else
258 netif_carrier_on(dev);
259 return 1;
260 }
261 return 0;
262}
263
/* Rx Callback, Called in Work Queue context */
static void sdio_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format */
		spin_lock_irqsave(&p->lock, flags);
		/* Snapshot the mode: an ioctl may flip it concurrently. */
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		/* In IP mode every frame is counted; in Ethernet mode ARP
		 * frames are excluded (see count_this_packet()). */
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
		     ((struct net_device *)dev)->name,
		     p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		/* NOTE(review): per the comment above this runs in workqueue
		 * (process) context, where netif_rx_ni() would normally be
		 * used instead of netif_rx() — confirm the mux callback
		 * context before changing. */
		netif_rx(skb);
	} else {
		/* A NULL skb signals a channel event rather than data:
		 * either the SDIO channel changed reset state, or the
		 * callback was spurious (logged below). */
		spin_lock_irqsave(&p->lock, flags);
		if (!sdio_update_reset_state((struct net_device *)dev))
			pr_err("[%s] %s: No skb received",
			       ((struct net_device *)dev)->name, __func__);
		spin_unlock_irqrestore(&p->lock, flags);
	}
}
307
308static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
309{
310 struct rmnet_private *p = netdev_priv(dev);
311 int sdio_ret;
312 struct QMI_QOS_HDR_S *qmih;
313 u32 opmode;
314 unsigned long flags;
315
316 if (!netif_carrier_ok(dev)) {
317 pr_err("[%s] %s: channel in reset",
318 dev->name, __func__);
319 goto xmit_out;
320 }
321
322 /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
323 spin_lock_irqsave(&p->lock, flags);
324 opmode = p->operation_mode;
325 spin_unlock_irqrestore(&p->lock, flags);
326
327 if (RMNET_IS_MODE_QOS(opmode)) {
328 qmih = (struct QMI_QOS_HDR_S *)
329 skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
330 qmih->version = 1;
331 qmih->flags = 0;
332 qmih->flow_id = skb->mark;
333 }
334
335 dev->trans_start = jiffies;
336 sdio_ret = msm_sdio_dmux_write(p->ch_id, skb);
337
338 if (sdio_ret != 0) {
339 pr_err("[%s] %s: write returned error %d",
340 dev->name, __func__, sdio_ret);
341 goto xmit_out;
342 }
343
344 if (count_this_packet(skb->data, skb->len)) {
345 p->stats.tx_packets++;
346 p->stats.tx_bytes += skb->len;
347#ifdef CONFIG_MSM_RMNET_DEBUG
348 p->wakeups_xmit += rmnet_cause_wakeup(p);
349#endif
350 }
351 DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
352 dev->name, p->stats.tx_packets, skb->len, skb->mark);
353
354 return 0;
355xmit_out:
356 dev_kfree_skb_any(skb);
357 p->stats.tx_errors++;
358 return 0;
359}
360
361static void sdio_write_done(void *dev, struct sk_buff *skb)
362{
363 struct rmnet_private *p = netdev_priv(dev);
364
365 if (skb)
366 dev_kfree_skb_any(skb);
367
368 if (!p->in_reset) {
369 DBG1("%s: write complete skb=%p\n", __func__, skb);
370
371 if (netif_queue_stopped(dev) &&
372 msm_sdio_dmux_is_ch_low(p->ch_id)) {
373 DBG0("%s: Low WM hit, waking queue=%p\n",
374 __func__, skb);
375 netif_wake_queue(dev);
376 }
377 } else {
378 DBG1("%s: write in reset skb=%p\n", __func__, skb);
379 }
380}
381
382static int __rmnet_open(struct net_device *dev)
383{
384 int r;
385 struct rmnet_private *p = netdev_priv(dev);
386
387 DBG0("[%s] __rmnet_open()\n", dev->name);
388
389 if (!p->device_up) {
390 r = msm_sdio_dmux_open(p->ch_id, dev,
391 sdio_recv_notify, sdio_write_done);
392
393 if (r < 0)
394 return -ENODEV;
395 }
396
397 p->device_up = DEVICE_ACTIVE;
398 return 0;
399}
400
401static int rmnet_open(struct net_device *dev)
402{
403 int rc = 0;
404
405 DBG0("[%s] rmnet_open()\n", dev->name);
406
407 rc = __rmnet_open(dev);
408
409 if (rc == 0)
410 netif_start_queue(dev);
411
412 return rc;
413}
414
415
416static int __rmnet_close(struct net_device *dev)
417{
418 struct rmnet_private *p = netdev_priv(dev);
419 int rc = 0;
420
421 if (p->device_up) {
422 /* do not close rmnet port once up, this causes
423 remote side to hang if tried to open again */
424 /* rc = msm_sdio_dmux_close(p->ch_id); */
425 p->device_up = DEVICE_INACTIVE;
426 return rc;
427 } else
428 return -EBADF;
429}
430
431
432static int rmnet_stop(struct net_device *dev)
433{
434 DBG0("[%s] rmnet_stop()\n", dev->name);
435
436 __rmnet_close(dev);
437 netif_stop_queue(dev);
438
439 return 0;
440}
441
442static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
443{
444 if (0 > new_mtu || RMNET_DATA_LEN < new_mtu)
445 return -EINVAL;
446
447 DBG0("[%s] MTU change: old=%d new=%d\n",
448 dev->name, dev->mtu, new_mtu);
449 dev->mtu = new_mtu;
450
451 return 0;
452}
453
454static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
455{
456 struct rmnet_private *p = netdev_priv(dev);
457
458 if (netif_queue_stopped(dev)) {
459 pr_err("[%s]fatal: rmnet_xmit called when "
460 "netif_queue is stopped", dev->name);
461 return 0;
462 }
463
464 _rmnet_xmit(skb, dev);
465
466 if (msm_sdio_dmux_is_ch_full(p->ch_id)) {
467 netif_stop_queue(dev);
468 DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
469 }
470
471 return 0;
472}
473
474static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
475{
476 struct rmnet_private *p = netdev_priv(dev);
477 return &p->stats;
478}
479
/* Multicast filtering is not applicable to this point-to-point link; the
 * empty callback exists only to satisfy the netdev_ops table. */
static void rmnet_set_multicast_list(struct net_device *dev)
{
}

/* TX watchdog fired (see dev->watchdog_timeo in rmnet_setup()); log only,
 * no recovery action is taken. */
static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}
488
/* netdev callbacks used while in Ethernet link-protocol mode. */
static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

/* netdev callbacks for raw-IP mode: identical except that MAC-address
 * handling is disabled (no L2 addressing on a raw-IP link). */
static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};
514
/*
 * Handle RMNET-private ioctls: switch the link protocol between Ethernet
 * and raw IP, toggle the QMI QoS TX header, query mode state, and
 * open/close the transport port. Returns 0 (or the open/close result) on
 * success, -EINVAL for unknown commands.
 */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;	/* kept for the trace below */
	unsigned long flags;
	int prev_mtu = dev->mtu;		/* ether_setup() clobbers mtu */
	int rc = 0;

	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently*/
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			/* restore the MTU that ether_setup() just reset */
			dev->mtu = prev_mtu;

			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): "
				"set Ethernet protocol mode\n",
				dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP:	/* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently*/
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {

			/* Undo config done in ether_setup() */
			dev->header_ops = 0;	/* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST|
					IFF_MULTICAST);

			/* reserve room for the mux + optional QoS headers */
			dev->needed_headroom = HEADROOM_FOR_SDIO +
				HEADROOM_FOR_QOS;
			dev->needed_tailroom = TAILROOM;
			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): "
				"set IP protocol mode\n",
				dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP:	/* Get link protocol state */
		/* mode bits are returned directly through ifru_data */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode &
				 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
			dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
			dev->name);
		break;

	case RMNET_IOCTL_GET_QOS:	/* Get QoS header state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode & RMNET_MODE_QOS);
		break;

	case RMNET_IOCTL_GET_OPMODE:	/* Get operation mode */
		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
		break;

	case RMNET_IOCTL_OPEN:	/* Open transport port */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
			dev->name);
		break;

	case RMNET_IOCTL_CLOSE:	/* Close transport port */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
			dev->name);
		break;

	default:
		pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
			dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
		dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}
623
624static void __init rmnet_setup(struct net_device *dev)
625{
626 /* Using Ethernet mode by default */
627 dev->netdev_ops = &rmnet_ops_ether;
628 ether_setup(dev);
629
630 /* set this after calling ether_setup */
631 dev->mtu = RMNET_DATA_LEN;
632 dev->needed_headroom = HEADROOM_FOR_SDIO + HEADROOM_FOR_QOS ;
633 dev->needed_tailroom = TAILROOM;
634 random_ether_addr(dev->dev_addr);
635
636 dev->watchdog_timeo = 1000; /* 10 seconds? */
637}
638
639
/*
 * Module init: allocate and register RMNET_DEVICE_COUNT "rmnet_sdio%d"
 * netdevs, one per SDIO DMUX channel, and create the debug sysfs files.
 */
static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;

	pr_info("%s: SDIO devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif

	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rmnet_sdio%d", rmnet_setup);

		/* NOTE(review): on a mid-loop failure (here or at
		 * register_netdev below) devices registered in earlier
		 * iterations are not unregistered/freed. */
		if (!dev)
			return -ENOMEM;

		d = &(dev->dev);
		p = netdev_priv(dev);
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->ch_id = n;	/* device index doubles as DMUX channel id */
		spin_lock_init(&p->lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

		ret = register_netdev(dev);
		if (ret) {
			free_netdev(dev);
			return ret;
		}

#ifdef CONFIG_MSM_RMNET_DEBUG
		/* sysfs file-creation failures are non-fatal: skip the
		 * remaining debug files for this device and move on. */
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;

		/* Only care about rmnet0 for suspend/resume timeout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
	}
	return 0;
}
700
701module_init(rmnet_init);
702MODULE_DESCRIPTION("MSM RMNET SDIO TRANSPORT");
703MODULE_LICENSE("GPL v2");
704