/* linux/drivers/net/msm_rmnet.c
 *
 * Virtual Ethernet Interface for MSM7K Networking
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/platform_device.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/msm_smd.h>
#include <mach/peripheral-loader.h>

/* Debug message support */
static int msm_rmnet_debug_mask;
module_param_named(debug_enable, msm_rmnet_debug_mask,
                   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {                       \
        if (msm_rmnet_debug_mask & m)           \
                pr_info(x);                     \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
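
/*
 * Usage note (an illustration; assumes the module ends up named msm_rmnet):
 * all three debug levels can be enabled at runtime with
 *   echo 7 > /sys/module/msm_rmnet/parameters/debug_enable
 */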

/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)
static const char *ch_name[RMNET_DEVICE_COUNT] = {
        "DATA5",
        "DATA6",
        "DATA7",
        "DATA8",
        "DATA9",
        "DATA12",
        "DATA13",
        "DATA14",
};

/* XXX should come from smd headers */
#define SMD_PORT_ETHER0 11

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define HEADROOM_FOR_QOS 8
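/* Lets _rmnet_xmit() skb_push() the QMI QoS header without reallocating. */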

static struct completion *port_complete[RMNET_DEVICE_COUNT];

struct rmnet_private {
        smd_channel_t *ch;
        struct net_device_stats stats;
        const char *chname;
        struct wake_lock wake_lock;
#ifdef CONFIG_MSM_RMNET_DEBUG
        ktime_t last_packet;
        unsigned long wakeups_xmit;
        unsigned long wakeups_rcv;
        unsigned long timeout_us;
#endif
        struct sk_buff *skb;
        spinlock_t lock;
        struct tasklet_struct tsklt;
        u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
        struct platform_driver pdrv;
        struct completion complete;
        void *pil;
        struct mutex pil_lock;
};

static uint msm_rmnet_modem_wait;
module_param_named(modem_wait, msm_rmnet_modem_wait,
                   uint, S_IRUGO | S_IWUSR | S_IWGRP);

/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

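/*
 * ARP frames are excluded from the interface statistics (and from the
 * wakeup accounting below) so address resolution does not count as data.
 */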
static int count_this_packet(void *_hdr, int len)
{
        struct ethhdr *hdr = _hdr;

        if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
                return 0;

        return 1;
}

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values: one for
 * when the screen is on (the default) and one for when it is off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
                                     struct device_attribute *attr,
                                     const char *buf, size_t n)
{
        timeout_suspend_us = simple_strtoul(buf, NULL, 10);
        return n;
}

static ssize_t timeout_suspend_show(struct device *d,
                                    struct device_attribute *attr,
                                    char *buf)
{
        return sprintf(buf, "%lu\n", timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
                   timeout_suspend_store);

static void rmnet_early_suspend(struct early_suspend *handler)
{
        if (rmnet0) {
                struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));

                p->timeout_us = timeout_suspend_us;
        }
}

static void rmnet_late_resume(struct early_suspend *handler)
{
        if (rmnet0) {
                struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));

                p->timeout_us = timeout_us;
        }
}

static struct early_suspend rmnet_power_suspend = {
        .suspend = rmnet_early_suspend,
        .resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
        register_early_suspend(&rmnet_power_suspend);
        return 0;
}

late_initcall(rmnet_late_init);
#endif

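/*
 * A packet counts as a "wakeup" when it arrives more than timeout_us after
 * the previous one, i.e. the link was idle long enough that this packet
 * plausibly woke the system.
 */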
/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
        int ret = 0;
        ktime_t now;

        if (p->timeout_us == 0) /* Check if disabled */
                return 0;

        /* Use real (wall) time. */
        now = ktime_get_real();

        if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
                ret = 1;

        p->last_packet = now;
        return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct rmnet_private *p = netdev_priv(to_net_dev(d));

        return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

static DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d,
                                struct device_attribute *attr,
                                char *buf)
{
        struct rmnet_private *p = netdev_priv(to_net_dev(d));

        return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

static DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
        struct rmnet_private *p = netdev_priv(to_net_dev(d));

        p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
#else
        /* If using early suspend/resume hooks do not write the value on store. */
        timeout_us = simple_strtoul(buf, NULL, 10);
#endif
        return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%lu\n", timeout_us);
}

static DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
#endif


static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        __be16 protocol = 0;

        skb->dev = dev;

        /* Determine L3 protocol from the IP version nibble */
        switch (skb->data[0] & 0xf0) {
        case 0x40:
                protocol = htons(ETH_P_IP);
                break;
        case 0x60:
                protocol = htons(ETH_P_IPV6);
                break;
        default:
                pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x\n",
                       dev->name, skb->data[0] & 0xf0);
                /* skb will be dropped in upper layer for unknown protocol */
        }
        return protocol;
}

/* Called in soft-irq context */
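/*
 * Drain every complete packet currently buffered on the SMD channel.
 * Oversized frames, and frames for which no skb could be allocated, are
 * still consumed (smd_read() with a NULL buffer discards the payload) so
 * the channel never stalls on a bad packet.
 */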
static void smd_net_data_handler(unsigned long arg)
{
        struct net_device *dev = (struct net_device *) arg;
        struct rmnet_private *p = netdev_priv(dev);
        struct sk_buff *skb;
        void *ptr = NULL;
        int sz;
        u32 opmode = p->operation_mode;
        unsigned long flags;

        for (;;) {
                sz = smd_cur_packet_size(p->ch);
                if (sz == 0)
                        break;
                if (smd_read_avail(p->ch) < sz)
                        break;

                if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
                    (sz > (dev->mtu + ETH_HLEN))) {
                        pr_err("[%s] rmnet_recv() discarding packet len %d (%d mtu)\n",
                               dev->name, sz, RMNET_IS_MODE_IP(opmode) ?
                               dev->mtu : (dev->mtu + ETH_HLEN));
                        ptr = NULL;
                } else {
                        skb = dev_alloc_skb(sz + NET_IP_ALIGN);
                        if (skb == NULL) {
                                pr_err("[%s] rmnet_recv() cannot allocate skb\n",
                                       dev->name);
                                /* don't hand a stale pointer to smd_read() below */
                                ptr = NULL;
                        } else {
                                skb->dev = dev;
                                skb_reserve(skb, NET_IP_ALIGN);
                                ptr = skb_put(skb, sz);
                                wake_lock_timeout(&p->wake_lock, HZ / 2);
                                if (smd_read(p->ch, ptr, sz) != sz) {
                                        pr_err("[%s] rmnet_recv() smd lied about avail?!\n",
                                               dev->name);
                                        ptr = NULL;
                                        dev_kfree_skb_irq(skb);
                                } else {
                                        /* Handle Rx frame format */
                                        spin_lock_irqsave(&p->lock, flags);
                                        opmode = p->operation_mode;
                                        spin_unlock_irqrestore(&p->lock, flags);

                                        if (RMNET_IS_MODE_IP(opmode)) {
                                                /* Driver in IP mode */
                                                skb->protocol =
                                                        rmnet_ip_type_trans(skb, dev);
                                        } else {
                                                /* Driver in Ethernet mode */
                                                skb->protocol =
                                                        eth_type_trans(skb, dev);
                                        }
                                        if (RMNET_IS_MODE_IP(opmode) ||
                                            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                                                p->wakeups_rcv +=
                                                        rmnet_cause_wakeup(p);
#endif
                                                p->stats.rx_packets++;
                                                p->stats.rx_bytes += skb->len;
                                        }
                                        DBG1("[%s] Rx packet #%lu len=%d\n",
                                             dev->name, p->stats.rx_packets,
                                             skb->len);

                                        /* Deliver to network stack */
                                        netif_rx(skb);
                                }
                                continue;
                        }
                }
                /* Discard path: consume the packet so the channel keeps moving */
                if (smd_read(p->ch, ptr, sz) != sz)
                        pr_err("[%s] rmnet_recv() smd lied about avail?!\n",
                               dev->name);
        }
}

static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);

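/*
 * Tx helper shared by rmnet_xmit() and the flow-resume tasklet.  In QoS
 * mode a QMI QoS header is prepended to each frame; the flow ID comes from
 * skb->mark, which userspace can set per socket with SO_MARK or per flow
 * with the iptables MARK target.
 */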
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        smd_channel_t *ch = p->ch;
        int smd_ret;
        struct QMI_QOS_HDR_S *qmih;
        u32 opmode;
        unsigned long flags;

        /* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
        spin_lock_irqsave(&p->lock, flags);
        opmode = p->operation_mode;
        spin_unlock_irqrestore(&p->lock, flags);

        if (RMNET_IS_MODE_QOS(opmode)) {
                qmih = (struct QMI_QOS_HDR_S *)
                        skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
                qmih->version = 1;
                qmih->flags = 0;
                qmih->flow_id = skb->mark;
        }

        dev->trans_start = jiffies;
        smd_ret = smd_write(ch, skb->data, skb->len);
        if (smd_ret != skb->len) {
                pr_err("[%s] %s: smd_write returned error %d\n",
                       dev->name, __func__, smd_ret);
                p->stats.tx_errors++;
                goto xmit_out;
        }

        if (RMNET_IS_MODE_IP(opmode) ||
            count_this_packet(skb->data, skb->len)) {
                p->stats.tx_packets++;
                p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
                p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
        }
        DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
             dev->name, p->stats.tx_packets, skb->len, skb->mark);

xmit_out:
        /* data transmitted (or dropped), safe to release skb */
        dev_kfree_skb_irq(skb);
        return 0;
}

static void _rmnet_resume_flow(unsigned long param)
{
        struct net_device *dev = (struct net_device *)param;
        struct rmnet_private *p = netdev_priv(dev);
        struct sk_buff *skb = NULL;
        unsigned long flags;

        /*
         * Transmit and re-enable the flow only once, even if multiple
         * tasklets were scheduled by smd_net_notify.
         */
        spin_lock_irqsave(&p->lock, flags);
        if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
                skb = p->skb;
                p->skb = NULL;
                spin_unlock_irqrestore(&p->lock, flags);
                _rmnet_xmit(skb, dev);
                netif_wake_queue(dev);
        } else {
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

static void msm_rmnet_unload_modem(void *pil)
{
        if (pil)
                pil_put(pil);
}

static void *msm_rmnet_load_modem(struct net_device *dev)
{
        void *pil;
        int rc;
        struct rmnet_private *p = netdev_priv(dev);

        pil = pil_get("modem");
        if (IS_ERR(pil))
                pr_err("[%s] %s: modem load failed\n",
                       dev->name, __func__);
        else if (msm_rmnet_modem_wait) {
                rc = wait_for_completion_interruptible_timeout(
                        &p->complete,
                        msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
                if (!rc)
                        rc = -ETIMEDOUT;
                if (rc < 0) {
                        pr_err("[%s] %s: wait for rmnet port failed %d\n",
                               dev->name, __func__, rc);
                        msm_rmnet_unload_modem(pil);
                        pil = ERR_PTR(rc);
                }
        }

        return pil;
}

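/*
 * SMD event callback.  DATA events kick the flow-resume tasklet when a
 * deferred Tx skb now fits, and the Rx tasklet when a complete packet is
 * readable; OPEN/CLOSE events toggle the carrier state.
 */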
static void smd_net_notify(void *_dev, unsigned event)
{
        struct rmnet_private *p = netdev_priv((struct net_device *)_dev);

        switch (event) {
        case SMD_EVENT_DATA:
                spin_lock(&p->lock);
                if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
                        smd_disable_read_intr(p->ch);
                        tasklet_hi_schedule(&p->tsklt);
                }
                spin_unlock(&p->lock);

                if (smd_read_avail(p->ch) &&
                    (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) {
                        smd_net_data_tasklet.data = (unsigned long) _dev;
                        tasklet_schedule(&smd_net_data_tasklet);
                }
                break;

        case SMD_EVENT_OPEN:
                DBG0("%s: opening SMD port\n", __func__);
                netif_carrier_on(_dev);
                if (netif_queue_stopped(_dev)) {
                        DBG0("%s: re-starting if queue\n", __func__);
                        netif_wake_queue(_dev);
                }
                break;

        case SMD_EVENT_CLOSE:
                DBG0("%s: closing SMD port\n", __func__);
                netif_carrier_off(_dev);
                break;
        }
}

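/*
 * Opening the interface loads the modem image through the peripheral
 * loader (PIL) on first use, then opens the backing SMD data channel if
 * it is not already open.
 */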
static int __rmnet_open(struct net_device *dev)
{
        int r;
        void *pil;
        struct rmnet_private *p = netdev_priv(dev);

        mutex_lock(&p->pil_lock);
        if (!p->pil) {
                pil = msm_rmnet_load_modem(dev);
                if (IS_ERR(pil)) {
                        mutex_unlock(&p->pil_lock);
                        return PTR_ERR(pil);
                }
                p->pil = pil;
        }
        mutex_unlock(&p->pil_lock);

        if (!p->ch) {
                r = smd_open(p->chname, &p->ch, dev, smd_net_notify);
                if (r < 0)
                        return -ENODEV;
        }

        smd_disable_read_intr(p->ch);
        return 0;
}

static int __rmnet_close(struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        int rc;
        unsigned long flags;

        if (p->ch) {
                rc = smd_close(p->ch);
                spin_lock_irqsave(&p->lock, flags);
                p->ch = NULL;
                spin_unlock_irqrestore(&p->lock, flags);
                return rc;
        } else {
                return -EBADF;
        }
}

static int rmnet_open(struct net_device *dev)
{
        int rc = 0;

        DBG0("[%s] rmnet_open()\n", dev->name);

        rc = __rmnet_open(dev);
        if (rc == 0)
                netif_start_queue(dev);

        return rc;
}

static int rmnet_stop(struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);

        DBG0("[%s] rmnet_stop()\n", dev->name);

        netif_stop_queue(dev);
        tasklet_kill(&p->tsklt);

        /*
         * TODO: unload the modem safely; currently this causes
         * unnecessary unloads:
         *
         * mutex_lock(&p->pil_lock);
         * msm_rmnet_unload_modem(p->pil);
         * p->pil = NULL;
         * mutex_unlock(&p->pil_lock);
         */

        return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
                return -EINVAL;

        DBG0("[%s] MTU change: old=%d new=%d\n",
             dev->name, dev->mtu, new_mtu);
        dev->mtu = new_mtu;

        return 0;
}

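/*
 * Flow control: if the SMD FIFO cannot take the skb right now, park it in
 * p->skb and stop the queue; the next SMD DATA notification schedules
 * _rmnet_resume_flow() to send it and wake the queue.
 */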
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);
        smd_channel_t *ch = p->ch;
        unsigned long flags;

        if (netif_queue_stopped(dev)) {
                pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped\n",
                       dev->name);
                /* let the stack requeue the skb instead of leaking it */
                return NETDEV_TX_BUSY;
        }

        spin_lock_irqsave(&p->lock, flags);
        smd_enable_read_intr(ch);
        if (smd_write_avail(ch) < skb->len) {
                netif_stop_queue(dev);
                p->skb = skb;
                spin_unlock_irqrestore(&p->lock, flags);
                return 0;
        }
        smd_disable_read_intr(ch);
        spin_unlock_irqrestore(&p->lock, flags);

        _rmnet_xmit(skb, dev);

        return 0;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
        struct rmnet_private *p = netdev_priv(dev);

        return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
        pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
        .ndo_open = rmnet_open,
        .ndo_stop = rmnet_stop,
        .ndo_start_xmit = rmnet_xmit,
        .ndo_get_stats = rmnet_get_stats,
        .ndo_set_multicast_list = rmnet_set_multicast_list,
        .ndo_tx_timeout = rmnet_tx_timeout,
        .ndo_do_ioctl = rmnet_ioctl,
        .ndo_change_mtu = rmnet_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
        .ndo_open = rmnet_open,
        .ndo_stop = rmnet_stop,
        .ndo_start_xmit = rmnet_xmit,
        .ndo_get_stats = rmnet_get_stats,
        .ndo_set_multicast_list = rmnet_set_multicast_list,
        .ndo_tx_timeout = rmnet_tx_timeout,
        .ndo_do_ioctl = rmnet_ioctl,
        .ndo_change_mtu = rmnet_change_mtu,
        .ndo_set_mac_address = NULL,
        .ndo_validate_addr = NULL,
};

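/*
 * Illustrative userspace call (a sketch, not part of this driver):
 * switching rmnet0 into raw-IP mode through the private ioctl looks like
 *
 *   struct ifreq ifr = { .ifr_name = "rmnet0" };
 *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *   ioctl(fd, RMNET_IOCTL_SET_LLP_IP, &ifr);
 */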
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct rmnet_private *p = netdev_priv(dev);
        u32 old_opmode = p->operation_mode;
        unsigned long flags;
        int prev_mtu = dev->mtu;
        int rc = 0;

        /* Process IOCTL command */
        switch (cmd) {
        case RMNET_IOCTL_SET_LLP_ETHERNET:      /* Set Ethernet protocol */
                /* Perform Ethernet config only if in IP mode currently */
                if (p->operation_mode & RMNET_MODE_LLP_IP) {
                        ether_setup(dev);
                        random_ether_addr(dev->dev_addr);
                        dev->mtu = prev_mtu;

                        dev->netdev_ops = &rmnet_ops_ether;
                        spin_lock_irqsave(&p->lock, flags);
                        p->operation_mode &= ~RMNET_MODE_LLP_IP;
                        p->operation_mode |= RMNET_MODE_LLP_ETH;
                        spin_unlock_irqrestore(&p->lock, flags);
                        DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
                             dev->name);
                }
                break;

        case RMNET_IOCTL_SET_LLP_IP:            /* Set RAWIP protocol */
                /* Perform IP config only if in Ethernet mode currently */
                if (p->operation_mode & RMNET_MODE_LLP_ETH) {
                        /* Undo config done in ether_setup() */
                        dev->header_ops = NULL;  /* No header */
                        dev->type = ARPHRD_RAWIP;
                        dev->hard_header_len = 0;
                        dev->mtu = prev_mtu;
                        dev->addr_len = 0;
                        dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

                        dev->netdev_ops = &rmnet_ops_ip;
                        spin_lock_irqsave(&p->lock, flags);
                        p->operation_mode &= ~RMNET_MODE_LLP_ETH;
                        p->operation_mode |= RMNET_MODE_LLP_IP;
                        spin_unlock_irqrestore(&p->lock, flags);
                        DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
                             dev->name);
                }
                break;

        case RMNET_IOCTL_GET_LLP:               /* Get link protocol state */
                ifr->ifr_ifru.ifru_data =
                        (void *)(p->operation_mode &
                                 (RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
                break;

        case RMNET_IOCTL_SET_QOS_ENABLE:        /* Set QoS header enabled */
                spin_lock_irqsave(&p->lock, flags);
                p->operation_mode |= RMNET_MODE_QOS;
                spin_unlock_irqrestore(&p->lock, flags);
                DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
                     dev->name);
                break;

        case RMNET_IOCTL_SET_QOS_DISABLE:       /* Set QoS header disabled */
                spin_lock_irqsave(&p->lock, flags);
                p->operation_mode &= ~RMNET_MODE_QOS;
                spin_unlock_irqrestore(&p->lock, flags);
                DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
                     dev->name);
                break;

        case RMNET_IOCTL_GET_QOS:               /* Get QoS header state */
                ifr->ifr_ifru.ifru_data =
                        (void *)(p->operation_mode & RMNET_MODE_QOS);
                break;

        case RMNET_IOCTL_GET_OPMODE:            /* Get operation mode */
                ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
                break;

        case RMNET_IOCTL_OPEN:                  /* Open transport port */
                rc = __rmnet_open(dev);
                DBG0("[%s] rmnet_ioctl(): open transport port\n",
                     dev->name);
                break;

        case RMNET_IOCTL_CLOSE:                 /* Close transport port */
                rc = __rmnet_close(dev);
                DBG0("[%s] rmnet_ioctl(): close transport port\n",
                     dev->name);
                break;

        default:
                pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
                       dev->name, cmd);
                return -EINVAL;
        }

        DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
             dev->name, __func__, cmd, old_opmode, p->operation_mode);
        return rc;
}

static void __init rmnet_setup(struct net_device *dev)
{
        /* Using Ethernet mode by default */
        dev->netdev_ops = &rmnet_ops_ether;
        ether_setup(dev);

        /* set this after calling ether_setup */
        dev->mtu = RMNET_DATA_LEN;
        dev->needed_headroom = HEADROOM_FOR_QOS;

        random_ether_addr(dev->dev_addr);

        dev->watchdog_timeo = 1000; /* 1000 jiffies (10 seconds at HZ=100) */
}

static int msm_rmnet_smd_probe(struct platform_device *pdev)
{
        int i;

        for (i = 0; i < RMNET_DEVICE_COUNT; i++)
                if (!strcmp(pdev->name, ch_name[i])) {
                        complete_all(port_complete[i]);
                        break;
                }

        return 0;
}

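/*
 * rmnet_init() below registers one platform driver per SMD channel name;
 * msm_rmnet_smd_probe() above runs when the matching platform device
 * appears and completes port_complete[i], releasing any opener waiting in
 * msm_rmnet_load_modem().
 */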
static int __init rmnet_init(void)
{
        int ret;
        struct device *d;
        struct net_device *dev;
        struct rmnet_private *p;
        unsigned n;

        pr_info("%s: SMD devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
        timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
        timeout_suspend_us = 0;
#endif
#endif

        for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
                dev = alloc_netdev(sizeof(struct rmnet_private),
                                   "rmnet%d", rmnet_setup);
                if (!dev)
                        return -ENOMEM;

                d = &(dev->dev);
                p = netdev_priv(dev);
                p->chname = ch_name[n];
                /* Initial config uses Ethernet */
                p->operation_mode = RMNET_MODE_LLP_ETH;
                p->skb = NULL;
                spin_lock_init(&p->lock);
                tasklet_init(&p->tsklt, _rmnet_resume_flow,
                             (unsigned long)dev);
                wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]);
#ifdef CONFIG_MSM_RMNET_DEBUG
                p->timeout_us = timeout_us;
                p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

                init_completion(&p->complete);
                port_complete[n] = &p->complete;
                mutex_init(&p->pil_lock);
                p->pdrv.probe = msm_rmnet_smd_probe;
                p->pdrv.driver.name = ch_name[n];
                p->pdrv.driver.owner = THIS_MODULE;
                ret = platform_driver_register(&p->pdrv);
                if (ret) {
                        free_netdev(dev);
                        return ret;
                }

                ret = register_netdev(dev);
                if (ret) {
                        platform_driver_unregister(&p->pdrv);
                        free_netdev(dev);
                        return ret;
                }

#ifdef CONFIG_MSM_RMNET_DEBUG
                if (device_create_file(d, &dev_attr_timeout))
                        continue;
                if (device_create_file(d, &dev_attr_wakeups_xmit))
                        continue;
                if (device_create_file(d, &dev_attr_wakeups_rcv))
                        continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
                if (device_create_file(d, &dev_attr_timeout_suspend))
                        continue;

                /* Only care about rmnet0 for suspend/resume timeout hooks. */
                if (n == 0)
                        rmnet0 = d;
#endif
#endif
        }
        return 0;
}

module_init(rmnet_init);