/* linux/drivers/net/msm_rmnet.c
 *
 * Virtual Ethernet Interface for MSM7K Networking
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 * Author: Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/platform_device.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/msm_smd.h>
#include <mach/peripheral-loader.h>

/* Debug message support */
static int msm_rmnet_debug_mask;
module_param_named(debug_enable, msm_rmnet_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {			\
	if (msm_rmnet_debug_mask & m)		\
		pr_info(x);			\
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

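/*
 * Example usage (assuming the driver is built as the msm_rmnet module):
 * enable level-0 and level-1 messages at runtime with
 *
 *	echo 3 > /sys/module/msm_rmnet/parameters/debug_enable
 */
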
/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)
static const char *ch_name[RMNET_DEVICE_COUNT] = {
	"DATA5",
	"DATA6",
	"DATA7",
	"DATA8",
	"DATA9",
	"DATA12",
	"DATA13",
	"DATA14",
};

/* XXX should come from smd headers */
#define SMD_PORT_ETHER0 11

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define HEADROOM_FOR_QOS 8

static struct completion *port_complete[RMNET_DEVICE_COUNT];

struct rmnet_private {
	smd_channel_t *ch;
	struct net_device_stats stats;
	const char *chname;
	struct wake_lock wake_lock;
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;
	unsigned long wakeups_xmit;
	unsigned long wakeups_rcv;
	unsigned long timeout_us;
#endif
	struct sk_buff *skb;
	spinlock_t lock;
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	struct platform_driver pdrv;
	struct completion complete;
	void *pil;
	struct mutex pil_lock;
};

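/* Seconds to wait for the rmnet SMD port after loading the modem (0 = no wait). */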
static uint msm_rmnet_modem_wait;
module_param_named(modem_wait, msm_rmnet_modem_wait,
		   uint, S_IRUGO | S_IWUSR | S_IWGRP);

/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

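/*
 * ARP exchanges are link maintenance rather than user data, so in
 * Ethernet mode they are excluded from the packet/byte counters.
 */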
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values,
 * screen on (default), and screen is off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t n)
{
	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
	return n;
}

static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
		   timeout_suspend_store);

static void rmnet_early_suspend(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_suspend_us;
	}
}

static void rmnet_late_resume(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_us;
	}
}

static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d,
				struct device_attribute *attr,
				char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
#else
	/* If using early suspend/resume hooks do not write the value on store. */
	timeout_us = simple_strtoul(buf, NULL, 10);
#endif
	return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
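
/*
 * These attributes appear under the net device's sysfs node, e.g.
 *	/sys/class/net/rmnet0/timeout
 *	/sys/class/net/rmnet0/wakeups_xmit
 * Writing a timeout of 0 disables wakeup accounting entirely.
 */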
#endif

static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = 0;

	skb->dev = dev;

	/* Determine L3 protocol from the IP version nibble */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x\n",
		       dev->name, skb->data[0] & 0xf0);
		/* skb will be dropped in upper layer for unknown protocol */
	}
	return protocol;
}

/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb;
	void *ptr = 0;
	int sz;
	u32 opmode = p->operation_mode;
	unsigned long flags;

	for (;;) {
		sz = smd_cur_packet_size(p->ch);
		if (sz == 0)
			break;
		if (smd_read_avail(p->ch) < sz)
			break;

		skb = dev_alloc_skb(sz + NET_IP_ALIGN);
		if (skb == NULL) {
			pr_err("[%s] rmnet_recv() cannot allocate skb\n",
			       dev->name);
		} else {
			skb->dev = dev;
			skb_reserve(skb, NET_IP_ALIGN);
			ptr = skb_put(skb, sz);
			wake_lock_timeout(&p->wake_lock, HZ / 2);
			if (smd_read(p->ch, ptr, sz) != sz) {
				pr_err("[%s] rmnet_recv() smd lied about avail?!\n",
				       dev->name);
				ptr = 0;
				dev_kfree_skb_irq(skb);
			} else {
				/* Handle Rx frame format */
				spin_lock_irqsave(&p->lock, flags);
				opmode = p->operation_mode;
				spin_unlock_irqrestore(&p->lock, flags);

				if (RMNET_IS_MODE_IP(opmode)) {
					/* Driver in IP mode */
					skb->protocol =
						rmnet_ip_type_trans(skb, dev);
				} else {
					/* Driver in Ethernet mode */
					skb->protocol =
						eth_type_trans(skb, dev);
				}
				if (RMNET_IS_MODE_IP(opmode) ||
				    count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
					p->wakeups_rcv +=
						rmnet_cause_wakeup(p);
#endif
					p->stats.rx_packets++;
					p->stats.rx_bytes += skb->len;
				}
				DBG1("[%s] Rx packet #%lu len=%d\n",
				     dev->name, p->stats.rx_packets,
				     skb->len);

				/* Deliver to network stack */
				netif_rx(skb);
			}
			continue;
		}
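		/*
		 * skb allocation failed: the packet is still read out (with
		 * ptr == NULL) so the channel does not stall; this relies on
		 * smd_read() discarding data when given a NULL buffer.
		 */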
		if (smd_read(p->ch, ptr, sz) != sz)
			pr_err("[%s] rmnet_recv() smd lied about avail?!\n",
			       dev->name);
	}
}

static DECLARE_TASKLET(smd_net_data_tasklet, smd_net_data_handler, 0);

static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	int smd_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

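	/*
	 * skb->mark carries the QoS flow ID; userspace can set it per
	 * socket with setsockopt(SO_MARK) or per flow with the netfilter
	 * MARK target.
	 */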
	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	smd_ret = smd_write(ch, skb->data, skb->len);
	if (smd_ret != skb->len) {
		pr_err("[%s] %s: smd_write returned error %d\n",
		       dev->name, __func__, smd_ret);
		p->stats.tx_errors++;
		goto xmit_out;
	}

	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	     dev->name, p->stats.tx_packets, skb->len, skb->mark);

xmit_out:
	/* data transmitted (or dropped on error), safe to release skb */
	dev_kfree_skb_irq(skb);
	return 0;
}

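/*
 * Flow control: when rmnet_xmit() finds the SMD fifo short of space, it
 * parks the skb in p->skb, stops the queue, and leaves the read interrupt
 * enabled.  The next SMD_EVENT_DATA schedules this tasklet, which sends
 * the parked skb and wakes the queue once write space is available.
 */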
static void _rmnet_resume_flow(unsigned long param)
{
	struct net_device *dev = (struct net_device *)param;
	struct rmnet_private *p = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	unsigned long flags;

	/* xmit and enable the flow only once even if
	   multiple tasklets were scheduled by smd_net_notify */
	spin_lock_irqsave(&p->lock, flags);
	if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
		skb = p->skb;
		p->skb = NULL;
		spin_unlock_irqrestore(&p->lock, flags);
		_rmnet_xmit(skb, dev);
		netif_wake_queue(dev);
	} else
		spin_unlock_irqrestore(&p->lock, flags);
}

static void msm_rmnet_unload_modem(void *pil)
{
	if (pil)
		pil_put(pil);
}

static void *msm_rmnet_load_modem(struct net_device *dev)
{
	void *pil;
	int rc;
	struct rmnet_private *p = netdev_priv(dev);

	pil = pil_get("modem");
	if (IS_ERR(pil))
		pr_err("[%s] %s: modem load failed\n",
		       dev->name, __func__);
	else if (msm_rmnet_modem_wait) {
		rc = wait_for_completion_interruptible_timeout(
			&p->complete,
			msecs_to_jiffies(msm_rmnet_modem_wait * 1000));
		if (!rc)
			rc = -ETIMEDOUT;
		if (rc < 0) {
			pr_err("[%s] %s: wait for rmnet port failed %d\n",
			       dev->name, __func__, rc);
			msm_rmnet_unload_modem(pil);
			pil = ERR_PTR(rc);
		}
	}

	return pil;
}

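/*
 * SMD event callback; runs in interrupt context.  SMD_EVENT_DATA fires
 * both when receive data arrives and when transmit space frees up, so
 * it may schedule the resume-flow tasklet (for a parked skb) and the
 * rx tasklet (for a complete pending packet).
 */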
static void smd_net_notify(void *_dev, unsigned event)
{
	struct rmnet_private *p = netdev_priv((struct net_device *)_dev);

	switch (event) {
	case SMD_EVENT_DATA:
		spin_lock(&p->lock);
		if (p->skb && (smd_write_avail(p->ch) >= p->skb->len)) {
			smd_disable_read_intr(p->ch);
			tasklet_hi_schedule(&p->tsklt);
		}

		spin_unlock(&p->lock);

		if (smd_read_avail(p->ch) &&
		    (smd_read_avail(p->ch) >= smd_cur_packet_size(p->ch))) {
			smd_net_data_tasklet.data = (unsigned long) _dev;
			tasklet_schedule(&smd_net_data_tasklet);
		}
		break;

	case SMD_EVENT_OPEN:
		DBG0("%s: opening SMD port\n", __func__);
		netif_carrier_on(_dev);
		if (netif_queue_stopped(_dev)) {
			DBG0("%s: re-starting if queue\n", __func__);
			netif_wake_queue(_dev);
		}
		break;

	case SMD_EVENT_CLOSE:
		DBG0("%s: closing SMD port\n", __func__);
		netif_carrier_off(_dev);
		break;
	}
}

static int __rmnet_open(struct net_device *dev)
{
	int r;
	void *pil;
	struct rmnet_private *p = netdev_priv(dev);

	mutex_lock(&p->pil_lock);
	if (!p->pil) {
		pil = msm_rmnet_load_modem(dev);
		if (IS_ERR(pil)) {
			mutex_unlock(&p->pil_lock);
			return PTR_ERR(pil);
		}
		p->pil = pil;
	}
	mutex_unlock(&p->pil_lock);

	if (!p->ch) {
		r = smd_open(p->chname, &p->ch, dev, smd_net_notify);

		if (r < 0)
			return -ENODEV;
	}

	smd_disable_read_intr(p->ch);
	return 0;
}

static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int rc;
	unsigned long flags;

	if (p->ch) {
		rc = smd_close(p->ch);
		spin_lock_irqsave(&p->lock, flags);
		p->ch = 0;
		spin_unlock_irqrestore(&p->lock, flags);
		return rc;
	} else
		return -EBADF;
}

static int rmnet_open(struct net_device *dev)
{
	int rc = 0;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);
	if (rc == 0)
		netif_start_queue(dev);

	return rc;
}

static int rmnet_stop(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] rmnet_stop()\n", dev->name);

	netif_stop_queue(dev);
	tasklet_kill(&p->tsklt);

	/* TODO: unload modem safely;
	   currently, this causes unnecessary unloads */
	/*
	mutex_lock(&p->pil_lock);
	msm_rmnet_unload_modem(p->pil);
	p->pil = NULL;
	mutex_unlock(&p->pil_lock);
	*/

	return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n",
	     dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}

static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	smd_channel_t *ch = p->ch;
	unsigned long flags;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped\n",
		       dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
	smd_enable_read_intr(ch);
	if (smd_write_avail(ch) < skb->len) {
		netif_stop_queue(dev);
		p->skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		return 0;
	}
	smd_disable_read_intr(ch);
	spin_unlock_irqrestore(&p->lock, flags);

	_rmnet_xmit(skb, dev);

	return 0;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = 0,
	.ndo_validate_addr = 0,
};

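/*
 * A minimal userspace sketch (hypothetical, for illustration): the private
 * ioctls below are reached through a socket ioctl carrying the interface
 * name, e.g. to put rmnet0 into RAWIP mode:
 *
 *	struct ifreq ifr = { .ifr_name = "rmnet0" };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	ioctl(fd, RMNET_IOCTL_SET_LLP_IP, &ifr);
 *	close(fd);
 */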
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;
	unsigned long flags;
	int prev_mtu = dev->mtu;
	int rc = 0;

	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			dev->mtu = prev_mtu;

			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {

			/* Undo config done in ether_setup() */
			dev->header_ops = 0;  /* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP:		/* Get link protocol state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode &
				 (RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_GET_QOS:		/* Get QoS header state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode & RMNET_MODE_QOS);
		break;

	case RMNET_IOCTL_GET_OPMODE:		/* Get operation mode */
		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
		break;

	case RMNET_IOCTL_OPEN:			/* Open transport port */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
		     dev->name);
		break;

	case RMNET_IOCTL_CLOSE:			/* Close transport port */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
		     dev->name);
		break;

	default:
		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
		       dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}

static void __init rmnet_setup(struct net_device *dev)
{
	/* Using Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	dev->needed_headroom = HEADROOM_FOR_QOS;

	random_ether_addr(dev->dev_addr);

	dev->watchdog_timeo = 1000; /* 1000 jiffies: 10 seconds at HZ=100 */
}

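/*
 * Each rmnet device registers a platform driver named after its SMD
 * channel.  When the modem comes up and exports that channel, this probe
 * fires and completes port_complete[i], which msm_rmnet_load_modem()
 * waits on before opening the port.
 */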
static int msm_rmnet_smd_probe(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < RMNET_DEVICE_COUNT; i++)
		if (!strcmp(pdev->name, ch_name[i])) {
			complete_all(port_complete[i]);
			break;
		}

	return 0;
}

static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;

	pr_info("%s: SMD devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif

	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rmnet%d", rmnet_setup);

		if (!dev)
			return -ENOMEM;

		d = &(dev->dev);
		p = netdev_priv(dev);
		p->chname = ch_name[n];
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->skb = NULL;
		spin_lock_init(&p->lock);
		tasklet_init(&p->tsklt, _rmnet_resume_flow,
			     (unsigned long)dev);
		wake_lock_init(&p->wake_lock, WAKE_LOCK_SUSPEND, ch_name[n]);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

		init_completion(&p->complete);
		port_complete[n] = &p->complete;
		mutex_init(&p->pil_lock);
		p->pdrv.probe = msm_rmnet_smd_probe;
		p->pdrv.driver.name = ch_name[n];
		p->pdrv.driver.owner = THIS_MODULE;
		ret = platform_driver_register(&p->pdrv);
		if (ret) {
			free_netdev(dev);
			return ret;
		}

		ret = register_netdev(dev);
		if (ret) {
			platform_driver_unregister(&p->pdrv);
			free_netdev(dev);
			return ret;
		}

#ifdef CONFIG_MSM_RMNET_DEBUG
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;

		/* Only care about rmnet0 for suspend/resume timeout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
	}
	return 0;
}

module_init(rmnet_init);