/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * RMNET BAM Module.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>
#include <linux/platform_device.h>

#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif

#include <mach/bam_dmux.h>

/* Debug message support */
static int msm_rmnet_bam_debug_mask;
module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
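/*
 * debug_enable is a bitmask of the DEBUG_MASK_LVL* levels below and can
 * be changed at runtime, e.g. (assuming the object is built as
 * msm_rmnet_bam.ko):
 *
 *   echo 3 > /sys/module/msm_rmnet_bam/parameters/debug_enable
 */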

#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)

#define DBG(m, x...) do {			\
	if (msm_rmnet_bam_debug_mask & m)	\
		pr_info(x);			\
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)

/* Configure device instances */
#define RMNET_DEVICE_COUNT (8)

/* allow larger frames */
#define RMNET_DATA_LEN 2000

#define DEVICE_ID_INVALID -1

#define DEVICE_INACTIVE 0
#define DEVICE_ACTIVE 1

#define HEADROOM_FOR_BAM 8 /* for mux header */
#define HEADROOM_FOR_QOS 8
#define TAILROOM 8 /* for padding by mux layer */

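/*
 * Per-netdev private state, stored in netdev_priv().  "lock" protects
 * operation_mode and waiting_for_ul_skb; "tx_queue_lock" serializes
 * watermark flow-control decisions between the xmit path and the
 * write-done callback.
 */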
struct rmnet_private {
	struct net_device_stats stats;
	uint32_t ch_id;
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;
	unsigned long wakeups_xmit;
	unsigned long wakeups_rcv;
	unsigned long timeout_us;
#endif
	struct sk_buff *waiting_for_ul_skb;
	spinlock_t lock;
	spinlock_t tx_queue_lock;
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	uint8_t device_up;
	uint8_t in_reset;
};

#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;

#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * If early suspend is enabled then we specify two timeout values,
 * screen on (default), and screen is off.
 */
static unsigned long timeout_suspend_us;
static struct device *rmnet0;

/* Set timeout in us when the screen is off. */
static ssize_t timeout_suspend_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t n)
{
	timeout_suspend_us = simple_strtoul(buf, NULL, 10);
	return n;
}

static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%lu\n", (unsigned long) timeout_suspend_us);
}

static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
		   timeout_suspend_store);

static void rmnet_early_suspend(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_suspend_us;
	}
}

static void rmnet_late_resume(struct early_suspend *handler)
{
	if (rmnet0) {
		struct rmnet_private *p = netdev_priv(to_net_dev(rmnet0));
		p->timeout_us = timeout_us;
	}
}

static struct early_suspend rmnet_power_suspend = {
	.suspend = rmnet_early_suspend,
	.resume = rmnet_late_resume,
};

static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}

late_initcall(rmnet_late_init);
#endif

/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;

	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();

	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		ret = 1;

	p->last_packet = now;
	return ret;
}

static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_xmit);
}

DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);

static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	return sprintf(buf, "%lu\n", p->wakeups_rcv);
}

DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);

/* Set timeout in us. */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	p->timeout_us = timeout_us = simple_strtoul(buf, NULL, 10);
#else
	/* If using early suspend/resume hooks do not write the value on store. */
	timeout_us = simple_strtoul(buf, NULL, 10);
#endif
	return n;
}

static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%lu\n", timeout_us);
}

DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
#endif


/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);

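/*
 * In IP mode there is no link-layer header, so the L3 protocol is
 * inferred from the IP version nibble of the first payload byte.
 */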
static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol = 0;

	skb->dev = dev;

	/* Determine L3 protocol */
	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x\n",
		       dev->name, skb->data[0] & 0xf0);
		/* skb will be dropped in upper layer for unknown protocol */
	}
	return protocol;
}

static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *hdr = _hdr;

	if (len >= ETH_HLEN && hdr->h_proto == htons(ETH_P_ARP))
		return 0;

	return 1;
}

/* Rx Callback, Called in Work Queue context */
static void bam_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;

	if (skb) {
		skb->dev = dev;
		/* Handle Rx frame format */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);

		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
		     ((struct net_device *)dev)->name,
		     p->stats.rx_packets, skb->len);

		/* Deliver to network stack */
		netif_rx(skb);
	} else
		pr_err("[%s] %s: No skb received\n",
		       ((struct net_device *)dev)->name, __func__);
}

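/*
 * Core transmit path, shared by rmnet_xmit() and the delayed-send case
 * in bam_notify().  Returns 0 on success, -EPERM on a fatal write
 * error, and passes -EAGAIN/-EFAULT through from the mux layer.
 */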
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int bam_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;

	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);

	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}

	dev->trans_start = jiffies;
	/* if write() succeeds, skb access is unsafe in this process */
	bam_ret = msm_bam_dmux_write(p->ch_id, skb);

	if (bam_ret != 0 && bam_ret != -EAGAIN && bam_ret != -EFAULT) {
		pr_err("[%s] %s: write returned error %d\n",
		       dev->name, __func__, bam_ret);
		return -EPERM;
	}

	return bam_ret;
}

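/*
 * TX completion callback: the mux layer is done with the skb, so count
 * it, free it, and wake the queue once the channel drains below its
 * low watermark.
 */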
static void bam_write_done(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 opmode = p->operation_mode;
	unsigned long flags;

	DBG1("%s: write complete\n", __func__);
	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	     ((struct net_device *)(dev))->name, p->stats.tx_packets,
	     skb->len, skb->mark);
	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (netif_queue_stopped(dev) &&
	    msm_bam_dmux_is_ch_low(p->ch_id)) {
		DBG0("%s: Low WM hit, waking queue, skb=%p\n",
		     __func__, skb);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);
}

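/*
 * Event dispatch callback registered with msm_bam_dmux_open().
 * BAM_DMUX_UL_CONNECTED fires once the uplink is awake; any skb parked
 * by rmnet_xmit() while the link was asleep is sent at that point.
 */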
static void bam_notify(void *dev, int event, unsigned long data)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;

	switch (event) {
	case BAM_DMUX_RECEIVE:
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_UL_CONNECTED:
		spin_lock_irqsave(&p->lock, flags);
		if (p->waiting_for_ul_skb != NULL) {
			struct sk_buff *skb;
			int ret;

			skb = p->waiting_for_ul_skb;
			p->waiting_for_ul_skb = NULL;
			spin_unlock_irqrestore(&p->lock, flags);
			ret = _rmnet_xmit(skb, dev);
			if (ret) {
				pr_err("%s: error %d dropping delayed TX SKB %p\n",
				       __func__, ret, skb);
				dev_kfree_skb_any(skb);
			}
			netif_wake_queue(dev);
		} else {
			spin_unlock_irqrestore(&p->lock, flags);
		}
		break;
	case BAM_DMUX_UL_DISCONNECTED:
		break;
	}
}

static int __rmnet_open(struct net_device *dev)
{
	int r;
	struct rmnet_private *p = netdev_priv(dev);

	DBG0("[%s] __rmnet_open()\n", dev->name);

	if (!p->device_up) {
		r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);

		if (r < 0) {
			DBG0("%s: ch=%d failed with rc %d\n",
			     __func__, p->ch_id, r);
			return -ENODEV;
		}
	}

	p->device_up = DEVICE_ACTIVE;
	return 0;
}

static int rmnet_open(struct net_device *dev)
{
	int rc = 0;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);

	if (rc == 0)
		netif_start_queue(dev);

	return rc;
}

static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int rc = 0;

	if (p->device_up) {
		/* Do not close the BAM port once it is up: closing it
		 * causes the remote side to hang if the port is opened
		 * again later.
		 */
		p->device_up = DEVICE_INACTIVE;
		return rc;
	} else
		return -EBADF;
}

static int rmnet_stop(struct net_device *dev)
{
	DBG0("[%s] rmnet_stop()\n", dev->name);

	__rmnet_close(dev);
	netif_stop_queue(dev);

	return 0;
}

static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n",
	     dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}

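/*
 * ndo_start_xmit handler.  Votes for uplink power first; if the link is
 * still waking up, the skb is parked in waiting_for_ul_skb and the
 * queue is stopped until BAM_DMUX_UL_CONNECTED arrives.
 */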
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	int awake;
	int ret = 0;

	if (netif_queue_stopped(dev)) {
		pr_err("[%s] fatal: rmnet_xmit called when netif_queue is stopped\n",
		       dev->name);
		return 0;
	}

	spin_lock_irqsave(&p->lock, flags);
	awake = msm_bam_dmux_ul_power_vote();
	if (!awake) {
		/* send SKB once wakeup is complete */
		netif_stop_queue(dev);
		p->waiting_for_ul_skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		ret = 0;
		goto exit;
	}
	spin_unlock_irqrestore(&p->lock, flags);

	ret = _rmnet_xmit(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	/*
	 * Detected SSR a bit early.  Shut some things down now, and leave
	 * the rest to the main SSR handling code when that happens later.
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}

	if (ret == -EAGAIN) {
		/*
		 * This should not happen.
		 * EAGAIN means we attempted to overflow the high watermark.
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework.  It will
		 * retry this packet when the queue is restarted, which
		 * happens in the write_done callback when the low watermark
		 * is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}

	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (msm_bam_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue, skb=%p\n",
		     __func__, skb);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);

exit:
	msm_bam_dmux_ul_power_unvote();
	return ret;
}

static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	return &p->stats;
}

static void rmnet_set_multicast_list(struct net_device *dev)
{
}

static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}

static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static const struct net_device_ops rmnet_ops_ip = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_set_multicast_list = rmnet_set_multicast_list,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = NULL,
	.ndo_validate_addr = NULL,
};

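/*
 * Private ioctls (defined in <linux/msm_rmnet.h>) switch the device
 * between Ethernet and raw-IP framing, toggle the QMI QoS header, and
 * open/close the transport port.
 */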
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;
	unsigned long flags;
	int prev_mtu = dev->mtu;
	int rc = 0;

	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET: /* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			dev->mtu = prev_mtu;

			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP: /* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently */
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {

			/* Undo config done in ether_setup() */
			dev->header_ops = NULL; /* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST |
					IFF_MULTICAST);

			dev->needed_headroom = HEADROOM_FOR_BAM +
						HEADROOM_FOR_QOS;
			dev->needed_tailroom = TAILROOM;
			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
			     dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP: /* Get link protocol state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode &
				 (RMNET_MODE_LLP_ETH | RMNET_MODE_LLP_IP));
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE: /* Set QoS header enabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE: /* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
		     dev->name);
		break;

	case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode & RMNET_MODE_QOS);
		break;

	case RMNET_IOCTL_GET_OPMODE: /* Get operation mode */
		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
		break;

	case RMNET_IOCTL_OPEN: /* Open transport port */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
		     dev->name);
		break;

	case RMNET_IOCTL_CLOSE: /* Close transport port */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
		     dev->name);
		break;

	default:
		pr_err("[%s] error: rmnet_ioctl called for unsupported cmd[%d]\n",
		       dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}

static void __init rmnet_setup(struct net_device *dev)
{
	/* Using Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);

	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS;
	dev->needed_tailroom = TAILROOM;
	random_ether_addr(dev->dev_addr);

	dev->watchdog_timeo = 1000; /* 1000 jiffies (10 s at HZ=100) */
}

static struct net_device *netdevs[RMNET_DEVICE_COUNT];
static struct platform_driver bam_rmnet_drivers[RMNET_DEVICE_COUNT];

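/*
 * The BAM DMUX core registers one platform device per mux channel,
 * named "bam_dmux_ch_%d".  These probe/remove hooks therefore track
 * modem subsystem restart: remove closes the channel and stops the
 * queue, and the following probe reopens it.
 */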
static int bam_rmnet_probe(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *p;

	for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}
	if (i >= RMNET_DEVICE_COUNT) {
		pr_err("%s: unknown platform device %s\n",
		       __func__, pdev->name);
		return -ENODEV;
	}

	p = netdev_priv(netdevs[i]);
	if (p->in_reset) {
		p->in_reset = 0;
		msm_bam_dmux_open(p->ch_id, netdevs[i], bam_notify);
		netif_carrier_on(netdevs[i]);
		netif_start_queue(netdevs[i]);
	}

	return 0;
}

static int bam_rmnet_remove(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *p;

	for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}
	if (i >= RMNET_DEVICE_COUNT) {
		pr_err("%s: unknown platform device %s\n",
		       __func__, pdev->name);
		return -ENODEV;
	}

	p = netdev_priv(netdevs[i]);
	p->in_reset = 1;
	if (p->waiting_for_ul_skb != NULL) {
		dev_kfree_skb_any(p->waiting_for_ul_skb);
		p->waiting_for_ul_skb = NULL;
	}
	msm_bam_dmux_close(p->ch_id);
	netif_carrier_off(netdevs[i]);
	netif_stop_queue(netdevs[i]);
	return 0;
}

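/*
 * Module init: allocate and register RMNET_DEVICE_COUNT netdevs, then
 * register one platform driver per BAM DMUX channel so that probe and
 * remove track the modem lifecycle.
 */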
static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;
	char *tempname;

	pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);

#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif

	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rmnet%d", rmnet_setup);

		if (!dev) {
			pr_err("%s: no memory for netdev %d\n", __func__, n);
			return -ENOMEM;
		}

		netdevs[n] = dev;
		d = &(dev->dev);
		p = netdev_priv(dev);
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->ch_id = n;
		p->waiting_for_ul_skb = NULL;
		p->in_reset = 0;
		spin_lock_init(&p->lock);
		spin_lock_init(&p->tx_queue_lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif

		ret = register_netdev(dev);
		if (ret) {
			pr_err("%s: unable to register netdev %d rc=%d\n",
			       __func__, n, ret);
			free_netdev(dev);
			return ret;
		}

#ifdef CONFIG_MSM_RMNET_DEBUG
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;

		/* Only care about rmnet0 for suspend/resume timeout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
		bam_rmnet_drivers[n].probe = bam_rmnet_probe;
		bam_rmnet_drivers[n].remove = bam_rmnet_remove;
		tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
		if (tempname == NULL)
			return -ENOMEM;
		scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
			  n);
		bam_rmnet_drivers[n].driver.name = tempname;
		bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
		ret = platform_driver_register(&bam_rmnet_drivers[n]);
		if (ret) {
			pr_err("%s: registration failed n=%d rc=%d\n",
			       __func__, n, ret);
			return ret;
		}
	}
	return 0;
}

module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
MODULE_LICENSE("GPL v2");