/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
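
/*
 * USB gadget rmnet data path over the MSM BAM DMUX.
 *
 * Direction naming in this file: "tx"/to_host is modem -> USB host
 * (packets arriving on the BAM DMUX channel are queued on tx_skb_q and
 * written out the IN endpoint); "rx"/to_modem is USB host -> modem
 * (packets completing on the OUT endpoint are queued on rx_skb_q and
 * drained into the BAM DMUX from a workqueue).
 */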

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static unsigned bam_ch_ids[] = { 8 };

static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

#define BAM_PENDING_LIMIT			220
#define BAM_MUX_TX_PKT_DROP_THRESHOLD		1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD		500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD		300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR				8

#define BAM_MUX_RX_Q_SIZE			16
#define BAM_MUX_TX_Q_SIZE			200
#define BAM_MUX_RX_REQ_SIZE			(2048 - BAM_MUX_HDR)

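/*
 * Runtime-tunable copies of the thresholds above; module_param() with
 * S_IWUSR exposes each knob so it can be adjusted without a rebuild.
 */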
unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

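/*
 * Per-channel state.  BAM_CH_READY is set when the BAM DMUX platform
 * device for the channel has probed; BAM_CH_OPENED is set once
 * msm_bam_dmux_open() succeeds.
 */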
#define BAM_CH_OPENED	BIT(0)
#define BAM_CH_READY	BIT(1)
struct bam_ch_info {
	unsigned long flags;
	unsigned id;

	struct list_head tx_idle;
	struct sk_buff_head tx_skb_q;

	struct list_head rx_idle;
	struct sk_buff_head rx_skb_q;

	struct gbam_port *port;
	struct work_struct write_tobam_w;

	/* stats */
	unsigned int pending_with_bam;
	unsigned int tohost_drp_cnt;
	unsigned int tomodem_drp_cnt;
	unsigned int tx_len;
	unsigned int rx_len;
	unsigned long to_modem;
	unsigned long to_host;
};

struct gbam_port {
	unsigned port_num;
	spinlock_t port_lock;

	struct grmnet *port_usb;

	struct bam_ch_info data_ch;

	struct work_struct connect_w;
	struct work_struct disconnect_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

static void gbam_start_rx(struct gbam_port *port);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request *req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}

static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int i;
	struct usb_request *req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p\n", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: allocated %d of %d requests\n",
					__func__, i, num);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
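/*
 * Drain tx_skb_q onto the IN endpoint using requests from the tx_idle
 * pool.  Called with the port lock released, from the BAM receive
 * callback and from the IN completion handler.
 */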
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request, list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;

		list_del(&req->list);

		spin_unlock(&port->port_lock);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

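/*
 * BAM DMUX receive callback (modem -> host).  If the host-bound queue
 * has grown past bam_mux_tx_pkt_drop_thld the packet is dropped,
 * otherwise it is queued and the IN endpoint is kicked.
 */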
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_write_data_tohost(port);
}

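/*
 * BAM DMUX write-done callback (host -> modem).  The skb handed to
 * msm_bam_dmux_write() has been consumed: free it, drop the pending
 * count, and reschedule the uplink worker.
 */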
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}

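/*
 * Uplink worker: drain rx_skb_q into the BAM DMUX channel, keeping at
 * most BAM_PENDING_LIMIT packets outstanding.  Once the queue falls
 * below the flow-control disable threshold, resume USB OUT transfers.
 */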
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	int qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock, flags);

	if (qlen < bam_mux_rx_fctrl_dis_thld)
		gbam_start_rx(port);
}
/*-------------------------------------------------------------*/

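/*
 * IN endpoint completion: free the skb just sent to the host, return
 * the request to the tx_idle pool, and push any further queued packets.
 */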
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock);

	gbam_write_data_tohost(port);
}

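/*
 * OUT endpoint completion: hand the received skb to the uplink worker
 * and resubmit the request with a fresh skb.  If uplink flow control
 * has engaged, park the request on rx_idle instead; gbam_start_rx()
 * resubmits it once the backlog drains.
 */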
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = NULL;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
					__func__, ep->name, status,
					req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}

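/*
 * (Re)submit OUT requests from the rx_idle pool, allocating a fresh
 * skb for each, until the pool empties or flow control engages.
 */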
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
				d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

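/*
 * Allocate the rx/tx request pools and queue the first set of OUT
 * transfers.  Runs once per connect, after the BAM channel is opened.
 */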
static void gbam_start_io(struct gbam_port *port)
{
	unsigned long flags;
	struct usb_ep *ep;
	int ret;
	struct bam_ch_info *d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		gbam_free_requests(port->port_usb->out, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

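/* demux BAM DMUX events onto the receive/write-done handlers above */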
static void gbam_notify(void *p, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		gbam_data_recv_cb(p, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		gbam_data_write_done(p, (struct sk_buff *)(data));
		break;
	}
}

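/* close the BAM DMUX channel from workqueue context after disconnect */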
static void gbam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d = &port->data_ch;

	if (!test_bit(BAM_CH_OPENED, &d->flags))
		return;

	msm_bam_dmux_close(d->id);
	clear_bit(BAM_CH_OPENED, &d->flags);
}

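/*
 * Open the BAM DMUX channel once both the USB side (port_usb set) and
 * the BAM side (BAM_CH_READY) are up, then start I/O.
 */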
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable to open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct bam_ch_info *d;

	if (!port)
		return;

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}

/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock_irqrestore(&port->port_lock, flags);

			break;
		}
	}

	return 0;
}

/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct usb_ep *ep_in = NULL;
	struct usb_ep *ep_out = NULL;
	unsigned long flags;
	int i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock_irqrestore(&port->port_lock, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			msm_bam_dmux_close(d->id);

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}

static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;
	struct platform_driver *pdrv = &bam_ports[portno].pdrv;

	if (port) {
		kfree(port);
		platform_driver_unregister(pdrv);
	}
}

static int gbam_port_alloc(int portno)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct platform_driver *pdrv;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock);
	INIT_WORK(&port->connect_w, gbam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pdrv = &bam_ports[portno].pdrv;
	pdrv->probe = gbam_data_ch_probe;
	pdrv->remove = gbam_data_ch_remove;
	pdrv->driver.name = bam_ch_names[portno];
	pdrv->driver.owner = THIS_MODULE;

	platform_driver_register(pdrv);

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	char *buf;
	unsigned long flags;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock_irqrestore(&port->port_lock, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock_irqrestore(&port->port_lock, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", NULL);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, NULL,
			&gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

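/*
 * Disconnect entry point for the grmnet (rmnet function) side: free
 * queued buffers, detach from the USB endpoints, and close the BAM
 * channel from workqueue context.
 */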
void gbam_disconnect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port *port;
	unsigned long flags;
	struct bam_ch_info *d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	gbam_free_buffers(port);

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = NULL;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints */
	usb_ep_disable(gr->out);
	usb_ep_disable(gr->in);

	queue_work(gbam_wq, &port->disconnect_w);
}

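/*
 * Connect entry point: enable both endpoints, reset the per-channel
 * stats, and schedule connect_w so the BAM channel is opened outside
 * atomic context.
 */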
int gbam_connect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int ret;
	unsigned long flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	ret = usb_ep_enable(gr->in, gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
				__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out, gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
				__func__, gr->out);
		gr->in->driver_data = NULL;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = gr;

	d->to_host = 0;
	d->to_modem = 0;
	d->pending_with_bam = 0;
	d->tohost_drp_cnt = 0;
	d->tomodem_drp_cnt = 0;
	spin_unlock_irqrestore(&port->port_lock, flags);

	queue_work(gbam_wq, &port->connect_w);

	return 0;
}

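/*
 * One-time setup: create the workqueue, allocate each port, and
 * register a platform driver per BAM channel so its probe can signal
 * channel readiness.
 */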
int gbam_setup(unsigned int count)
{
	int i;
	int ret;

	pr_debug("%s: requested ports:%d\n", __func__, count);

	if (!count || count > BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d\n",
				__func__, count);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	gbam_debugfs_init();

	return 0;

free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);

	destroy_workqueue(gbam_wq);

	return ret;
}