/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static unsigned bam_ch_ids[] = { 8 };

static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

#define BAM_PENDING_LIMIT			220
#define BAM_MUX_TX_PKT_DROP_THRESHOLD		1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD		500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD		300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR				8

#define BAM_MUX_RX_Q_SIZE			16
#define BAM_MUX_TX_Q_SIZE			200
#define BAM_MUX_RX_REQ_SIZE			(2048 - BAM_MUX_HDR)

unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

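/*
 * All thresholds above are runtime-tunable through module parameters.
 * The RX pair forms a high/low watermark scheme: when flow control is
 * enabled and rx_skb_q reaches bam_mux_rx_fctrl_en_thld packets,
 * completed OUT requests are parked on rx_idle instead of being
 * requeued; once the queue drains below bam_mux_rx_fctrl_dis_thld,
 * gbam_data_write_tobam() restarts RX via gbam_start_rx(). On the TX
 * side, downlink packets are dropped outright once tx_skb_q exceeds
 * bam_mux_tx_pkt_drop_thld.
 */
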
#define BAM_CH_OPENED	BIT(0)
#define BAM_CH_READY	BIT(1)

struct bam_ch_info {
	unsigned long		flags;
	unsigned		id;

	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;
	struct work_struct	write_tobam_w;

	/* stats */
	unsigned int		pending_with_bam;
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};

struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock;

	struct grmnet		*port_usb;

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

static void gbam_start_rx(struct gbam_port *port);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}

static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int			i;
	struct usb_request	*req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p\n", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__, i);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
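/*
 * Downlink (modem -> host): bam_dmux delivers skbs via
 * gbam_data_recv_cb(), which queues them on tx_skb_q;
 * gbam_write_data_tohost() then pairs each skb with an idle IN request
 * and queues it on the USB endpoint.
 *
 * Uplink (host -> modem): OUT completions land in gbam_epout_complete(),
 * which queues the skb on rx_skb_q and schedules write_tobam_w;
 * gbam_data_write_tobam() pushes the skbs into the BAM DMUX channel
 * with msm_bam_dmux_write().
 */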
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long		flags;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb;
	int			ret;
	struct usb_request	*req;
	struct usb_ep		*ep;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;

		list_del(&req->list);

		spin_unlock(&port->port_lock);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_write_data_tohost(port);
}

void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}

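/*
 * Drain rx_skb_q into the BAM DMUX channel, never letting more than
 * BAM_PENDING_LIMIT skbs be outstanding with the BAM driver at once;
 * gbam_data_write_done() requeues this work as completions come back.
 * If the queue has drained below the flow-control disable watermark,
 * RX on the USB side is restarted.
 */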
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct sk_buff		*skb;
	unsigned long		flags;
	int			ret;
	int			qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock, flags);

	if (qlen < bam_mux_rx_fctrl_dis_thld)
		gbam_start_rx(port);
}
/*-------------------------------------------------------------*/

static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d;
	struct sk_buff		*skb = req->context;
	int			status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock);

	gbam_write_data_tohost(port);
}

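/*
 * OUT (host -> device) completion. On success the skb is handed to the
 * write_tobam worker; the request is then either recycled with a fresh
 * skb and requeued, or parked on rx_idle when flow control is engaged
 * or skb allocation fails.
 */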
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb = req->context;
	int			status = req->status;
	int			queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = NULL;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
					__func__, ep->name, status,
					req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver.
	 */
	if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}

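/*
 * Prime the OUT endpoint: pair every idle RX request with a freshly
 * allocated skb and queue it, stopping early if the flow-control
 * enable watermark has already been reached.
 */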
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request	*req;
	struct bam_ch_info	*d;
	struct usb_ep		*ep;
	unsigned long		flags;
	int			ret;
	struct sk_buff		*skb;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
				d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

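/*
 * Allocate the RX and TX request pools and kick off the first round of
 * OUT transfers. Called from the connect worker once the BAM channel is
 * open; GFP_ATOMIC is used because port_lock is held across the
 * allocations.
 */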
static void gbam_start_io(struct gbam_port *port)
{
	unsigned long		flags;
	struct usb_ep		*ep;
	int			ret;
	struct bam_ch_info	*d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		/* rx requests were allocated against the OUT endpoint */
		gbam_free_requests(port->port_usb->out, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

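/*
 * Callback registered with msm_bam_dmux_open(); dispatches BAM DMUX
 * events to the data-path handlers above.
 */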
static void gbam_notify(void *p, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		gbam_data_recv_cb(p, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		gbam_data_write_done(p, (struct sk_buff *)(data));
		break;
	}
}

static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;

	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable to open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff		*skb;
	unsigned long		flags;
	struct bam_ch_info	*d;

	if (!port)
		return;

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}

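/*
 * Each mux channel is exposed as a platform device named after the
 * channel (e.g. "bam_dmux_ch_8"), so registering a platform_driver with
 * a matching name lets probe()/remove() below serve as channel
 * up/down notifications from the bam_dmux driver.
 */
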
/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock_irqrestore(&port->port_lock, flags);

			break;
		}
	}

	return 0;
}

/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct usb_ep		*ep_in = NULL;
	struct usb_ep		*ep_out = NULL;
	unsigned long		flags;
	int			i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock_irqrestore(&port->port_lock, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			msm_bam_dmux_close(d->id);

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}

static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;
	struct platform_driver *pdrv = &bam_ports[portno].pdrv;

	if (port) {
		kfree(port);
		platform_driver_unregister(pdrv);
	}
}

static int gbam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct platform_driver	*pdrv;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock);
	INIT_WORK(&port->connect_w, gbam_connect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pdrv = &bam_ports[portno].pdrv;
	pdrv->probe = gbam_data_ch_probe;
	pdrv->remove = gbam_data_ch_remove;
	pdrv->driver.name = bam_ch_names[portno];
	pdrv->driver.owner = THIS_MODULE;

	platform_driver_register(pdrv);

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	char			*buf;
	unsigned long		flags;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock_irqrestore(&port->port_lock, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock_irqrestore(&port->port_lock, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", NULL);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, NULL,
			&gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

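/*
 * Called when the USB side goes away (cable pull or function disable):
 * drop all buffered skbs, detach port_usb, disable the endpoints and
 * close the BAM channel if it was open.
 */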
void gbam_disconnect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port	*port;
	unsigned long		flags;
	struct bam_ch_info	*d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	gbam_free_buffers(port);

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = NULL;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints */
	usb_ep_disable(gr->out);
	usb_ep_disable(gr->in);

	if (test_bit(BAM_CH_OPENED, &d->flags)) {
		msm_bam_dmux_close(d->id);
		clear_bit(BAM_CH_OPENED, &d->flags);
	}
}

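/*
 * Called when the USB side comes up: enable the IN/OUT endpoints,
 * attach the grmnet instance, reset the per-channel stats and schedule
 * connect_w, which opens the BAM channel once BAM_CH_READY is set.
 */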
int gbam_connect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			ret;
	unsigned long		flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	ret = usb_ep_enable(gr->in, gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
				__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out, gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
				__func__, gr->out);
		gr->in->driver_data = NULL;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = gr;

	d->to_host = 0;
	d->to_modem = 0;
	d->pending_with_bam = 0;
	d->tohost_drp_cnt = 0;
	d->tomodem_drp_cnt = 0;
	spin_unlock_irqrestore(&port->port_lock, flags);

	queue_work(gbam_wq, &port->connect_w);

	return 0;
}

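/*
 * One-time setup: validate the requested port count, create the
 * dedicated workqueue and allocate/register every port. On any
 * allocation failure, all previously allocated ports are freed.
 */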
int gbam_setup(unsigned int count)
{
	int	i;
	int	ret;

	pr_debug("%s: requested ports:%d\n", __func__, count);

	if (!count || count > BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d\n",
				__func__, count);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	gbam_debugfs_init();

	return 0;

free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);

	destroy_workqueue(gbam_wq);

	return ret;
}