blob: 1de8fc1a15c747d129834f0882992c385644b75c [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/interrupt.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18#include <linux/termios.h>
19#include <mach/msm_smd.h>
20#include <linux/netdevice.h>
21#include <mach/bam_dmux.h>
22#include <linux/debugfs.h>
23#include <linux/bitops.h>
24#include <linux/termios.h>
25
26#include "u_rmnet.h"
27
28#define BAM_N_PORTS 1
29
30static struct workqueue_struct *gbam_wq;
31static int n_bam_ports;
32static unsigned bam_ch_ids[] = { 8 };
33
Jack Phameffd4ae2011-08-03 16:49:36 -070034static const char *bam_ch_names[] = { "bam_dmux_ch_8" };
35
Vamsi Krishna8f24f252011-11-02 11:46:08 -070036#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
37#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 200
38#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 125
39#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070040
41#define BAM_MUX_HDR 8
42
Vamsi Krishna8f24f252011-11-02 11:46:08 -070043#define BAM_MUX_RX_Q_SIZE 16
44#define BAM_MUX_TX_Q_SIZE 200
45#define BAM_MUX_RX_REQ_SIZE (2048 - BAM_MUX_HDR)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070046
Vamsi Krishna8f24f252011-11-02 11:46:08 -070047unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
48module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049
Vamsi Krishna8f24f252011-11-02 11:46:08 -070050unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
51module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070052
Vamsi Krishna8f24f252011-11-02 11:46:08 -070053unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
54module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055
Vamsi Krishna8f24f252011-11-02 11:46:08 -070056unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
57module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058
Vamsi Krishna8f24f252011-11-02 11:46:08 -070059unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
60module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061
Vamsi Krishna8f24f252011-11-02 11:46:08 -070062unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
63module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064
Vamsi Krishna8f24f252011-11-02 11:46:08 -070065unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
66module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067
Jack Phameffd4ae2011-08-03 16:49:36 -070068#define BAM_CH_OPENED BIT(0)
69#define BAM_CH_READY BIT(1)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070struct bam_ch_info {
Jack Phameffd4ae2011-08-03 16:49:36 -070071 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070072 unsigned id;
73
74 struct list_head tx_idle;
75 struct sk_buff_head tx_skb_q;
76
77 struct list_head rx_idle;
78 struct sk_buff_head rx_skb_q;
79
80 struct gbam_port *port;
81 struct work_struct write_tobam_w;
82
83 /* stats */
84 unsigned int pending_with_bam;
85 unsigned int tohost_drp_cnt;
86 unsigned int tomodem_drp_cnt;
87 unsigned int tx_len;
88 unsigned int rx_len;
89 unsigned long to_modem;
90 unsigned long to_host;
91};
92
/*
 * One USB<->BAM bridge port.  port_usb is non-NULL only while a
 * grmnet instance is connected; port_lock guards it and data_ch.
 */
struct gbam_port {
	unsigned port_num;		/* index into bam_ports[] */
	spinlock_t port_lock;

	struct grmnet *port_usb;	/* USB function side; NULL when disconnected */

	struct bam_ch_info data_ch;	/* the single data channel of this port */

	struct work_struct connect_w;	/* opens the BAM channel off-interrupt */
};
103
/*
 * One entry per supported port: runtime port state plus the platform
 * driver used to learn when its DMUX channel appears/disappears.
 */
static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700108
109static void gbam_start_rx(struct gbam_port *port);
110
111/*---------------misc functions---------------- */
112static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
113{
114 struct usb_request *req;
115
116 while (!list_empty(head)) {
117 req = list_entry(head->next, struct usb_request, list);
118 list_del(&req->list);
119 usb_ep_free_request(ep, req);
120 }
121}
122
123static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
124 int num,
125 void (*cb)(struct usb_ep *ep, struct usb_request *),
126 gfp_t flags)
127{
128 int i;
129 struct usb_request *req;
130
131 pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
132 ep, head, num, cb);
133
134 for (i = 0; i < num; i++) {
135 req = usb_ep_alloc_request(ep, flags);
136 if (!req) {
137 pr_debug("%s: req allocated:%d\n", __func__, i);
138 return list_empty(head) ? -ENOMEM : 0;
139 }
140 req->complete = cb;
141 list_add(&req->list, head);
142 }
143
144 return 0;
145}
146/*--------------------------------------------- */
147
148/*------------data_path----------------------------*/
/*
 * Push packets queued on tx_skb_q to the host over the USB IN
 * endpoint, pairing each skb with an idle request.  Stops when either
 * queue runs dry or a queue attempt fails.
 */
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		/* USB side disconnected; nothing to send */
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;

		list_del(&req->list);

		/*
		 * Drop the lock (irqs stay off) around usb_ep_queue:
		 * the IN completion handler also takes port_lock.
		 */
		spin_unlock(&port->port_lock);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		if (ret) {
			/* put the request back; the skb is lost */
			pr_err("%s: usb epIn failed\n", __func__);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
194
/*
 * BAM DMUX receive callback: a downlink packet arrived from the
 * modem.  Queue it toward the USB host, unless the port is gone or
 * the tx backlog exceeds the drop threshold.
 */
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		/* USB side disconnected; drop the packet */
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		/* host not draining fast enough: drop and count */
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_write_data_tohost(port);
}
229
/*
 * BAM DMUX write-done callback: the modem consumed an uplink skb.
 * Free it, decrement the pending count, and restart USB OUT reads
 * unless flow control keeps the uplink throttled.
 */
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	/*
	 * With flow control enabled, stay throttled until the backlog
	 * falls below the disable threshold.
	 */
	if (bam_mux_rx_fctrl_support &&
			d->pending_with_bam >= bam_mux_rx_fctrl_dis_thld) {

		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_start_rx(port);
}
259
/*
 * Work item: drain rx_skb_q (packets received from the USB host)
 * into the BAM DMUX channel, honoring the uplink flow-control
 * enable threshold.
 */
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while (!bam_mux_rx_fctrl_support ||
			(d->pending_with_bam < bam_mux_rx_fctrl_en_thld)) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		/* count optimistically; rolled back below on failure */
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		/*
		 * NOTE(review): lock dropped around msm_bam_dmux_write --
		 * presumably it can block or take its own locks; confirm.
		 */
		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			/* undo the counters and drop the packet */
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
305/*-------------------------------------------------------------*/
306
/*
 * Completion handler for USB IN (to-host) requests: free the
 * transmitted skb, return the request to the tx_idle pool and try to
 * push more queued downlink data.
 */
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	/*
	 * NOTE(review): when port is NULL the request is neither
	 * recycled nor freed -- looks like a leak; verify against the
	 * disconnect path before changing.
	 */
	if (!port)
		return;

	spin_lock(&port->port_lock);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock);

	gbam_write_data_tohost(port);
}
339
/*
 * Completion handler for USB OUT (from-host) requests.  On success
 * the filled skb is handed to the write-to-bam worker; the request is
 * then re-armed with a fresh skb unless uplink flow control is
 * engaged, in which case it is parked on rx_idle.
 */
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		/* data received: record the actual number of bytes */
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection: release both skb and request */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
					__func__, ep->name, status,
					req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * having call back mechanism from bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
			d->pending_with_bam >= bam_mux_rx_fctrl_en_thld) {

		/* uplink backlog too deep: park the request for later */
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	/* re-arm: allocate a fresh skb and queue the request again */
	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	/* headroom -- presumably for the BAM mux header; confirm */
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}
414
/*
 * Arm the USB OUT endpoint: pair every request on rx_idle with a
 * freshly allocated skb and queue it to receive uplink data.
 */
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	/* port_usb re-checked because the lock is dropped in the loop */
	while (port->port_usb && !list_empty(&d->rx_idle)) {
		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		/* headroom -- presumably for the BAM mux header; confirm */
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			/* port may have vanished while unlocked */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
464
465static void gbam_start_io(struct gbam_port *port)
466{
467 unsigned long flags;
468 struct usb_ep *ep;
469 int ret;
470 struct bam_ch_info *d;
471
472 pr_debug("%s: port:%p\n", __func__, port);
473
474 spin_lock_irqsave(&port->port_lock, flags);
475 if (!port->port_usb) {
476 spin_unlock_irqrestore(&port->port_lock, flags);
477 return;
478 }
479
480 d = &port->data_ch;
481 ep = port->port_usb->out;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700482 ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700483 gbam_epout_complete, GFP_ATOMIC);
484 if (ret) {
485 pr_err("%s: rx req allocation failed\n", __func__);
486 return;
487 }
488
489 ep = port->port_usb->in;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700490 ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700491 gbam_epin_complete, GFP_ATOMIC);
492 if (ret) {
493 pr_err("%s: tx req allocation failed\n", __func__);
494 gbam_free_requests(ep, &d->rx_idle);
495 return;
496 }
497
498 spin_unlock_irqrestore(&port->port_lock, flags);
499
500 /* queue out requests */
501 gbam_start_rx(port);
502}
503
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600504static void gbam_notify(void *p, int event, unsigned long data)
505{
506 switch (event) {
507 case BAM_DMUX_RECEIVE:
508 gbam_data_recv_cb(p, (struct sk_buff *)(data));
509 break;
510 case BAM_DMUX_WRITE_DONE:
511 gbam_data_write_done(p, (struct sk_buff *)(data));
512 break;
513 }
514}
515
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700516static void gbam_connect_work(struct work_struct *w)
517{
518 struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
519 struct bam_ch_info *d = &port->data_ch;
520 int ret;
521
Jack Phameffd4ae2011-08-03 16:49:36 -0700522 if (!test_bit(BAM_CH_READY, &d->flags))
523 return;
524
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600525 ret = msm_bam_dmux_open(d->id, port, gbam_notify);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700526 if (ret) {
527 pr_err("%s: unable open bam ch:%d err:%d\n",
528 __func__, d->id, ret);
529 return;
530 }
Jack Phameffd4ae2011-08-03 16:49:36 -0700531 set_bit(BAM_CH_OPENED, &d->flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700532
533 gbam_start_io(port);
534
535 pr_debug("%s: done\n", __func__);
536}
537
Jack Phameffd4ae2011-08-03 16:49:36 -0700538static void gbam_free_buffers(struct gbam_port *port)
539{
540 struct sk_buff *skb;
541 unsigned long flags;
542 struct bam_ch_info *d;
543
544 spin_lock_irqsave(&port->port_lock, flags);
545
546 if (!port || !port->port_usb)
547 goto free_buf_out;
548
549 d = &port->data_ch;
550
551 gbam_free_requests(port->port_usb->in, &d->tx_idle);
552 gbam_free_requests(port->port_usb->out, &d->rx_idle);
553
554 while ((skb = __skb_dequeue(&d->tx_skb_q)))
555 dev_kfree_skb_any(skb);
556
557 while ((skb = __skb_dequeue(&d->rx_skb_q)))
558 dev_kfree_skb_any(skb);
559
560free_buf_out:
561 spin_unlock_irqrestore(&port->port_lock, flags);
562}
563
564/* BAM data channel ready, allow attempt to open */
565static int gbam_data_ch_probe(struct platform_device *pdev)
566{
567 struct gbam_port *port;
568 struct bam_ch_info *d;
569 int i;
570 unsigned long flags;
571
572 pr_debug("%s: name:%s\n", __func__, pdev->name);
573
574 for (i = 0; i < n_bam_ports; i++) {
575 port = bam_ports[i].port;
576 d = &port->data_ch;
577
578 if (!strncmp(bam_ch_names[i], pdev->name,
579 BAM_DMUX_CH_NAME_MAX_LEN)) {
580 set_bit(BAM_CH_READY, &d->flags);
581
582 /* if usb is online, try opening bam_ch */
583 spin_lock_irqsave(&port->port_lock, flags);
584 if (port->port_usb)
585 queue_work(gbam_wq, &port->connect_w);
586 spin_unlock_irqrestore(&port->port_lock, flags);
587
588 break;
589 }
590 }
591
592 return 0;
593}
594
595/* BAM data channel went inactive, so close it */
/*
 * Platform remove: the named BAM data channel went away.  Flush the
 * endpoints, free buffers, close the DMUX channel and clear the
 * ready/opened state of the matching port.
 */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct usb_ep *ep_in = NULL;
	struct usb_ep *ep_out = NULL;
	unsigned long flags;
	int i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	/* NOTE(review): unlike the probe, no break after a match -- the
	 * loop keeps scanning; harmless if channel names are unique.
	 */
	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			/* snapshot endpoints under the lock */
			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock_irqrestore(&port->port_lock, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			/* NOTE(review): closed even when BAM_CH_OPENED was
			 * never set -- confirm msm_bam_dmux_close tolerates it.
			 */
			msm_bam_dmux_close(d->id);

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}
636
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700637static void gbam_port_free(int portno)
638{
639 struct gbam_port *port = bam_ports[portno].port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700640 struct platform_driver *pdrv = &bam_ports[portno].pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700641
Jack Phameffd4ae2011-08-03 16:49:36 -0700642 if (port) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700643 kfree(port);
Jack Phameffd4ae2011-08-03 16:49:36 -0700644 platform_driver_unregister(pdrv);
645 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646}
647
648static int gbam_port_alloc(int portno)
649{
650 struct gbam_port *port;
651 struct bam_ch_info *d;
Jack Phameffd4ae2011-08-03 16:49:36 -0700652 struct platform_driver *pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700653
654 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
655 if (!port)
656 return -ENOMEM;
657
658 port->port_num = portno;
659
660 /* port initialization */
661 spin_lock_init(&port->port_lock);
662 INIT_WORK(&port->connect_w, gbam_connect_work);
663
664 /* data ch */
665 d = &port->data_ch;
666 d->port = port;
667 INIT_LIST_HEAD(&d->tx_idle);
668 INIT_LIST_HEAD(&d->rx_idle);
669 INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
670 skb_queue_head_init(&d->tx_skb_q);
671 skb_queue_head_init(&d->rx_skb_q);
672 d->id = bam_ch_ids[portno];
673
674 bam_ports[portno].port = port;
675
Jack Phameffd4ae2011-08-03 16:49:36 -0700676 pdrv = &bam_ports[portno].pdrv;
677 pdrv->probe = gbam_data_ch_probe;
678 pdrv->remove = gbam_data_ch_remove;
679 pdrv->driver.name = bam_ch_names[portno];
680 pdrv->driver.owner = THIS_MODULE;
681
682 platform_driver_register(pdrv);
683
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700684 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
685
686 return 0;
687}
688
689#if defined(CONFIG_DEBUG_FS)
690#define DEBUG_BUF_SIZE 1024
/*
 * debugfs read handler: format per-port traffic/drop counters and
 * channel state into a 1 KiB buffer and copy it to userspace.
 */
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	char *buf;
	unsigned long flags;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		/* scnprintf bounds each write to the remaining space */
		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock_irqrestore(&port->port_lock, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}
741
742static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
743 size_t count, loff_t *ppos)
744{
745 struct gbam_port *port;
746 struct bam_ch_info *d;
747 int i;
748 unsigned long flags;
749
750 for (i = 0; i < n_bam_ports; i++) {
751 port = bam_ports[i].port;
752 if (!port)
753 continue;
754
755 spin_lock_irqsave(&port->port_lock, flags);
756
757 d = &port->data_ch;
758
759 d->to_host = 0;
760 d->to_modem = 0;
761 d->pending_with_bam = 0;
762 d->tohost_drp_cnt = 0;
763 d->tomodem_drp_cnt = 0;
764
765 spin_unlock_irqrestore(&port->port_lock, flags);
766 }
767 return count;
768}
769
/* debugfs "status" file ops: read dumps stats, write resets them */
const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};
774
775static void gbam_debugfs_init(void)
776{
777 struct dentry *dent;
778 struct dentry *dfile;
779
780 dent = debugfs_create_dir("usb_rmnet", 0);
781 if (IS_ERR(dent))
782 return;
783
784 /* TODO: Implement cleanup function to remove created file */
785 dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
786 if (!dfile || IS_ERR(dfile))
787 debugfs_remove(dent);
788}
789#else
790static void gam_debugfs_init(void) { }
791#endif
792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700793void gbam_disconnect(struct grmnet *gr, u8 port_num)
794{
795 struct gbam_port *port;
796 unsigned long flags;
797 struct bam_ch_info *d;
798
799 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
800
801 if (port_num >= n_bam_ports) {
802 pr_err("%s: invalid portno#%d\n", __func__, port_num);
803 return;
804 }
805
806 if (!gr) {
807 pr_err("%s: grmnet port is null\n", __func__);
808 return;
809 }
810
811 port = bam_ports[port_num].port;
812 d = &port->data_ch;
813
814 gbam_free_buffers(port);
815
816 spin_lock_irqsave(&port->port_lock, flags);
817 port->port_usb = 0;
818 spin_unlock_irqrestore(&port->port_lock, flags);
819
820 /* disable endpoints */
821 usb_ep_disable(gr->out);
822 usb_ep_disable(gr->in);
823
Jack Phameffd4ae2011-08-03 16:49:36 -0700824 if (test_bit(BAM_CH_OPENED, &d->flags)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825 msm_bam_dmux_close(d->id);
Jack Phameffd4ae2011-08-03 16:49:36 -0700826 clear_bit(BAM_CH_OPENED, &d->flags);
827 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700828}
829
/*
 * Attach a grmnet (USB function) instance to port @port_num: enable
 * both endpoints, reset the stats and schedule the connect work that
 * opens the BAM channel.
 *
 * Returns 0 on success or a negative errno.
 */
int gbam_connect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int ret;
	unsigned long flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	ret = usb_ep_enable(gr->in, gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out, gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, gr->out);
		/* roll back the IN endpoint association */
		gr->in->driver_data = 0;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = gr;

	/* fresh connection: zero the per-channel counters */
	d->to_host = 0;
	d->to_modem = 0;
	d->pending_with_bam = 0;
	d->tohost_drp_cnt = 0;
	d->tomodem_drp_cnt = 0;
	spin_unlock_irqrestore(&port->port_lock, flags);


	queue_work(gbam_wq, &port->connect_w);

	return 0;
}
884
/*
 * Module-level setup: create the gbam workqueue, allocate @count
 * ports (1..BAM_N_PORTS) and create the debugfs entries.
 *
 * Returns 0 on success or a negative errno; on failure all ports
 * allocated so far and the workqueue are torn down.
 */
int gbam_setup(unsigned int count)
{
	int i;
	int ret;

	pr_debug("%s: requested ports:%d\n", __func__, count);

	if (!count || count > BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d\n",
				__func__, count);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		/*
		 * Increment before the alloc and decrement on failure so
		 * the cleanup loop below only frees fully-allocated ports.
		 */
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	gbam_debugfs_init();

	return 0;
free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);

	destroy_workqueue(gbam_wq);

	return ret;
}
925}