/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static unsigned bam_ch_ids[] = { 8 };

#define TX_PKT_DROP_THRESHOLD		1000
#define RX_PKT_FLOW_CTRL_EN_THRESHOLD	1000
#define RX_PKT_FLOW_CTRL_DISABLE	500
#define RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR			8

#define RX_Q_SIZE			16
#define TX_Q_SIZE			200
#define RX_REQ_SIZE			(2048 - BAM_MUX_HDR)

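/*
 * All of the above defaults can be overridden at module load time, or at
 * runtime through sysfs (S_IRUGO | S_IWUSR), via the parameters below.
 */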
unsigned int tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
module_param(tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
module_param(rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int rx_fctrl_support = RX_PKT_FLOW_CTRL_SUPPORT;
module_param(rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
module_param(rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int tx_q_size = TX_Q_SIZE;
module_param(tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int rx_q_size = RX_Q_SIZE;
module_param(rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int rx_req_size = RX_REQ_SIZE;
module_param(rx_req_size, uint, S_IRUGO | S_IWUSR);

struct bam_ch_info {
	atomic_t		opened;
	unsigned		id;

	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;
	struct work_struct	write_tobam_w;

	/* stats */
	unsigned int		pending_with_bam;
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};

struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock;

	struct grmnet		*port_usb;

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
} bam_ports[BAM_N_PORTS];

static void gbam_start_rx(struct gbam_port *port);

/*---------------misc functions---------------- */
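/* Return every usb_request on @head to endpoint @ep. */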
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request *req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}

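/*
 * Allocate @num requests on @ep, attach completion handler @cb and park
 * them on the @head idle list. Returns 0 as long as at least one request
 * was allocated, -ENOMEM if none could be.
 */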
static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int i;
	struct usb_request *req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__, i);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
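/*
 * Drain tx_skb_q toward the host: pair each queued skb with an idle IN
 * request and submit it. The port lock is dropped around usb_ep_queue()
 * because the completion handler may run synchronously and retake it.
 */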
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;

		list_del(&req->list);

		spin_unlock(&port->port_lock);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

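/*
 * Receive callback from the BAM DMUX driver: an skb arrived from the
 * modem. Queue it toward the host, or drop it (counted, ratelimited)
 * once tx_skb_q is past tx_pkt_drop_thld.
 */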
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_write_data_tohost(port);
}

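/*
 * Write-done callback from the BAM DMUX driver: the modem consumed one
 * skb we submitted. Restart the OUT path unless flow control still has
 * too many packets pending with BAM.
 */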
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	if (rx_fctrl_support &&
			d->pending_with_bam >= rx_fctrl_dis_thld) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_start_rx(port);
}

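/*
 * Work function: push every skb on rx_skb_q into the BAM DMUX channel.
 * The port lock is dropped across msm_bam_dmux_write(), which may
 * re-enter this driver through gbam_notify().
 */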
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while ((skb = __skb_dequeue(&d->rx_skb_q))) {
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
/*-------------------------------------------------------------*/

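/*
 * IN endpoint completion: the host read one packet. Recycle the request
 * onto tx_idle and try to stream the next pending skb.
 */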
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock);

	gbam_write_data_tohost(port);
}

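/*
 * OUT endpoint completion: data arrived from the host. Queue the skb for
 * the write_tobam_w worker, then re-arm the request with a fresh skb,
 * unless flow control is active; in that case the request parks on
 * rx_idle until gbam_data_write_done() restarts rx.
 */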
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = NULL;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
					__func__, ep->name, status,
					req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver.
	 */
	if (rx_fctrl_support &&
			d->pending_with_bam >= rx_fctrl_en_thld) {
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}

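/*
 * Prime the OUT endpoint: give every request on rx_idle a fresh skb and
 * queue it so the host can start (or resume) sending data.
 */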
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {
		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

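/*
 * Allocate the rx/tx request pools for a freshly connected port and kick
 * off the OUT path. Called from the connect worker once the BAM channel
 * is open.
 */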
static void gbam_start_io(struct gbam_port *port)
{
	unsigned long flags;
	struct usb_ep *ep;
	int ret;
	struct bam_ch_info *d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		gbam_free_requests(port->port_usb->out, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

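/* Event dispatcher registered with the BAM DMUX driver on channel open. */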
static void gbam_notify(void *p, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		gbam_data_recv_cb(p, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		gbam_data_write_done(p, (struct sk_buff *)(data));
		break;
	}
}

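/*
 * Worker: open the BAM DMUX channel for this port, then start I/O. Runs
 * on gbam_wq because msm_bam_dmux_open() may block.
 */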
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable to open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	atomic_set(&d->opened, 1);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

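/* Release a port allocated by gbam_port_alloc(); kfree(NULL) is a no-op. */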
static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;

	kfree(port);
}

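/*
 * Allocate and initialize one gbam_port: set up its lock, work items and
 * packet queues, and bind it to its BAM channel id from bam_ch_ids[].
 */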
static int gbam_port_alloc(int portno)
{
	struct gbam_port *port;
	struct bam_ch_info *d;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock);
	INIT_WORK(&port->connect_w, gbam_connect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE 1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	char *buf;
	unsigned long flags;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"data_ch_opened: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, atomic_read(&d->opened));

		spin_unlock_irqrestore(&port->port_lock, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock_irqrestore(&port->port_lock, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", 0);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

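/* Free all requests and drop any skbs still queued on the port. */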
static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct bam_ch_info *d;

	if (!port)
		return;

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}

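/*
 * Tear down a data session: flush buffers, detach the USB side, disable
 * both endpoints and close the BAM channel if it was opened.
 */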
void gbam_disconnect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port *port;
	unsigned long flags;
	struct bam_ch_info *d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	gbam_free_buffers(port);

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = NULL;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints */
	usb_ep_disable(gr->out);
	usb_ep_disable(gr->in);

	if (atomic_read(&d->opened))
		msm_bam_dmux_close(d->id);

	atomic_set(&d->opened, 0);
}

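/*
 * Bring up a data session on @port_num: enable both endpoints, reset the
 * statistics and defer the (possibly blocking) BAM channel open to the
 * connect worker.
 */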
int gbam_connect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int ret;
	unsigned long flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	ret = usb_ep_enable(gr->in, gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
				__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out, gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
				__func__, gr->out);
		gr->in->driver_data = NULL;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = gr;

	d->to_host = 0;
	d->to_modem = 0;
	d->pending_with_bam = 0;
	d->tohost_drp_cnt = 0;
	d->tomodem_drp_cnt = 0;
	spin_unlock_irqrestore(&port->port_lock, flags);

	queue_work(gbam_wq, &port->connect_w);

	return 0;
}

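/*
 * One-time setup: validate @count, create the driver workqueue and
 * allocate the requested ports. On failure, every port allocated so far
 * is released again.
 */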
int gbam_setup(unsigned int count)
{
	int i;
	int ret;

	pr_debug("%s: requested ports:%d\n", __func__, count);

	if (!count || count > BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d\n",
				__func__, count);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	gbam_debugfs_init();

	return 0;

free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);

	destroy_workqueue(gbam_wq);

	return ret;
}
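
/*
 * A minimal usage sketch, assuming a grmnet-based function driver; the
 * variable name (gr) and single-port setup are illustrative, not part of
 * this file's API:
 *
 *	struct grmnet gr;
 *
 *	// once, at function bind time:
 *	if (gbam_setup(1))
 *		goto fail;
 *
 *	// in set_alt(), after picking in/out endpoints and descriptors:
 *	if (gbam_connect(&gr, 0))
 *		goto fail;
 *
 *	// in disable() or at unbind:
 *	gbam_disconnect(&gr, 0);
 */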