/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static unsigned bam_ch_ids[] = { 8 };

#define TX_PKT_DROP_THRESHOLD		1000
#define RX_PKT_FLOW_CTRL_EN_THRESHOLD	1000
#define RX_PKT_FLOW_CTRL_DISABLE	500
#define RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR			8

#define RX_Q_SIZE			16
#define TX_Q_SIZE			200
#define RX_REQ_SIZE			(2048 - BAM_MUX_HDR)
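
/*
 * Each rx (OUT) request is backed by a 2 KB skb: rx_req_size bytes of
 * payload plus BAM_MUX_HDR bytes of headroom reserved for the bam_dmux
 * header.
 */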

unsigned int tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
module_param(tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
module_param(rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int rx_fctrl_support = RX_PKT_FLOW_CTRL_SUPPORT;
module_param(rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
module_param(rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int tx_q_size = TX_Q_SIZE;
module_param(tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int rx_q_size = RX_Q_SIZE;
module_param(rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int rx_req_size = RX_REQ_SIZE;
module_param(rx_req_size, uint, S_IRUGO | S_IWUSR);

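/*
 * Per-channel state: the idle usb_request pools and skb queues for
 * each direction, plus the counters reported through debugfs and used
 * by the flow-control thresholds above.
 */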
struct bam_ch_info {
	atomic_t		opened;
	unsigned		id;

	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;
	struct work_struct	write_tobam_w;

	/* stats */
	unsigned int		pending_with_bam;
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};

struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock;

	struct grmnet		*port_usb;

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
} bam_ports[BAM_N_PORTS];

static void gbam_start_rx(struct gbam_port *port);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}
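
/*
 * Allocate @num requests on @ep, register @cb as their completion
 * handler and park them on @head.  Running out of memory part-way is
 * tolerated as long as at least one request was allocated.
 */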
static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int			i;
	struct usb_request	*req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p\n", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__, i);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
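/*
 * Pump data from tx_skb_q out of the IN endpoint: pair each queued skb
 * with an idle tx request and hand it to the UDC.  Stops when either
 * the skb queue or the idle-request pool runs dry.
 */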
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long		flags;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb;
	int			ret;
	struct usb_request	*req;
	struct usb_ep		*ep;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;

		list_del(&req->list);

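		/*
		 * Drop the lock while calling into the UDC:
		 * gbam_epin_complete() takes port_lock and some
		 * controllers can invoke the completion from within
		 * usb_ep_queue().
		 */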
		spin_unlock(&port->port_lock);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

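/*
 * Receive callback from bam_dmux: @skb carries data from the modem,
 * headed for the USB host.  Once tx_skb_q backs up past
 * tx_pkt_drop_thld the packet is dropped instead of queued.
 */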
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_write_data_tohost(port);
}

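/*
 * Write-done callback from bam_dmux: the mux driver is finished with
 * an skb we handed it via msm_bam_dmux_write().  Restart rx unless
 * flow control still holds it off.
 */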
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	if (rx_fctrl_support &&
			d->pending_with_bam >= rx_fctrl_dis_thld) {

		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_start_rx(port);
}

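/*
 * Worker that drains rx_skb_q (packets received from the USB host)
 * into the BAM DMUX channel.  pending_with_bam counts skbs the mux
 * driver has not yet consumed; the flow-control thresholds key off it.
 */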
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct sk_buff		*skb;
	unsigned long		flags;
	int			ret;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while ((skb = __skb_dequeue(&d->rx_skb_q))) {
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
/*-------------------------------------------------------------*/

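/*
 * IN endpoint completion: the transfer to the host finished (or the
 * connection went away).  Free the skb, recycle the request onto
 * tx_idle and pump the next packet.
 */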
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d;
	struct sk_buff		*skb = req->context;
	int			status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock);

	gbam_write_data_tohost(port);
}

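/*
 * OUT endpoint completion: a packet arrived from the host.  Queue the
 * skb for the write_tobam worker, then re-arm the request with a fresh
 * skb.  If too much data is pending with BAM, the request is parked on
 * rx_idle instead, until gbam_data_write_done() restarts rx.
 */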
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb = req->context;
	int			status = req->status;
	int			queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = NULL;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
					__func__, ep->name, status,
					req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver.
	 */
	if (rx_fctrl_support &&
			d->pending_with_bam >= rx_fctrl_en_thld) {

		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}

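/*
 * Arm every request on rx_idle with a freshly allocated skb and queue
 * it on the OUT endpoint.  Called when I/O starts and again whenever
 * flow control re-opens the pipe.
 */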
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request	*req;
	struct bam_ch_info	*d;
	struct usb_ep		*ep;
	unsigned long		flags;
	int			ret;
	struct sk_buff		*skb;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {
		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

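/*
 * Allocate both request pools and kick off the first round of OUT
 * transfers.  Runs from the connect worker once the BAM channel is
 * open.
 */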
static void gbam_start_io(struct gbam_port *port)
{
	unsigned long		flags;
	struct usb_ep		*ep;
	int			ret;
	struct bam_ch_info	*d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		gbam_free_requests(port->port_usb->out, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

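/*
 * Deferred half of gbam_connect(): open the BAM DMUX channel and start
 * I/O.  Kept on a workqueue so the connect path itself does not have
 * to block.
 */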
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;

	ret = msm_bam_dmux_open(d->id, port,
			gbam_data_recv_cb,
			gbam_data_write_done);
	if (ret) {
		pr_err("%s: unable to open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	atomic_set(&d->opened, 1);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;

	kfree(port);
}

static int gbam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock);
	INIT_WORK(&port->connect_w, gbam_connect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	char			*buf;
	unsigned long		flags;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem_dcnt: %u\n"
				"tx_buf_len: %u\n"
				"data_ch_opened: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, atomic_read(&d->opened));

		spin_unlock_irqrestore(&port->port_lock, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock_irqrestore(&port->port_lock, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", NULL);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, NULL,
			&gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

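/*
 * Release everything still in flight on @port: free both request pools
 * and flush both skb queues.  Called on disconnect.
 */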
static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff		*skb;
	unsigned long		flags;
	struct bam_ch_info	*d;

	if (!port)
		return;

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}

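/*
 * Tear down the data path for @port_num: flush buffers, unhook the
 * grmnet instance, disable both endpoints and close the BAM channel if
 * it was opened.
 */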
void gbam_disconnect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port	*port;
	unsigned long		flags;
	struct bam_ch_info	*d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	gbam_free_buffers(port);

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = NULL;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints */
	usb_ep_disable(gr->out);
	usb_ep_disable(gr->in);

	if (atomic_read(&d->opened))
		msm_bam_dmux_close(d->id);

	atomic_set(&d->opened, 0);
}

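/*
 * Bring up the data path for @port_num on @gr: enable both endpoints,
 * reset the stats and defer the BAM channel open to the connect
 * worker.
 */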
int gbam_connect(struct grmnet *gr, u8 port_num)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			ret;
	unsigned long		flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	port = bam_ports[port_num].port;
	d = &port->data_ch;

	ret = usb_ep_enable(gr->in, gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
				__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out, gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
				__func__, gr->out);
		gr->in->driver_data = NULL;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = gr;

	d->to_host = 0;
	d->to_modem = 0;
	d->pending_with_bam = 0;
	d->tohost_drp_cnt = 0;
	d->tomodem_drp_cnt = 0;
	spin_unlock_irqrestore(&port->port_lock, flags);

	queue_work(gbam_wq, &port->connect_w);

	return 0;
}

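/*
 * One-time setup: create the workqueue and allocate @count ports (at
 * most BAM_N_PORTS).  On failure, all ports allocated so far are freed
 * and the workqueue is destroyed.
 */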
int gbam_setup(unsigned int count)
{
	int	i;
	int	ret;

	pr_debug("%s: requested ports:%d\n", __func__, count);

	if (!count || count > BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d\n",
				__func__, count);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	gbam_debugfs_init();

	return 0;
free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);

	destroy_workqueue(gbam_wq);

	return ret;
}