blob: 869a541836116d7d13be8e282b8a8253cdcc4694 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/interrupt.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18#include <linux/termios.h>
19#include <mach/msm_smd.h>
20#include <linux/netdevice.h>
21#include <mach/bam_dmux.h>
22#include <linux/debugfs.h>
23#include <linux/bitops.h>
24#include <linux/termios.h>
25
Ofir Cohena1c2a872011-12-14 10:26:34 +020026#include <mach/usb_gadget_xport.h>
27#include <mach/usb_bam.h>
28
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include "u_rmnet.h"
30
31#define BAM_N_PORTS 1
Ofir Cohena1c2a872011-12-14 10:26:34 +020032#define BAM2BAM_N_PORTS 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033
34static struct workqueue_struct *gbam_wq;
35static int n_bam_ports;
Ofir Cohena1c2a872011-12-14 10:26:34 +020036static int n_bam2bam_ports;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037static unsigned bam_ch_ids[] = { 8 };
38
Jack Phameffd4ae2011-08-03 16:49:36 -070039static const char *bam_ch_names[] = { "bam_dmux_ch_8" };
40
Vamsi Krishna84579552011-11-09 15:33:22 -080041#define BAM_PENDING_LIMIT 220
Vamsi Krishna8f24f252011-11-02 11:46:08 -070042#define BAM_MUX_TX_PKT_DROP_THRESHOLD 1000
Vamsi Krishna84579552011-11-09 15:33:22 -080043#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD 500
44#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD 300
Vamsi Krishna8f24f252011-11-02 11:46:08 -070045#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT 1
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070046
47#define BAM_MUX_HDR 8
48
Vamsi Krishna8f24f252011-11-02 11:46:08 -070049#define BAM_MUX_RX_Q_SIZE 16
50#define BAM_MUX_TX_Q_SIZE 200
51#define BAM_MUX_RX_REQ_SIZE (2048 - BAM_MUX_HDR)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070052
Vamsi Krishna8f24f252011-11-02 11:46:08 -070053unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
54module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070055
Vamsi Krishna8f24f252011-11-02 11:46:08 -070056unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
57module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070058
Vamsi Krishna8f24f252011-11-02 11:46:08 -070059unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
60module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061
Vamsi Krishna8f24f252011-11-02 11:46:08 -070062unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
63module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064
Vamsi Krishna8f24f252011-11-02 11:46:08 -070065unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
66module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067
Vamsi Krishna8f24f252011-11-02 11:46:08 -070068unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
69module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
Vamsi Krishna8f24f252011-11-02 11:46:08 -070071unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
72module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073
Jack Phameffd4ae2011-08-03 16:49:36 -070074#define BAM_CH_OPENED BIT(0)
75#define BAM_CH_READY BIT(1)
Ofir Cohena1c2a872011-12-14 10:26:34 +020076#define SPS_PARAMS_PIPE_ID_MASK (0x1F)
77#define SPS_PARAMS_SPS_MODE BIT(5)
78#define SPS_PARAMS_TBE BIT(6)
79#define MSM_VENDOR_ID BIT(16)
80
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070081struct bam_ch_info {
Jack Phameffd4ae2011-08-03 16:49:36 -070082 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070083 unsigned id;
84
85 struct list_head tx_idle;
86 struct sk_buff_head tx_skb_q;
87
88 struct list_head rx_idle;
89 struct sk_buff_head rx_skb_q;
90
91 struct gbam_port *port;
92 struct work_struct write_tobam_w;
93
Ofir Cohena1c2a872011-12-14 10:26:34 +020094 struct usb_request *rx_req;
95 struct usb_request *tx_req;
96
97 u8 src_pipe_idx;
98 u8 dst_pipe_idx;
99 u8 connection_idx;
100
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700101 /* stats */
102 unsigned int pending_with_bam;
103 unsigned int tohost_drp_cnt;
104 unsigned int tomodem_drp_cnt;
105 unsigned int tx_len;
106 unsigned int rx_len;
107 unsigned long to_modem;
108 unsigned long to_host;
109};
110
/*
 * One logical rmnet-over-BAM port. port_usb is non-NULL only while a
 * gadget function is connected; port_lock protects it and the channel
 * queues. connect_w/disconnect_w run on gbam_wq.
 */
struct gbam_port {
	unsigned port_num;
	spinlock_t port_lock;

	struct grmnet *port_usb;	/* live USB side, NULL when disconnected */
	struct grmnet *gr;		/* last grmnet seen; used by BAM2BAM
					 * disconnect work after port_usb is
					 * cleared */

	struct bam_ch_info data_ch;

	struct work_struct connect_w;
	struct work_struct disconnect_w;
};
123
124static struct bam_portmaster {
125 struct gbam_port *port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700126 struct platform_driver pdrv;
127} bam_ports[BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700128
Ofir Cohena1c2a872011-12-14 10:26:34 +0200129struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700130static void gbam_start_rx(struct gbam_port *port);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200131static void gbam_start_endless_rx(struct gbam_port *port);
132static void gbam_start_endless_tx(struct gbam_port *port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133
134/*---------------misc functions---------------- */
135static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
136{
137 struct usb_request *req;
138
139 while (!list_empty(head)) {
140 req = list_entry(head->next, struct usb_request, list);
141 list_del(&req->list);
142 usb_ep_free_request(ep, req);
143 }
144}
145
146static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
147 int num,
148 void (*cb)(struct usb_ep *ep, struct usb_request *),
149 gfp_t flags)
150{
151 int i;
152 struct usb_request *req;
153
154 pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
155 ep, head, num, cb);
156
157 for (i = 0; i < num; i++) {
158 req = usb_ep_alloc_request(ep, flags);
159 if (!req) {
160 pr_debug("%s: req allocated:%d\n", __func__, i);
161 return list_empty(head) ? -ENOMEM : 0;
162 }
163 req->complete = cb;
164 list_add(&req->list, head);
165 }
166
167 return 0;
168}
169/*--------------------------------------------- */
170
171/*------------data_path----------------------------*/
/*
 * Drain tx_skb_q (modem -> host data) into the USB IN endpoint, pairing
 * each skb with an idle request from tx_idle. Stops when either the skb
 * queue or the idle-request pool is empty, or when a queue attempt fails.
 */
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		/* cable gone; nothing to write to */
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;

		list_del(&req->list);

		/*
		 * Drop the lock (but stay in the irqsave section) across
		 * usb_ep_queue: the completion handler takes port_lock and
		 * may be invoked synchronously on some UDCs.
		 */
		spin_unlock(&port->port_lock);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			/* return the request, drop the packet */
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}
217
/*
 * BAM DMUX receive callback (modem -> host path). Queues the skb toward
 * the USB IN endpoint, dropping it when USB is disconnected or when the
 * tx backlog exceeds bam_mux_tx_pkt_drop_thld.
 * @p: gbam_port registered with msm_bam_dmux_open().
 */
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		/* USB disconnected: discard */
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		/* host is not draining fast enough; tail-drop */
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_write_data_tohost(port);
}
252
253void gbam_data_write_done(void *p, struct sk_buff *skb)
254{
255 struct gbam_port *port = p;
256 struct bam_ch_info *d = &port->data_ch;
257 unsigned long flags;
258
259 if (!skb)
260 return;
261
262 dev_kfree_skb_any(skb);
263
264 spin_lock_irqsave(&port->port_lock, flags);
265
266 d->pending_with_bam--;
267
268 pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
269 port, d, d->to_modem,
270 d->pending_with_bam, port->port_num);
271
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272 spin_unlock_irqrestore(&port->port_lock, flags);
273
Vamsi Krishna84579552011-11-09 15:33:22 -0800274 queue_work(gbam_wq, &d->write_tobam_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275}
276
/*
 * Work function (write_tobam_w): move host -> modem skbs from rx_skb_q
 * into BAM DMUX, keeping at most BAM_PENDING_LIMIT writes outstanding.
 * Re-arms USB OUT reads when the backlog falls below the flow-control
 * disable threshold.
 */
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	int qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		/* account before dropping the lock for the DMUX call */
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		/* msm_bam_dmux_write may sleep/allocate; call unlocked */
		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			/* roll back accounting and drop the packet */
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock, flags);

	/* backlog drained enough: resume reading from the host */
	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
328/*-------------------------------------------------------------*/
329
330static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
331{
332 struct gbam_port *port = ep->driver_data;
333 struct bam_ch_info *d;
334 struct sk_buff *skb = req->context;
335 int status = req->status;
336
337 switch (status) {
338 case 0:
339 /* successful completion */
340 case -ECONNRESET:
341 case -ESHUTDOWN:
342 /* connection gone */
343 break;
344 default:
345 pr_err("%s: data tx ep error %d\n",
346 __func__, status);
347 break;
348 }
349
350 dev_kfree_skb_any(skb);
351
352 if (!port)
353 return;
354
355 spin_lock(&port->port_lock);
356 d = &port->data_ch;
357 list_add_tail(&req->list, &d->tx_idle);
358 spin_unlock(&port->port_lock);
359
360 gbam_write_data_tohost(port);
361}
362
/*
 * OUT-endpoint completion (host -> modem path). On success the skb is
 * queued for the DMUX writer work; the request is then either parked
 * (flow control engaged), re-armed with a fresh skb, or parked again if
 * skb allocation / re-queue fails.
 */
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		/* packet received: finalize skb length */
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection: request will not be reused */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * having call back mechanism from bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {
		/* backlog too deep: park the request; gbam_data_write_tobam
		 * restarts rx once the queue drains */
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	/* re-arm this request with a new receive buffer */
	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	/* leave headroom for the BAM MUX header */
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}
437
Ofir Cohena1c2a872011-12-14 10:26:34 +0200438static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
439{
440 int status = req->status;
441
442 pr_debug("%s status: %d\n", __func__, status);
443}
444
445static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
446{
447 int status = req->status;
448
449 pr_debug("%s status: %d\n", __func__, status);
450}
451
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700452static void gbam_start_rx(struct gbam_port *port)
453{
454 struct usb_request *req;
455 struct bam_ch_info *d;
456 struct usb_ep *ep;
457 unsigned long flags;
458 int ret;
459 struct sk_buff *skb;
460
461 spin_lock_irqsave(&port->port_lock, flags);
462 if (!port->port_usb) {
463 spin_unlock_irqrestore(&port->port_lock, flags);
464 return;
465 }
466
467 d = &port->data_ch;
468 ep = port->port_usb->out;
469
470 while (port->port_usb && !list_empty(&d->rx_idle)) {
Vamsi Krishna84579552011-11-09 15:33:22 -0800471
472 if (bam_mux_rx_fctrl_support &&
473 d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
474 break;
475
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700476 req = list_first_entry(&d->rx_idle, struct usb_request, list);
477
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700478 skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700479 if (!skb)
480 break;
481 skb_reserve(skb, BAM_MUX_HDR);
482
483 list_del(&req->list);
484 req->buf = skb->data;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700485 req->length = bam_mux_rx_req_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700486 req->context = skb;
487
488 spin_unlock_irqrestore(&port->port_lock, flags);
489 ret = usb_ep_queue(ep, req, GFP_ATOMIC);
490 spin_lock_irqsave(&port->port_lock, flags);
491 if (ret) {
492 dev_kfree_skb_any(skb);
493
494 if (printk_ratelimit())
495 pr_err("%s: rx queue failed\n", __func__);
496
497 if (port->port_usb)
498 list_add(&req->list, &d->rx_idle);
499 else
500 usb_ep_free_request(ep, req);
501 break;
502 }
503 }
504 spin_unlock_irqrestore(&port->port_lock, flags);
505}
506
Ofir Cohena1c2a872011-12-14 10:26:34 +0200507static void gbam_start_endless_rx(struct gbam_port *port)
508{
509 struct bam_ch_info *d = &port->data_ch;
510 int status;
511
512 status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
513 if (status)
514 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
515}
516
517static void gbam_start_endless_tx(struct gbam_port *port)
518{
519 struct bam_ch_info *d = &port->data_ch;
520 int status;
521
522 status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
523 if (status)
524 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
525}
526
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700527static void gbam_start_io(struct gbam_port *port)
528{
529 unsigned long flags;
530 struct usb_ep *ep;
531 int ret;
532 struct bam_ch_info *d;
533
534 pr_debug("%s: port:%p\n", __func__, port);
535
536 spin_lock_irqsave(&port->port_lock, flags);
537 if (!port->port_usb) {
538 spin_unlock_irqrestore(&port->port_lock, flags);
539 return;
540 }
541
542 d = &port->data_ch;
543 ep = port->port_usb->out;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700544 ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700545 gbam_epout_complete, GFP_ATOMIC);
546 if (ret) {
547 pr_err("%s: rx req allocation failed\n", __func__);
548 return;
549 }
550
551 ep = port->port_usb->in;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700552 ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553 gbam_epin_complete, GFP_ATOMIC);
554 if (ret) {
555 pr_err("%s: tx req allocation failed\n", __func__);
556 gbam_free_requests(ep, &d->rx_idle);
557 return;
558 }
559
560 spin_unlock_irqrestore(&port->port_lock, flags);
561
562 /* queue out requests */
563 gbam_start_rx(port);
564}
565
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600566static void gbam_notify(void *p, int event, unsigned long data)
567{
568 switch (event) {
569 case BAM_DMUX_RECEIVE:
570 gbam_data_recv_cb(p, (struct sk_buff *)(data));
571 break;
572 case BAM_DMUX_WRITE_DONE:
573 gbam_data_write_done(p, (struct sk_buff *)(data));
574 break;
575 }
576}
577
Ofir Cohena1c2a872011-12-14 10:26:34 +0200578static void gbam_free_buffers(struct gbam_port *port)
579{
580 struct sk_buff *skb;
581 unsigned long flags;
582 struct bam_ch_info *d;
583
584 spin_lock_irqsave(&port->port_lock, flags);
585
586 if (!port || !port->port_usb)
587 goto free_buf_out;
588
589 d = &port->data_ch;
590
591 gbam_free_requests(port->port_usb->in, &d->tx_idle);
592 gbam_free_requests(port->port_usb->out, &d->rx_idle);
593
594 while ((skb = __skb_dequeue(&d->tx_skb_q)))
595 dev_kfree_skb_any(skb);
596
597 while ((skb = __skb_dequeue(&d->rx_skb_q)))
598 dev_kfree_skb_any(skb);
599
600free_buf_out:
601 spin_unlock_irqrestore(&port->port_lock, flags);
602}
603
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800604static void gbam_disconnect_work(struct work_struct *w)
605{
606 struct gbam_port *port =
607 container_of(w, struct gbam_port, disconnect_w);
608 struct bam_ch_info *d = &port->data_ch;
609
610 if (!test_bit(BAM_CH_OPENED, &d->flags))
611 return;
612
613 msm_bam_dmux_close(d->id);
614 clear_bit(BAM_CH_OPENED, &d->flags);
615}
616
Ofir Cohena1c2a872011-12-14 10:26:34 +0200617static void gbam2bam_disconnect_work(struct work_struct *w)
618{
619 struct gbam_port *port =
620 container_of(w, struct gbam_port, disconnect_w);
621 unsigned long flags;
622
623 spin_lock_irqsave(&port->port_lock, flags);
624 port->port_usb = 0;
625 spin_unlock_irqrestore(&port->port_lock, flags);
626
627 /* disable endpoints */
628 usb_ep_disable(port->gr->out);
629 usb_ep_disable(port->gr->in);
630
631}
632
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700633static void gbam_connect_work(struct work_struct *w)
634{
635 struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
636 struct bam_ch_info *d = &port->data_ch;
637 int ret;
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800638 unsigned long flags;
639
640 spin_lock_irqsave(&port->port_lock, flags);
641 if (!port->port_usb) {
642 spin_unlock_irqrestore(&port->port_lock, flags);
643 return;
644 }
645 spin_unlock_irqrestore(&port->port_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700646
Jack Phameffd4ae2011-08-03 16:49:36 -0700647 if (!test_bit(BAM_CH_READY, &d->flags))
648 return;
649
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600650 ret = msm_bam_dmux_open(d->id, port, gbam_notify);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700651 if (ret) {
652 pr_err("%s: unable open bam ch:%d err:%d\n",
653 __func__, d->id, ret);
654 return;
655 }
Jack Phameffd4ae2011-08-03 16:49:36 -0700656 set_bit(BAM_CH_OPENED, &d->flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657
658 gbam_start_io(port);
659
660 pr_debug("%s: done\n", __func__);
661}
662
Ofir Cohena1c2a872011-12-14 10:26:34 +0200663static void gbam2bam_connect_work(struct work_struct *w)
Jack Phameffd4ae2011-08-03 16:49:36 -0700664{
Ofir Cohena1c2a872011-12-14 10:26:34 +0200665 struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
666 struct bam_ch_info *d = &port->data_ch;
667 u32 sps_params;
668 int ret;
Jack Phameffd4ae2011-08-03 16:49:36 -0700669
Ofir Cohena1c2a872011-12-14 10:26:34 +0200670 ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
671 &d->dst_pipe_idx);
672 if (ret) {
673 pr_err("%s: usb_bam_connect failed: err:%d\n",
674 __func__, ret);
675 return;
676 }
Jack Phameffd4ae2011-08-03 16:49:36 -0700677
Ofir Cohena1c2a872011-12-14 10:26:34 +0200678 d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
679 if (!d->rx_req)
680 return;
Jack Phameffd4ae2011-08-03 16:49:36 -0700681
Ofir Cohena1c2a872011-12-14 10:26:34 +0200682 d->rx_req->context = port;
683 d->rx_req->complete = gbam_endless_rx_complete;
684 d->rx_req->length = 0;
685 sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx |
686 MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
687 d->rx_req->udc_priv = sps_params;
688 d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
689 if (!d->tx_req)
690 return;
Jack Phameffd4ae2011-08-03 16:49:36 -0700691
Ofir Cohena1c2a872011-12-14 10:26:34 +0200692 d->tx_req->context = port;
693 d->tx_req->complete = gbam_endless_tx_complete;
694 d->tx_req->length = 0;
695 sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx |
696 MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
697 d->tx_req->udc_priv = sps_params;
Jack Phameffd4ae2011-08-03 16:49:36 -0700698
Ofir Cohena1c2a872011-12-14 10:26:34 +0200699 /* queue in & out requests */
700 gbam_start_endless_rx(port);
701 gbam_start_endless_tx(port);
Jack Phameffd4ae2011-08-03 16:49:36 -0700702
Ofir Cohena1c2a872011-12-14 10:26:34 +0200703 pr_debug("%s: done\n", __func__);
Jack Phameffd4ae2011-08-03 16:49:36 -0700704}
705
706/* BAM data channel ready, allow attempt to open */
/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	/* find the port whose channel name matches this platform device */
	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock_irqrestore(&port->port_lock, flags);

			break;
		}
	}

	return 0;
}
736
737/* BAM data channel went inactive, so close it */
/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct usb_ep *ep_in = NULL;
	struct usb_ep *ep_out = NULL;
	unsigned long flags;
	int i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			/* snapshot endpoints under the lock; flush unlocked */
			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock_irqrestore(&port->port_lock, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			/* NOTE(review): closed unconditionally, even if
			 * BAM_CH_OPENED was never set — confirm
			 * msm_bam_dmux_close tolerates an unopened id */
			msm_bam_dmux_close(d->id);

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}
778
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700779static void gbam_port_free(int portno)
780{
781 struct gbam_port *port = bam_ports[portno].port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700782 struct platform_driver *pdrv = &bam_ports[portno].pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700783
Jack Phameffd4ae2011-08-03 16:49:36 -0700784 if (port) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700785 kfree(port);
Jack Phameffd4ae2011-08-03 16:49:36 -0700786 platform_driver_unregister(pdrv);
787 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700788}
789
Ofir Cohena1c2a872011-12-14 10:26:34 +0200790static void gbam2bam_port_free(int portno)
791{
792 struct gbam_port *port = bam2bam_ports[portno];
793
794 kfree(port);
795}
796
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700797static int gbam_port_alloc(int portno)
798{
799 struct gbam_port *port;
800 struct bam_ch_info *d;
Jack Phameffd4ae2011-08-03 16:49:36 -0700801 struct platform_driver *pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700802
803 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
804 if (!port)
805 return -ENOMEM;
806
807 port->port_num = portno;
808
809 /* port initialization */
810 spin_lock_init(&port->port_lock);
811 INIT_WORK(&port->connect_w, gbam_connect_work);
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800812 INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700813
814 /* data ch */
815 d = &port->data_ch;
816 d->port = port;
817 INIT_LIST_HEAD(&d->tx_idle);
818 INIT_LIST_HEAD(&d->rx_idle);
819 INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
820 skb_queue_head_init(&d->tx_skb_q);
821 skb_queue_head_init(&d->rx_skb_q);
822 d->id = bam_ch_ids[portno];
823
824 bam_ports[portno].port = port;
825
Jack Phameffd4ae2011-08-03 16:49:36 -0700826 pdrv = &bam_ports[portno].pdrv;
827 pdrv->probe = gbam_data_ch_probe;
828 pdrv->remove = gbam_data_ch_remove;
829 pdrv->driver.name = bam_ch_names[portno];
830 pdrv->driver.owner = THIS_MODULE;
831
832 platform_driver_register(pdrv);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200833 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
834
835 return 0;
836}
837
838static int gbam2bam_port_alloc(int portno)
839{
840 struct gbam_port *port;
841 struct bam_ch_info *d;
842
843 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
844 if (!port)
845 return -ENOMEM;
846
847 port->port_num = portno;
848
849 /* port initialization */
850 spin_lock_init(&port->port_lock);
851
852 INIT_WORK(&port->connect_w, gbam2bam_connect_work);
853 INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
854
855 /* data ch */
856 d = &port->data_ch;
857 d->port = port;
858 bam2bam_ports[portno] = port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700859
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700860 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
861
862 return 0;
863}
864
865#if defined(CONFIG_DEBUG_FS)
866#define DEBUG_BUF_SIZE 1024
/*
 * debugfs read: dump per-port counters and channel state into a
 * temporary buffer and copy the requested window to userspace.
 */
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	char *buf;
	unsigned long flags;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock_irqrestore(&port->port_lock, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}
918
919static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
920 size_t count, loff_t *ppos)
921{
922 struct gbam_port *port;
923 struct bam_ch_info *d;
924 int i;
925 unsigned long flags;
926
927 for (i = 0; i < n_bam_ports; i++) {
928 port = bam_ports[i].port;
929 if (!port)
930 continue;
931
932 spin_lock_irqsave(&port->port_lock, flags);
933
934 d = &port->data_ch;
935
936 d->to_host = 0;
937 d->to_modem = 0;
938 d->pending_with_bam = 0;
939 d->tohost_drp_cnt = 0;
940 d->tomodem_drp_cnt = 0;
941
942 spin_unlock_irqrestore(&port->port_lock, flags);
943 }
944 return count;
945}
946
/* debugfs "status" node: read dumps per-port stats, write resets them. */
const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};
951
/* Create /sys/kernel/debug/usb_rmnet/status; best-effort, errors are
 * swallowed (the directory is removed again if file creation fails). */
static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", 0);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
966#else
967static void gam_debugfs_init(void) { }
968#endif
969
Ofir Cohena1c2a872011-12-14 10:26:34 +0200970void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971{
972 struct gbam_port *port;
973 unsigned long flags;
974 struct bam_ch_info *d;
975
976 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
977
Ofir Cohena1c2a872011-12-14 10:26:34 +0200978 if (trans == USB_GADGET_XPORT_BAM &&
979 port_num >= n_bam_ports) {
980 pr_err("%s: invalid bam portno#%d\n",
981 __func__, port_num);
982 return;
983 }
984
985 if (trans == USB_GADGET_XPORT_BAM2BAM &&
986 port_num >= n_bam2bam_ports) {
987 pr_err("%s: invalid bam2bam portno#%d\n",
988 __func__, port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700989 return;
990 }
991
992 if (!gr) {
993 pr_err("%s: grmnet port is null\n", __func__);
994 return;
995 }
Ofir Cohena1c2a872011-12-14 10:26:34 +0200996 if (trans == USB_GADGET_XPORT_BAM)
997 port = bam_ports[port_num].port;
998 else
999 port = bam2bam_ports[port_num];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001000
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001001 d = &port->data_ch;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001002 port->gr = gr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001003
Ofir Cohena1c2a872011-12-14 10:26:34 +02001004 if (trans == USB_GADGET_XPORT_BAM) {
1005 gbam_free_buffers(port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001006
Ofir Cohena1c2a872011-12-14 10:26:34 +02001007 spin_lock_irqsave(&port->port_lock, flags);
1008 port->port_usb = 0;
1009 spin_unlock_irqrestore(&port->port_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001010
Ofir Cohena1c2a872011-12-14 10:26:34 +02001011 /* disable endpoints */
1012 usb_ep_disable(gr->out);
1013 usb_ep_disable(gr->in);
1014 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001015
Vamsi Krishna1ad076d2011-11-10 15:03:30 -08001016 queue_work(gbam_wq, &port->disconnect_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001017}
1018
Ofir Cohena1c2a872011-12-14 10:26:34 +02001019int gbam_connect(struct grmnet *gr, u8 port_num,
1020 enum transport_type trans, u8 connection_idx)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001021{
1022 struct gbam_port *port;
1023 struct bam_ch_info *d;
1024 int ret;
1025 unsigned long flags;
1026
1027 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
1028
Ofir Cohena1c2a872011-12-14 10:26:34 +02001029 if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
1030 pr_err("%s: invalid portno#%d\n", __func__, port_num);
1031 return -ENODEV;
1032 }
1033
1034 if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001035 pr_err("%s: invalid portno#%d\n", __func__, port_num);
1036 return -ENODEV;
1037 }
1038
1039 if (!gr) {
1040 pr_err("%s: grmnet port is null\n", __func__);
1041 return -ENODEV;
1042 }
1043
Ofir Cohena1c2a872011-12-14 10:26:34 +02001044 if (trans == USB_GADGET_XPORT_BAM)
1045 port = bam_ports[port_num].port;
1046 else
1047 port = bam2bam_ports[port_num];
1048
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001049 d = &port->data_ch;
1050
1051 ret = usb_ep_enable(gr->in, gr->in_desc);
1052 if (ret) {
1053 pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
1054 __func__, gr->in);
1055 return ret;
1056 }
1057 gr->in->driver_data = port;
1058
1059 ret = usb_ep_enable(gr->out, gr->out_desc);
1060 if (ret) {
1061 pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
1062 __func__, gr->out);
1063 gr->in->driver_data = 0;
1064 return ret;
1065 }
1066 gr->out->driver_data = port;
1067
1068 spin_lock_irqsave(&port->port_lock, flags);
1069 port->port_usb = gr;
1070
Ofir Cohena1c2a872011-12-14 10:26:34 +02001071 if (trans == USB_GADGET_XPORT_BAM) {
1072 d->to_host = 0;
1073 d->to_modem = 0;
1074 d->pending_with_bam = 0;
1075 d->tohost_drp_cnt = 0;
1076 d->tomodem_drp_cnt = 0;
1077 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001078 spin_unlock_irqrestore(&port->port_lock, flags);
1079
Ofir Cohena1c2a872011-12-14 10:26:34 +02001080 if (trans == USB_GADGET_XPORT_BAM2BAM)
1081 d->connection_idx = connection_idx;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001082
1083 queue_work(gbam_wq, &port->connect_w);
1084
1085 return 0;
1086}
1087
Ofir Cohena1c2a872011-12-14 10:26:34 +02001088int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001089{
1090 int i;
1091 int ret;
1092
Ofir Cohena1c2a872011-12-14 10:26:34 +02001093 pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
1094 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001095
Ofir Cohena1c2a872011-12-14 10:26:34 +02001096 if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
1097 || no_bam2bam_port > BAM2BAM_N_PORTS) {
1098 pr_err("%s: Invalid num of ports count:%d,%d\n",
1099 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001100 return -EINVAL;
1101 }
1102
1103 gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1104 if (!gbam_wq) {
1105 pr_err("%s: Unable to create workqueue gbam_wq\n",
1106 __func__);
1107 return -ENOMEM;
1108 }
1109
Ofir Cohena1c2a872011-12-14 10:26:34 +02001110 for (i = 0; i < no_bam_port; i++) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301111 n_bam_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001112 ret = gbam_port_alloc(i);
1113 if (ret) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301114 n_bam_ports--;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001115 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1116 goto free_bam_ports;
1117 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001118 }
1119
Ofir Cohena1c2a872011-12-14 10:26:34 +02001120 for (i = 0; i < no_bam2bam_port; i++) {
1121 n_bam2bam_ports++;
1122 ret = gbam2bam_port_alloc(i);
1123 if (ret) {
1124 n_bam2bam_ports--;
1125 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1126 goto free_bam_ports;
1127 }
1128 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001129 gbam_debugfs_init();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001130 return 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001131
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001132free_bam_ports:
1133 for (i = 0; i < n_bam_ports; i++)
1134 gbam_port_free(i);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001135 for (i = 0; i < n_bam2bam_ports; i++)
1136 gbam2bam_port_free(i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001137 destroy_workqueue(gbam_wq);
1138
1139 return ret;
1140}