/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>

#include <mach/usb_gadget_xport.h>
#include <mach/usb_bam.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1
#define BAM2BAM_N_PORTS	1

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static int n_bam2bam_ports;
static unsigned bam_ch_ids[] = { 8 };

static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

#define BAM_PENDING_LIMIT			220
#define BAM_MUX_TX_PKT_DROP_THRESHOLD		1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD		500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD		300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR				8

#define BAM_MUX_RX_Q_SIZE			16
#define BAM_MUX_TX_Q_SIZE			200
#define BAM_MUX_RX_REQ_SIZE			(2048 - BAM_MUX_HDR)

unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

#define BAM_CH_OPENED			BIT(0)
#define BAM_CH_READY			BIT(1)
#define SPS_PARAMS_PIPE_ID_MASK		(0x1F)
#define SPS_PARAMS_SPS_MODE		BIT(5)
#define SPS_PARAMS_TBE			BIT(6)
#define MSM_VENDOR_ID			BIT(16)

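/*
 * Per-port BAM data channel state: the BAM DMUX channel id, idle USB
 * request pools and skb queues for each direction, the BAM2BAM pipe
 * bookkeeping, and the packet counters exposed through debugfs.
 */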
struct bam_ch_info {
	unsigned long		flags;
	unsigned		id;

	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;
	struct work_struct	write_tobam_w;

	struct usb_request	*rx_req;
	struct usb_request	*tx_req;

	u8			src_pipe_idx;
	u8			dst_pipe_idx;
	u8			connection_idx;

	/* stats */
	unsigned int		pending_with_bam;
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};

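/*
 * One logical rmnet data port: the grmnet instance bound from the USB
 * function driver, its BAM channel state, and the work items used to
 * connect/disconnect outside of atomic context.
 */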
struct gbam_port {
	unsigned port_num;
	spinlock_t port_lock;

	struct grmnet *port_usb;
	struct grmnet *gr;

	struct bam_ch_info data_ch;

	struct work_struct connect_w;
	struct work_struct disconnect_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
static void gbam_start_rx(struct gbam_port *port);
static void gbam_start_endless_rx(struct gbam_port *port);
static void gbam_start_endless_tx(struct gbam_port *port);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request *req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}

static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int i;
	struct usb_request *req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p\n", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__, i);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
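/*
 * Drain tx_skb_q (data received from the modem over BAM DMUX) into the
 * idle IN requests and queue them to the USB host; the port lock is
 * dropped across usb_ep_queue().
 */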
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long flags;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;

		list_del(&req->list);

		spin_unlock(&port->port_lock);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

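/*
 * Receive callback from the BAM DMUX driver: a packet arrived from the
 * modem. Queue it toward the USB host, dropping it once the tx queue
 * grows past bam_mux_tx_pkt_drop_thld.
 */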
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock, flags);

	gbam_write_data_tohost(port);
}

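/*
 * Write-done callback from BAM DMUX: a packet previously handed to the
 * modem has been consumed. Free the skb, drop the in-flight count and
 * kick the tobam worker to push more data.
 */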
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port *port = p;
	struct bam_ch_info *d = &port->data_ch;
	unsigned long flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}

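/*
 * Workqueue handler: move packets received from the USB host (rx_skb_q)
 * into the BAM DMUX channel, keeping at most BAM_PENDING_LIMIT packets
 * in flight, and restart USB OUT transfers once the backlog drains
 * below the flow-control disable threshold.
 */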
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	int qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock, flags);

	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
/*-------------------------------------------------------------*/

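/*
 * Completion handler for IN (to-host) requests: recycle the request
 * onto tx_idle and try to push more queued data to the host.
 */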
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d;
	struct sk_buff *skb = req->context;
	int status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock);

	gbam_write_data_tohost(port);
}

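/*
 * Completion handler for OUT (from-host) requests: hand the filled skb
 * to the tobam worker and re-queue the request with a fresh skb, unless
 * rx flow control has engaged (rx_skb_q above the enable threshold).
 */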
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	spin_unlock(&port->port_lock);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock);
	}
}

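/*
 * BAM2BAM mode uses a single "endless" zero-length request per
 * direction, flagged for SPS mode through udc_priv, so the controller
 * streams data directly to/from the BAM pipes; the completions below
 * only log their status.
 */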
static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

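/*
 * Prime the OUT endpoint: attach a fresh skb to every idle rx request
 * and queue it, stopping early if rx flow control kicks in or memory
 * runs out.
 */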
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gbam_start_endless_rx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
}

static void gbam_start_endless_tx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
}

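/*
 * Allocate the rx/tx request pools for the newly enabled endpoints and
 * start OUT transfers; IN transfers start as data arrives from the
 * modem.
 */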
static void gbam_start_io(struct gbam_port *port)
{
	unsigned long flags;
	struct usb_ep *ep;
	int ret;
	struct bam_ch_info *d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		gbam_free_requests(port->port_usb->out, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

static void gbam_notify(void *p, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		gbam_data_recv_cb(p, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		gbam_data_write_done(p, (struct sk_buff *)(data));
		break;
	}
}

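/*
 * Release every outstanding USB request and queued skb for the port;
 * called on disconnect and when the BAM channel is removed.
 */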
static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct bam_ch_info *d;

	if (!port)
		return;

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock_irqrestore(&port->port_lock, flags);
}

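/*
 * Disconnect work for the BAM (dmux) transport: close the DMUX channel
 * if it was opened. The BAM2BAM variant below only clears the USB side
 * and disables the endpoints; it does not touch the usb_bam connection.
 */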
static void gbam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d = &port->data_ch;

	if (!test_bit(BAM_CH_OPENED, &d->flags))
		return;

	msm_bam_dmux_close(d->id);
	clear_bit(BAM_CH_OPENED, &d->flags);
}

static void gbam2bam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = 0;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints */
	usb_ep_disable(port->gr->out);
	usb_ep_disable(port->gr->in);
}

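/*
 * Connect work for the BAM (dmux) transport: once both the USB side and
 * the BAM channel are ready, open the DMUX channel and start I/O.
 */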
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

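/*
 * Connect work for the BAM2BAM transport: set up the usb_bam SPS
 * connection, allocate the two "endless" requests that tie the USB
 * endpoints to the BAM pipes, and kick off both directions.
 */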
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	int ret;

	ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
			&d->dst_pipe_idx);
	if (ret) {
		pr_err("%s: usb_bam_connect failed: err:%d\n",
				__func__, ret);
		return;
	}

	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
	if (!d->rx_req)
		return;

	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx |
			MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
	d->rx_req->udc_priv = sps_params;
	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
	if (!d->tx_req)
		return;

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx |
			MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	pr_debug("%s: done\n", __func__);
}

/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
				BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock_irqrestore(&port->port_lock, flags);

			break;
		}
	}

	return 0;
}

/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct usb_ep *ep_in = NULL;
	struct usb_ep *ep_out = NULL;
	unsigned long flags;
	int i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
				BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			spin_lock_irqsave(&port->port_lock, flags);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock_irqrestore(&port->port_lock, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			msm_bam_dmux_close(d->id);

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}

static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;
	struct platform_driver *pdrv = &bam_ports[portno].pdrv;

	if (port) {
		kfree(port);
		platform_driver_unregister(pdrv);
	}
}

static void gbam2bam_port_free(int portno)
{
	struct gbam_port *port = bam2bam_ports[portno];

	kfree(port);
}

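/*
 * Allocate and initialize one BAM (dmux) port and register the platform
 * driver that waits for its data channel to come up; the BAM2BAM
 * variant below only needs the port structure itself.
 */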
static int gbam_port_alloc(int portno)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct platform_driver *pdrv;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock);
	INIT_WORK(&port->connect_w, gbam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pdrv = &bam_ports[portno].pdrv;
	pdrv->probe = gbam_data_ch_probe;
	pdrv->remove = gbam_data_ch_remove;
	pdrv->driver.name = bam_ch_names[portno];
	pdrv->driver.owner = THIS_MODULE;

	platform_driver_register(pdrv);
	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

static int gbam2bam_port_alloc(int portno)
{
	struct gbam_port *port;
	struct bam_ch_info *d;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock);

	INIT_WORK(&port->connect_w, gbam2bam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	bam2bam_ports[portno] = port;

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	char *buf;
	unsigned long flags;
	int ret;
	int i;
	int temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock_irqrestore(&port->port_lock, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock, flags);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock_irqrestore(&port->port_lock, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", 0);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

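/*
 * Called by the rmnet function driver when the connection is torn down:
 * validate the port, detach the grmnet instance for the BAM transport,
 * and defer the transport-specific teardown to the disconnect work.
 */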
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port *port;
	unsigned long flags;
	struct bam_ch_info *d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM &&
		port_num >= n_bam_ports) {
		pr_err("%s: invalid bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM &&
		port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid bam2bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}

	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;
	port->gr = gr;

	if (trans == USB_GADGET_XPORT_BAM) {
		gbam_free_buffers(port);

		spin_lock_irqsave(&port->port_lock, flags);
		port->port_usb = 0;
		spin_unlock_irqrestore(&port->port_lock, flags);

		/* disable endpoints */
		usb_ep_disable(gr->out);
		usb_ep_disable(gr->in);
	}

	queue_work(gbam_wq, &port->disconnect_w);
}

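/*
 * Called when the rmnet function is activated: enable the endpoints,
 * bind the grmnet instance to the port and queue the transport-specific
 * connect work (BAM DMUX open, or usb_bam pipe setup for BAM2BAM).
 */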
int gbam_connect(struct grmnet *gr, u8 port_num,
		enum transport_type trans, u8 connection_idx)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int ret;
	unsigned long flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;

	ret = usb_ep_enable(gr->in, gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p\n",
			__func__, gr->in);
		return ret;
	}
	gr->in->driver_data = port;

	ret = usb_ep_enable(gr->out, gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p\n",
			__func__, gr->out);
		gr->in->driver_data = 0;
		return ret;
	}
	gr->out->driver_data = port;

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = gr;

	if (trans == USB_GADGET_XPORT_BAM) {
		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	if (trans == USB_GADGET_XPORT_BAM2BAM)
		d->connection_idx = connection_idx;

	queue_work(gbam_wq, &port->connect_w);

	return 0;
}

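/*
 * One-time setup from the gadget driver: create the shared workqueue
 * and allocate the requested number of BAM and BAM2BAM ports.
 */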
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
{
	int i;
	int ret;

	pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
			__func__, no_bam_port, no_bam2bam_port);

	if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
		|| no_bam2bam_port > BAM2BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d,%d\n",
			__func__, no_bam_port, no_bam2bam_port);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < no_bam_port; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	for (i = 0; i < no_bam2bam_port; i++) {
		n_bam2bam_ports++;
		ret = gbam2bam_port_alloc(i);
		if (ret) {
			n_bam2bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}
	gbam_debugfs_init();
	return 0;

free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);
	for (i = 0; i < n_bam2bam_ports; i++)
		gbam2bam_port_free(i);
	destroy_workqueue(gbam_wq);

	return ret;
}