/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <mach/msm_smd.h>
#include <linux/netdevice.h>
#include <mach/bam_dmux.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/termios.h>

#include <mach/usb_gadget_xport.h>
#include <mach/usb_bam.h>

#include "u_rmnet.h"

#define BAM_N_PORTS	1
#define BAM2BAM_N_PORTS	1

static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static int n_bam2bam_ports;
static unsigned n_tx_req_queued;
static unsigned bam_ch_ids[] = { 8 };

static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

#define BAM_PENDING_LIMIT			220
#define BAM_MUX_TX_PKT_DROP_THRESHOLD		1000
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD		500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD		300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

#define BAM_MUX_HDR				8

#define BAM_MUX_RX_Q_SIZE			16
#define BAM_MUX_TX_Q_SIZE			200
#define BAM_MUX_RX_REQ_SIZE			(2048 - BAM_MUX_HDR)

#define DL_INTR_THRESHOLD			20

unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);

#define BAM_CH_OPENED			BIT(0)
#define BAM_CH_READY			BIT(1)
#define SPS_PARAMS_PIPE_ID_MASK		(0x1F)
#define SPS_PARAMS_SPS_MODE		BIT(5)
#define SPS_PARAMS_TBE			BIT(6)
#define MSM_VENDOR_ID			BIT(16)

struct bam_ch_info {
	unsigned long		flags;
	unsigned		id;

	struct list_head	tx_idle;
	struct sk_buff_head	tx_skb_q;

	struct list_head	rx_idle;
	struct sk_buff_head	rx_skb_q;

	struct gbam_port	*port;
	struct work_struct	write_tobam_w;

	struct usb_request	*rx_req;
	struct usb_request	*tx_req;

	u8			src_pipe_idx;
	u8			dst_pipe_idx;
	u8			connection_idx;

	/* stats */
	unsigned int		pending_with_bam;
	unsigned int		tohost_drp_cnt;
	unsigned int		tomodem_drp_cnt;
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;
	unsigned long		to_host;
};

struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock_ul;
	spinlock_t		port_lock_dl;

	struct grmnet		*port_usb;
	struct grmnet		*gr;

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
	struct work_struct	disconnect_w;
};

static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
static void gbam_start_rx(struct gbam_port *port);
static void gbam_start_endless_rx(struct gbam_port *port);
static void gbam_start_endless_tx(struct gbam_port *port);

/*---------------misc functions---------------- */
static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		usb_ep_free_request(ep, req);
	}
}

static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num,
		void (*cb)(struct usb_ep *ep, struct usb_request *),
		gfp_t flags)
{
	int i;
	struct usb_request *req;

	pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
			ep, head, num, cb);

	for (i = 0; i < num; i++) {
		req = usb_ep_alloc_request(ep, flags);
		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__, i);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
	}

	return 0;
}
/*--------------------------------------------- */

/*------------data_path----------------------------*/
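/*
 * Downlink (modem -> host): pull SKBs queued by gbam_data_recv_cb() off
 * tx_skb_q, map each onto an idle IN request and queue it to the IN
 * endpoint. A completion interrupt is requested only once every
 * dl_intr_threshold requests to reduce interrupt overhead.
 */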
static void gbam_write_data_tohost(struct gbam_port *port)
{
	unsigned long		flags;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb;
	int			ret;
	struct usb_request	*req;
	struct usb_ep		*ep;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		list_del(&req->list);

		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}

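/*
 * Receive callback, invoked via gbam_notify() when BAM-DMUX delivers a
 * packet from the modem. Queue it toward the host unless the downlink
 * backlog exceeds bam_mux_tx_pkt_drop_thld, in which case drop it.
 */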
void gbam_data_recv_cb(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
			port, port->port_num, d, skb->len);

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
		d->tohost_drp_cnt++;
		if (printk_ratelimit())
			pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
					__func__, d->tohost_drp_cnt);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		dev_kfree_skb_any(skb);
		return;
	}

	__skb_queue_tail(&d->tx_skb_q, skb);
	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	gbam_write_data_tohost(port);
}

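/*
 * Write-done callback, invoked via gbam_notify() once BAM-DMUX has consumed
 * an uplink SKB. Drop the pending count and kick the uplink worker so more
 * data can be pushed toward the modem.
 */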
void gbam_data_write_done(void *p, struct sk_buff *skb)
{
	struct gbam_port	*port = p;
	struct bam_ch_info	*d = &port->data_ch;
	unsigned long		flags;

	if (!skb)
		return;

	dev_kfree_skb_any(skb);

	spin_lock_irqsave(&port->port_lock_ul, flags);

	d->pending_with_bam--;

	pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
			port, d, d->to_modem,
			d->pending_with_bam, port->port_num);

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	queue_work(gbam_wq, &d->write_tobam_w);
}

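/*
 * Uplink worker (host -> modem): drain rx_skb_q into msm_bam_dmux_write()
 * while no more than BAM_PENDING_LIMIT packets are outstanding with BAM.
 * Once the backlog falls below the flow-control disable threshold, resume
 * queuing OUT requests to the host.
 */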
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct sk_buff		*skb;
	unsigned long		flags;
	int			ret;
	int			qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
/*-------------------------------------------------------------*/

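/*
 * IN (to-host) request completion: free the SKB, recycle the request onto
 * tx_idle and try to push more downlink data.
 */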
static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d;
	struct sk_buff		*skb = req->context;
	int			status = req->status;

	switch (status) {
	case 0:
		/* successful completion */
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		break;
	default:
		pr_err("%s: data tx ep error %d\n",
				__func__, status);
		break;
	}

	dev_kfree_skb_any(skb);

	if (!port)
		return;

	spin_lock(&port->port_lock_dl);
	d = &port->data_ch;
	list_add_tail(&req->list, &d->tx_idle);
	spin_unlock(&port->port_lock_dl);

	gbam_write_data_tohost(port);
}

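/*
 * OUT (from-host) request completion: hand the received SKB to the uplink
 * worker, then either park the request on rx_idle (uplink flow control
 * engaged) or re-arm it with a freshly allocated SKB.
 */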
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port	*port = ep->driver_data;
	struct bam_ch_info	*d = &port->data_ch;
	struct sk_buff		*skb = req->context;
	int			status = req->status;
	int			queue = 0;

	switch (status) {
	case 0:
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * a callback mechanism from the bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}

static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	int status = req->status;

	pr_debug("%s status: %d\n", __func__, status);
}

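/*
 * Allocate SKBs for the idle OUT requests and queue them to the OUT
 * endpoint, honouring the uplink flow-control enable threshold.
 */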
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request	*req;
	struct bam_ch_info	*d;
	struct usb_ep		*ep;
	unsigned long		flags;
	int			ret;
	struct sk_buff		*skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	while (port->port_usb && !list_empty(&d->rx_idle)) {

		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}

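/*
 * For the BAM2BAM transport each direction is driven by a single "endless"
 * zero-length request (SPS mode, see the udc_priv setup in
 * gbam2bam_connect_work); the data itself moves between the controller and
 * the peripheral BAM pipes without per-packet software involvement.
 */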
static void gbam_start_endless_rx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
}

static void gbam_start_endless_tx(struct gbam_port *port)
{
	struct bam_ch_info *d = &port->data_ch;
	int status;

	status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
	if (status)
		pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
}

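/*
 * Allocate the RX and TX request pools for the BAM-DMUX transport and start
 * queuing OUT requests. Called once the BAM channel has been opened.
 */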
static void gbam_start_io(struct gbam_port *port)
{
	unsigned long		flags;
	struct usb_ep		*ep;
	int			ret;
	struct bam_ch_info	*d;

	pr_debug("%s: port:%p\n", __func__, port);

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;
	ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
			gbam_epout_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: rx req allocation failed\n", __func__);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock_ul, flags);
	spin_lock_irqsave(&port->port_lock_dl, flags);
	ep = port->port_usb->in;
	ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
			gbam_epin_complete, GFP_ATOMIC);
	if (ret) {
		pr_err("%s: tx req allocation failed\n", __func__);
		gbam_free_requests(ep, &d->rx_idle);
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	spin_unlock_irqrestore(&port->port_lock_dl, flags);

	/* queue out requests */
	gbam_start_rx(port);
}

static void gbam_notify(void *p, int event, unsigned long data)
{
	switch (event) {
	case BAM_DMUX_RECEIVE:
		gbam_data_recv_cb(p, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		gbam_data_write_done(p, (struct sk_buff *)(data));
		break;
	}
}

static void gbam_free_buffers(struct gbam_port *port)
{
	struct sk_buff		*skb;
	unsigned long		flags;
	struct bam_ch_info	*d;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock_irqsave(&port->port_lock_dl, flags);

	if (!port || !port->port_usb)
		goto free_buf_out;

	d = &port->data_ch;

	gbam_free_requests(port->port_usb->in, &d->tx_idle);
	gbam_free_requests(port->port_usb->out, &d->rx_idle);

	while ((skb = __skb_dequeue(&d->tx_skb_q)))
		dev_kfree_skb_any(skb);

	while ((skb = __skb_dequeue(&d->rx_skb_q)))
		dev_kfree_skb_any(skb);

free_buf_out:
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}

static void gbam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	struct bam_ch_info *d = &port->data_ch;

	if (!test_bit(BAM_CH_OPENED, &d->flags))
		return;

	msm_bam_dmux_close(d->id);
	clear_bit(BAM_CH_OPENED, &d->flags);
}

static void gbam2bam_disconnect_work(struct work_struct *w)
{
	struct gbam_port *port =
			container_of(w, struct gbam_port, disconnect_w);
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock_irqsave(&port->port_lock_dl, flags);
	port->port_usb = 0;
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* disable endpoints */
	usb_ep_disable(port->gr->out);
	usb_ep_disable(port->gr->in);
}

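/*
 * Worker for the BAM-DMUX transport: once both the USB side (port_usb) and
 * the BAM side (BAM_CH_READY) are available, open the DMUX channel,
 * allocate requests and start I/O.
 */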
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable to open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}

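/*
 * Worker for the BAM2BAM transport: enable the endpoints, connect the USB
 * BAM pipes, then allocate and arm one endless request per direction with
 * the SPS pipe parameters in udc_priv.
 */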
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	int ret;
	unsigned long flags;

	ret = usb_ep_enable(port->gr->in, port->gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, port->gr->in);
		return;
	}
	port->gr->in->driver_data = port;

	ret = usb_ep_enable(port->gr->out, port->gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, port->gr->out);
		port->gr->in->driver_data = 0;
		return;
	}
	port->gr->out->driver_data = port;
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock_irqsave(&port->port_lock_dl, flags);
	port->port_usb = port->gr;
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
						&d->dst_pipe_idx);
	if (ret) {
		pr_err("%s: usb_bam_connect failed: err:%d\n",
				__func__, ret);
		return;
	}

	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
	if (!d->rx_req)
		return;

	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
	d->rx_req->udc_priv = sps_params;
	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
	if (!d->tx_req)
		return;

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	pr_debug("%s: done\n", __func__);
}

/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
				BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock_irqsave(&port->port_lock_dl, flags);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			break;
		}
	}

	return 0;
}

/* BAM data channel went inactive, so close it */
static int gbam_data_ch_remove(struct platform_device *pdev)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct usb_ep		*ep_in = NULL;
	struct usb_ep		*ep_out = NULL;
	unsigned long		flags;
	int			i;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	for (i = 0; i < n_bam_ports; i++) {
		if (!strncmp(bam_ch_names[i], pdev->name,
				BAM_DMUX_CH_NAME_MAX_LEN)) {
			port = bam_ports[i].port;
			d = &port->data_ch;

			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock_irqsave(&port->port_lock_dl, flags);
			if (port->port_usb) {
				ep_in = port->port_usb->in;
				ep_out = port->port_usb->out;
			}
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			if (ep_in)
				usb_ep_fifo_flush(ep_in);
			if (ep_out)
				usb_ep_fifo_flush(ep_out);

			gbam_free_buffers(port);

			msm_bam_dmux_close(d->id);

			clear_bit(BAM_CH_READY, &d->flags);
			clear_bit(BAM_CH_OPENED, &d->flags);
		}
	}

	return 0;
}

static void gbam_port_free(int portno)
{
	struct gbam_port *port = bam_ports[portno].port;
	struct platform_driver *pdrv = &bam_ports[portno].pdrv;

	if (port) {
		kfree(port);
		platform_driver_unregister(pdrv);
	}
}

static void gbam2bam_port_free(int portno)
{
	struct gbam_port *port = bam2bam_ports[portno];

	kfree(port);
}

static int gbam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	struct platform_driver	*pdrv;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock_ul);
	spin_lock_init(&port->port_lock_dl);
	INIT_WORK(&port->connect_w, gbam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	INIT_LIST_HEAD(&d->tx_idle);
	INIT_LIST_HEAD(&d->rx_idle);
	INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
	skb_queue_head_init(&d->tx_skb_q);
	skb_queue_head_init(&d->rx_skb_q);
	d->id = bam_ch_ids[portno];

	bam_ports[portno].port = port;

	pdrv = &bam_ports[portno].pdrv;
	pdrv->probe = gbam_data_ch_probe;
	pdrv->remove = gbam_data_ch_remove;
	pdrv->driver.name = bam_ch_names[portno];
	pdrv->driver.owner = THIS_MODULE;

	platform_driver_register(pdrv);
	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

static int gbam2bam_port_alloc(int portno)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;

	port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->port_num = portno;

	/* port initialization */
	spin_lock_init(&port->port_lock_ul);
	spin_lock_init(&port->port_lock_dl);

	INIT_WORK(&port->connect_w, gbam2bam_connect_work);
	INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);

	/* data ch */
	d = &port->data_ch;
	d->port = port;
	bam2bam_ports[portno] = port;

	pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);

	return 0;
}

#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE	1024
static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	char			*buf;
	unsigned long		flags;
	int			ret;
	int			i;
	int			temp = 0;

	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;
		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock_irqsave(&port->port_lock_dl, flags);

		d = &port->data_ch;

		temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
				"#PORT:%d port:%p data_ch:%p#\n"
				"dpkts_to_usbhost: %lu\n"
				"dpkts_to_modem: %lu\n"
				"dpkts_pwith_bam: %u\n"
				"to_usbhost_dcnt: %u\n"
				"tomodem__dcnt: %u\n"
				"tx_buf_len: %u\n"
				"rx_buf_len: %u\n"
				"data_ch_open: %d\n"
				"data_ch_ready: %d\n",
				i, port, &port->data_ch,
				d->to_host, d->to_modem,
				d->pending_with_bam,
				d->tohost_drp_cnt, d->tomodem_drp_cnt,
				d->tx_skb_q.qlen, d->rx_skb_q.qlen,
				test_bit(BAM_CH_OPENED, &d->flags),
				test_bit(BAM_CH_READY, &d->flags));

		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}

static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			i;
	unsigned long		flags;

	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		if (!port)
			continue;

		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock_irqsave(&port->port_lock_dl, flags);

		d = &port->data_ch;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;

		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}
	return count;
}

const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};

static void gbam_debugfs_init(void)
{
	struct dentry *dent;
	struct dentry *dfile;

	dent = debugfs_create_dir("usb_rmnet", 0);
	if (IS_ERR(dent))
		return;

	/* TODO: Implement cleanup function to remove created file */
	dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
	if (!dfile || IS_ERR(dfile))
		debugfs_remove(dent);
}
#else
static void gbam_debugfs_init(void) { }
#endif

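/*
 * Tear down a data port. For the BAM-DMUX transport, free the USB-side
 * buffers, clear port_usb and disable the endpoints here; the remaining
 * teardown runs in the per-transport disconnect worker.
 */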
void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
{
	struct gbam_port	*port;
	unsigned long		flags;
	struct bam_ch_info	*d;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM &&
		port_num >= n_bam_ports) {
		pr_err("%s: invalid bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM &&
		port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid bam2bam portno#%d\n",
			__func__, port_num);
		return;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return;
	}
	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;
	port->gr = gr;

	if (trans == USB_GADGET_XPORT_BAM) {
		gbam_free_buffers(port);

		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock_irqsave(&port->port_lock_dl, flags);
		port->port_usb = 0;
		n_tx_req_queued = 0;
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);

		/* disable endpoints */
		usb_ep_disable(gr->out);
		usb_ep_disable(gr->in);
	}

	queue_work(gbam_wq, &port->disconnect_w);
}

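/*
 * Bring up a data port. For BAM-DMUX, enable the endpoints and reset the
 * per-channel stats; for BAM2BAM, record the grmnet handle and connection
 * index. The transport-specific connect worker finishes the job.
 */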
int gbam_connect(struct grmnet *gr, u8 port_num,
		enum transport_type trans, u8 connection_idx)
{
	struct gbam_port	*port;
	struct bam_ch_info	*d;
	int			ret;
	unsigned long		flags;

	pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);

	if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, port_num);
		return -ENODEV;
	}

	if (!gr) {
		pr_err("%s: grmnet port is null\n", __func__);
		return -ENODEV;
	}

	if (trans == USB_GADGET_XPORT_BAM)
		port = bam_ports[port_num].port;
	else
		port = bam2bam_ports[port_num];

	d = &port->data_ch;

	if (trans == USB_GADGET_XPORT_BAM) {
		ret = usb_ep_enable(gr->in, gr->in_desc);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
					__func__, gr->in);
			return ret;
		}
		gr->in->driver_data = port;

		ret = usb_ep_enable(gr->out, gr->out_desc);
		if (ret) {
			pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
					__func__, gr->out);
			gr->in->driver_data = 0;
			return ret;
		}
		gr->out->driver_data = port;

		spin_lock_irqsave(&port->port_lock_ul, flags);
		spin_lock_irqsave(&port->port_lock_dl, flags);
		port->port_usb = gr;

		d->to_host = 0;
		d->to_modem = 0;
		d->pending_with_bam = 0;
		d->tohost_drp_cnt = 0;
		d->tomodem_drp_cnt = 0;
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
	}

	if (trans == USB_GADGET_XPORT_BAM2BAM) {
		port->gr = gr;
		d->connection_idx = connection_idx;
	}

	queue_work(gbam_wq, &port->connect_w);

	return 0;
}

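/*
 * One-time setup: validate the requested port counts, create the shared
 * workqueue, allocate the BAM and BAM2BAM ports, and register the debugfs
 * stats file.
 */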
int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
{
	int	i;
	int	ret;

	pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
			__func__, no_bam_port, no_bam2bam_port);

	if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
		|| no_bam2bam_port > BAM2BAM_N_PORTS) {
		pr_err("%s: Invalid num of ports count:%d,%d\n",
				__func__, no_bam_port, no_bam2bam_port);
		return -EINVAL;
	}

	gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!gbam_wq) {
		pr_err("%s: Unable to create workqueue gbam_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < no_bam_port; i++) {
		n_bam_ports++;
		ret = gbam_port_alloc(i);
		if (ret) {
			n_bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}

	for (i = 0; i < no_bam2bam_port; i++) {
		n_bam2bam_ports++;
		ret = gbam2bam_port_alloc(i);
		if (ret) {
			n_bam2bam_ports--;
			pr_err("%s: Unable to alloc port:%d\n", __func__, i);
			goto free_bam_ports;
		}
	}
	gbam_debugfs_init();
	return 0;

free_bam_ports:
	for (i = 0; i < n_bam_ports; i++)
		gbam_port_free(i);
	for (i = 0; i < n_bam2bam_ports; i++)
		gbam2bam_port_free(i);
	destroy_workqueue(gbam_wq);

	return ret;
}