blob: 23e0da80a8867387eee22da83ec343f0680e1aad [file] [log] [blame]
Anna Perel97b8c222012-01-18 10:08:14 +02001/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/interrupt.h>
15#include <linux/device.h>
16#include <linux/delay.h>
17#include <linux/slab.h>
18#include <linux/termios.h>
19#include <mach/msm_smd.h>
20#include <linux/netdevice.h>
21#include <mach/bam_dmux.h>
22#include <linux/debugfs.h>
23#include <linux/bitops.h>
24#include <linux/termios.h>
25
Ofir Cohena1c2a872011-12-14 10:26:34 +020026#include <mach/usb_gadget_xport.h>
27#include <mach/usb_bam.h>
28
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include "u_rmnet.h"
30
#define BAM_N_PORTS	1
#define BAM2BAM_N_PORTS	1

/* Workqueue shared by all ports for connect/disconnect and data work. */
static struct workqueue_struct *gbam_wq;
static int n_bam_ports;
static int n_bam2bam_ports;
/* IN requests queued since the last one that asked for a completion IRQ. */
static unsigned n_tx_req_queued;
/* bam_dmux channel id used by port 0 (parallel to bam_ch_names[]). */
static unsigned bam_ch_ids[] = { 8 };

/* Platform-device name the bam_dmux probe matches against. */
static const char *bam_ch_names[] = { "bam_dmux_ch_8" };

/* Max skbs outstanding with bam_dmux before the tobam worker stops. */
#define BAM_PENDING_LIMIT			220
/* Drop DL packets once tx_skb_q exceeds this depth. */
#define BAM_MUX_TX_PKT_DROP_THRESHOLD		1000
/* UL flow control: stop queuing OUT requests at EN, restart below DIS. */
#define BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD		500
#define BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD		300
#define BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT	1

/* Headroom reserved in each rx skb for the BAM mux header. */
#define BAM_MUX_HDR				8

#define BAM_MUX_RX_Q_SIZE			16
#define BAM_MUX_TX_Q_SIZE			200
#define BAM_MUX_RX_REQ_SIZE			(2048 - BAM_MUX_HDR)

/* Request a completion interrupt only every Nth queued IN request. */
#define DL_INTR_THRESHOLD			20

/* Runtime-tunable copies of the above (writable via module params). */
unsigned int bam_mux_tx_pkt_drop_thld = BAM_MUX_TX_PKT_DROP_THRESHOLD;
module_param(bam_mux_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_en_thld = BAM_MUX_RX_PKT_FCTRL_EN_TSHOLD;
module_param(bam_mux_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_support = BAM_MUX_RX_PKT_FLOW_CTRL_SUPPORT;
module_param(bam_mux_rx_fctrl_support, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_fctrl_dis_thld = BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD;
module_param(bam_mux_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_tx_q_size = BAM_MUX_TX_Q_SIZE;
module_param(bam_mux_tx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_q_size = BAM_MUX_RX_Q_SIZE;
module_param(bam_mux_rx_q_size, uint, S_IRUGO | S_IWUSR);

unsigned int bam_mux_rx_req_size = BAM_MUX_RX_REQ_SIZE;
module_param(bam_mux_rx_req_size, uint, S_IRUGO | S_IWUSR);

unsigned int dl_intr_threshold = DL_INTR_THRESHOLD;
module_param(dl_intr_threshold, uint, S_IRUGO | S_IWUSR);

/* bam_ch_info.flags bits */
#define BAM_CH_OPENED	BIT(0)
#define BAM_CH_READY	BIT(1)
/* Encoding of udc_priv for "endless" SPS/BAM2BAM requests. */
#define SPS_PARAMS_PIPE_ID_MASK		(0x1F)
#define SPS_PARAMS_SPS_MODE		BIT(5)
#define SPS_PARAMS_TBE			BIT(6)
#define MSM_VENDOR_ID			BIT(16)
/*
 * Per-port BAM channel state: request pools and skb queues for both
 * directions, the work items that drain them, and bookkeeping counters.
 */
struct bam_ch_info {
	unsigned long		flags;		/* BAM_CH_OPENED / BAM_CH_READY */
	unsigned		id;		/* bam_dmux channel id */

	struct list_head	tx_idle;	/* free requests for the IN (to-host) ep */
	struct sk_buff_head	tx_skb_q;	/* skbs from modem awaiting IN transfer */

	struct list_head	rx_idle;	/* free requests for the OUT (from-host) ep */
	struct sk_buff_head	rx_skb_q;	/* skbs from host awaiting bam_dmux write */

	struct gbam_port	*port;		/* back-pointer to owning port */
	struct work_struct	write_tobam_w;	/* runs gbam_data_write_tobam() */
	struct work_struct	write_tohost_w;	/* runs gbam_write_data_tohost() */

	/* "endless" requests used only by the bam2bam (hw-to-hw) path */
	struct usb_request	*rx_req;
	struct usb_request	*tx_req;

	u8			src_pipe_idx;	/* filled in by usb_bam_connect() */
	u8			dst_pipe_idx;	/* filled in by usb_bam_connect() */
	u8			connection_idx;	/* index handed to usb_bam_connect() */

	/* stats */
	unsigned int		pending_with_bam; /* skbs written to bam_dmux, not yet acked */
	unsigned int		tohost_drp_cnt;	/* drops: tx_skb_q over drop threshold */
	unsigned int		tomodem_drp_cnt; /* drops: msm_bam_dmux_write() failures */
	unsigned int		tx_len;
	unsigned int		rx_len;
	unsigned long		to_modem;	/* total skbs handed to bam_dmux */
	unsigned long		to_host;	/* total skbs queued on the IN ep */
};
117
/*
 * One rmnet data port.  Uplink (host->modem) and downlink (modem->host)
 * state are guarded by separate spinlocks; paths that need both take
 * port_lock_ul first, then port_lock_dl (see gbam_free_buffers()).
 */
struct gbam_port {
	unsigned		port_num;
	spinlock_t		port_lock_ul;	/* guards rx_* (uplink) state */
	spinlock_t		port_lock_dl;	/* guards tx_* (downlink) state */

	struct grmnet		*port_usb;	/* non-NULL while gadget is attached */
	struct grmnet		*gr;		/* gadget handle for the bam2bam path */

	struct bam_ch_info	data_ch;

	struct work_struct	connect_w;
	struct work_struct	disconnect_w;
};
131
/* One port plus its bam_dmux platform driver, per BAM-DMUX channel. */
static struct bam_portmaster {
	struct gbam_port *port;
	struct platform_driver pdrv;
} bam_ports[BAM_N_PORTS];

struct gbam_port *bam2bam_ports[BAM2BAM_N_PORTS];
static void gbam_start_rx(struct gbam_port *port);
static void gbam_start_endless_rx(struct gbam_port *port);
static void gbam_start_endless_tx(struct gbam_port *port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700141
142/*---------------misc functions---------------- */
143static void gbam_free_requests(struct usb_ep *ep, struct list_head *head)
144{
145 struct usb_request *req;
146
147 while (!list_empty(head)) {
148 req = list_entry(head->next, struct usb_request, list);
149 list_del(&req->list);
150 usb_ep_free_request(ep, req);
151 }
152}
153
154static int gbam_alloc_requests(struct usb_ep *ep, struct list_head *head,
155 int num,
156 void (*cb)(struct usb_ep *ep, struct usb_request *),
157 gfp_t flags)
158{
159 int i;
160 struct usb_request *req;
161
162 pr_debug("%s: ep:%p head:%p num:%d cb:%p", __func__,
163 ep, head, num, cb);
164
165 for (i = 0; i < num; i++) {
166 req = usb_ep_alloc_request(ep, flags);
167 if (!req) {
168 pr_debug("%s: req allocated:%d\n", __func__, i);
169 return list_empty(head) ? -ENOMEM : 0;
170 }
171 req->complete = cb;
172 list_add(&req->list, head);
173 }
174
175 return 0;
176}
177/*--------------------------------------------- */
178
179/*------------data_path----------------------------*/
/*
 * Work handler for the downlink (modem -> host) path: pair each skb on
 * tx_skb_q with a free IN request from tx_idle and queue it on the IN
 * endpoint.  Runs until either queue is exhausted or a queue error hits.
 */
static void gbam_write_data_tohost(struct work_struct *w)
{
	unsigned long flags;
	struct bam_ch_info *d;
	struct gbam_port *port;
	struct sk_buff *skb;
	int ret;
	struct usb_request *req;
	struct usb_ep *ep;

	d = container_of(w, struct bam_ch_info, write_tohost_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_dl, flags);
	if (!port->port_usb) {
		/* cable pulled; nothing to write to */
		spin_unlock_irqrestore(&port->port_lock_dl, flags);
		return;
	}

	ep = port->port_usb->in;

	while (!list_empty(&d->tx_idle)) {
		skb = __skb_dequeue(&d->tx_skb_q);
		if (!skb) {
			/* backlog drained */
			spin_unlock_irqrestore(&port->port_lock_dl, flags);
			return;
		}
		req = list_first_entry(&d->tx_idle,
				struct usb_request,
				list);
		req->context = skb;
		req->buf = skb->data;
		req->length = skb->len;
		/*
		 * IRQ moderation: request a completion interrupt only on
		 * every dl_intr_threshold-th queued request.
		 */
		n_tx_req_queued++;
		if (n_tx_req_queued == dl_intr_threshold) {
			req->no_interrupt = 0;
			n_tx_req_queued = 0;
		} else {
			req->no_interrupt = 1;
		}

		list_del(&req->list);

		/*
		 * Drop only the spinlock (IRQs stay disabled from the
		 * outer irqsave) across usb_ep_queue(), which may call
		 * into the UDC driver.
		 */
		spin_unlock(&port->port_lock_dl);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock(&port->port_lock_dl);
		if (ret) {
			pr_err("%s: usb epIn failed\n", __func__);
			/* return the request; the skb is lost */
			list_add(&req->list, &d->tx_idle);
			dev_kfree_skb_any(skb);
			break;
		}
		d->to_host++;
	}
	spin_unlock_irqrestore(&port->port_lock_dl, flags);
}
236
237void gbam_data_recv_cb(void *p, struct sk_buff *skb)
238{
239 struct gbam_port *port = p;
240 struct bam_ch_info *d = &port->data_ch;
241 unsigned long flags;
242
243 if (!skb)
244 return;
245
246 pr_debug("%s: p:%p#%d d:%p skb_len:%d\n", __func__,
247 port, port->port_num, d, skb->len);
248
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530249 spin_lock_irqsave(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700250 if (!port->port_usb) {
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530251 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700252 dev_kfree_skb_any(skb);
253 return;
254 }
255
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700256 if (d->tx_skb_q.qlen > bam_mux_tx_pkt_drop_thld) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700257 d->tohost_drp_cnt++;
258 if (printk_ratelimit())
259 pr_err("%s: tx pkt dropped: tx_drop_cnt:%u\n",
260 __func__, d->tohost_drp_cnt);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530261 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262 dev_kfree_skb_any(skb);
263 return;
264 }
265
266 __skb_queue_tail(&d->tx_skb_q, skb);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530267 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268
Vijayavardhan Vennapusa929e5792011-12-12 17:34:53 +0530269 queue_work(gbam_wq, &d->write_tohost_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700270}
271
272void gbam_data_write_done(void *p, struct sk_buff *skb)
273{
274 struct gbam_port *port = p;
275 struct bam_ch_info *d = &port->data_ch;
276 unsigned long flags;
277
278 if (!skb)
279 return;
280
281 dev_kfree_skb_any(skb);
282
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530283 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700284
285 d->pending_with_bam--;
286
287 pr_debug("%s: port:%p d:%p tom:%lu pbam:%u, pno:%d\n", __func__,
288 port, d, d->to_modem,
289 d->pending_with_bam, port->port_num);
290
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530291 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700292
Vamsi Krishna84579552011-11-09 15:33:22 -0800293 queue_work(gbam_wq, &d->write_tobam_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700294}
295
/*
 * Work handler for the uplink (host -> modem) path: drain rx_skb_q into
 * the bam_dmux channel, keeping at most BAM_PENDING_LIMIT writes
 * outstanding.  Re-arms USB RX when the backlog falls below the
 * flow-control disable threshold.
 */
static void gbam_data_write_tobam(struct work_struct *w)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	struct sk_buff *skb;
	unsigned long flags;
	int ret;
	int qlen;

	d = container_of(w, struct bam_ch_info, write_tobam_w);
	port = d->port;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	while (d->pending_with_bam < BAM_PENDING_LIMIT) {
		skb = __skb_dequeue(&d->rx_skb_q);
		if (!skb)
			break;

		/* count before dropping the lock so the limit check holds */
		d->pending_with_bam++;
		d->to_modem++;

		pr_debug("%s: port:%p d:%p tom:%lu pbam:%u pno:%d\n", __func__,
				port, d, d->to_modem, d->pending_with_bam,
				port->port_num);

		/* msm_bam_dmux_write() may sleep/alloc; call it unlocked */
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = msm_bam_dmux_write(d->id, skb);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			pr_debug("%s: write error:%d\n", __func__, ret);
			/* roll back the optimistic accounting */
			d->pending_with_bam--;
			d->to_modem--;
			d->tomodem_drp_cnt++;
			dev_kfree_skb_any(skb);
			break;
		}
	}

	qlen = d->rx_skb_q.qlen;

	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* backlog low again: resume queuing OUT requests */
	if (qlen < BAM_MUX_RX_PKT_FCTRL_DIS_TSHOLD)
		gbam_start_rx(port);
}
346/*-------------------------------------------------------------*/
347
348static void gbam_epin_complete(struct usb_ep *ep, struct usb_request *req)
349{
350 struct gbam_port *port = ep->driver_data;
351 struct bam_ch_info *d;
352 struct sk_buff *skb = req->context;
353 int status = req->status;
354
355 switch (status) {
356 case 0:
357 /* successful completion */
358 case -ECONNRESET:
359 case -ESHUTDOWN:
360 /* connection gone */
361 break;
362 default:
363 pr_err("%s: data tx ep error %d\n",
364 __func__, status);
365 break;
366 }
367
368 dev_kfree_skb_any(skb);
369
370 if (!port)
371 return;
372
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530373 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700374 d = &port->data_ch;
375 list_add_tail(&req->list, &d->tx_idle);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530376 spin_unlock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700377
Vijayavardhan Vennapusa929e5792011-12-12 17:34:53 +0530378 queue_work(gbam_wq, &d->write_tohost_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700379}
380
/*
 * OUT (host -> device) completion: on success, hand the filled skb to
 * the tobam worker, then recycle the request with a fresh skb — unless
 * uplink flow control is engaged, in which case the request is parked
 * on rx_idle until gbam_start_rx() resumes.
 */
static void
gbam_epout_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gbam_port *port = ep->driver_data;
	struct bam_ch_info *d = &port->data_ch;
	struct sk_buff *skb = req->context;
	int status = req->status;
	int queue = 0;

	switch (status) {
	case 0:
		/* mark the received bytes valid in the skb */
		skb_put(skb, req->actual);
		queue = 1;
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* cable disconnection: request will not be reused */
		dev_kfree_skb_any(skb);
		req->buf = 0;
		usb_ep_free_request(ep, req);
		return;
	default:
		if (printk_ratelimit())
			pr_err("%s: %s response error %d, %d/%d\n",
				__func__, ep->name, status,
				req->actual, req->length);
		/* drop the skb but fall through to recycle the request */
		dev_kfree_skb_any(skb);
		break;
	}

	spin_lock(&port->port_lock_ul);
	if (queue) {
		__skb_queue_tail(&d->rx_skb_q, skb);
		queue_work(gbam_wq, &d->write_tobam_w);
	}

	/* TODO: Handle flow control gracefully by having
	 * having call back mechanism from bam driver
	 */
	if (bam_mux_rx_fctrl_support &&
		d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld) {

		/* backlog too deep: park the request instead of re-queuing */
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	spin_unlock(&port->port_lock_ul);

	skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
	if (!skb) {
		/* no memory for a fresh buffer: park the request */
		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
		return;
	}
	/* leave headroom for the BAM mux header */
	skb_reserve(skb, BAM_MUX_HDR);

	req->buf = skb->data;
	req->length = bam_mux_rx_req_size;
	req->context = skb;

	status = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (status) {
		dev_kfree_skb_any(skb);

		if (printk_ratelimit())
			pr_err("%s: data rx enqueue err %d\n",
					__func__, status);

		spin_lock(&port->port_lock_ul);
		list_add_tail(&req->list, &d->rx_idle);
		spin_unlock(&port->port_lock_ul);
	}
}
455
Ofir Cohena1c2a872011-12-14 10:26:34 +0200456static void gbam_endless_rx_complete(struct usb_ep *ep, struct usb_request *req)
457{
458 int status = req->status;
459
460 pr_debug("%s status: %d\n", __func__, status);
461}
462
463static void gbam_endless_tx_complete(struct usb_ep *ep, struct usb_request *req)
464{
465 int status = req->status;
466
467 pr_debug("%s status: %d\n", __func__, status);
468}
469
/*
 * Arm the OUT endpoint: attach a fresh skb to every idle rx request and
 * queue it, stopping early if flow control engages or skb allocation
 * fails.  Called at start-of-IO and whenever the uplink backlog drains.
 */
static void gbam_start_rx(struct gbam_port *port)
{
	struct usb_request *req;
	struct bam_ch_info *d;
	struct usb_ep *ep;
	unsigned long flags;
	int ret;
	struct sk_buff *skb;

	spin_lock_irqsave(&port->port_lock_ul, flags);
	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}

	d = &port->data_ch;
	ep = port->port_usb->out;

	/* port_usb re-checked each pass: the lock is dropped inside */
	while (port->port_usb && !list_empty(&d->rx_idle)) {

		/* uplink flow control: leave remaining requests parked */
		if (bam_mux_rx_fctrl_support &&
			d->rx_skb_q.qlen >= bam_mux_rx_fctrl_en_thld)
			break;

		req = list_first_entry(&d->rx_idle, struct usb_request, list);

		skb = alloc_skb(bam_mux_rx_req_size + BAM_MUX_HDR, GFP_ATOMIC);
		if (!skb)
			break;
		/* leave headroom for the BAM mux header */
		skb_reserve(skb, BAM_MUX_HDR);

		list_del(&req->list);
		req->buf = skb->data;
		req->length = bam_mux_rx_req_size;
		req->context = skb;

		/* usb_ep_queue() may call into the UDC; drop the lock */
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		ret = usb_ep_queue(ep, req, GFP_ATOMIC);
		spin_lock_irqsave(&port->port_lock_ul, flags);
		if (ret) {
			dev_kfree_skb_any(skb);

			if (printk_ratelimit())
				pr_err("%s: rx queue failed\n", __func__);

			/* port may have detached while unlocked */
			if (port->port_usb)
				list_add(&req->list, &d->rx_idle);
			else
				usb_ep_free_request(ep, req);
			break;
		}
	}
	spin_unlock_irqrestore(&port->port_lock_ul, flags);
}
524
Ofir Cohena1c2a872011-12-14 10:26:34 +0200525static void gbam_start_endless_rx(struct gbam_port *port)
526{
527 struct bam_ch_info *d = &port->data_ch;
528 int status;
529
530 status = usb_ep_queue(port->port_usb->out, d->rx_req, GFP_ATOMIC);
531 if (status)
532 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
533}
534
535static void gbam_start_endless_tx(struct gbam_port *port)
536{
537 struct bam_ch_info *d = &port->data_ch;
538 int status;
539
540 status = usb_ep_queue(port->port_usb->in, d->tx_req, GFP_ATOMIC);
541 if (status)
542 pr_err("%s: error enqueuing transfer, %d\n", __func__, status);
543}
544
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700545static void gbam_start_io(struct gbam_port *port)
546{
547 unsigned long flags;
548 struct usb_ep *ep;
549 int ret;
550 struct bam_ch_info *d;
551
552 pr_debug("%s: port:%p\n", __func__, port);
553
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530554 spin_lock_irqsave(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700555 if (!port->port_usb) {
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530556 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700557 return;
558 }
559
560 d = &port->data_ch;
561 ep = port->port_usb->out;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700562 ret = gbam_alloc_requests(ep, &d->rx_idle, bam_mux_rx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700563 gbam_epout_complete, GFP_ATOMIC);
564 if (ret) {
565 pr_err("%s: rx req allocation failed\n", __func__);
566 return;
567 }
568
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530569 spin_unlock_irqrestore(&port->port_lock_ul, flags);
570 spin_lock_irqsave(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 ep = port->port_usb->in;
Vamsi Krishna8f24f252011-11-02 11:46:08 -0700572 ret = gbam_alloc_requests(ep, &d->tx_idle, bam_mux_tx_q_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700573 gbam_epin_complete, GFP_ATOMIC);
574 if (ret) {
575 pr_err("%s: tx req allocation failed\n", __func__);
576 gbam_free_requests(ep, &d->rx_idle);
577 return;
578 }
579
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530580 spin_unlock_irqrestore(&port->port_lock_dl, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700581
582 /* queue out requests */
583 gbam_start_rx(port);
584}
585
Jeff Hugo1c4531c2011-08-02 14:55:37 -0600586static void gbam_notify(void *p, int event, unsigned long data)
587{
588 switch (event) {
589 case BAM_DMUX_RECEIVE:
590 gbam_data_recv_cb(p, (struct sk_buff *)(data));
591 break;
592 case BAM_DMUX_WRITE_DONE:
593 gbam_data_write_done(p, (struct sk_buff *)(data));
594 break;
595 }
596}
597
Ofir Cohena1c2a872011-12-14 10:26:34 +0200598static void gbam_free_buffers(struct gbam_port *port)
599{
600 struct sk_buff *skb;
601 unsigned long flags;
602 struct bam_ch_info *d;
603
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530604 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800605 spin_lock(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200606
607 if (!port || !port->port_usb)
608 goto free_buf_out;
609
610 d = &port->data_ch;
611
612 gbam_free_requests(port->port_usb->in, &d->tx_idle);
613 gbam_free_requests(port->port_usb->out, &d->rx_idle);
614
615 while ((skb = __skb_dequeue(&d->tx_skb_q)))
616 dev_kfree_skb_any(skb);
617
618 while ((skb = __skb_dequeue(&d->rx_skb_q)))
619 dev_kfree_skb_any(skb);
620
621free_buf_out:
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800622 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530623 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200624}
625
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800626static void gbam_disconnect_work(struct work_struct *w)
627{
628 struct gbam_port *port =
629 container_of(w, struct gbam_port, disconnect_w);
630 struct bam_ch_info *d = &port->data_ch;
631
632 if (!test_bit(BAM_CH_OPENED, &d->flags))
633 return;
634
635 msm_bam_dmux_close(d->id);
636 clear_bit(BAM_CH_OPENED, &d->flags);
637}
638
Ofir Cohena1c2a872011-12-14 10:26:34 +0200639static void gbam2bam_disconnect_work(struct work_struct *w)
640{
641 struct gbam_port *port =
642 container_of(w, struct gbam_port, disconnect_w);
643 unsigned long flags;
644
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530645 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800646 spin_lock(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200647 port->port_usb = 0;
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800648 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530649 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200650
651 /* disable endpoints */
652 usb_ep_disable(port->gr->out);
653 usb_ep_disable(port->gr->in);
654
Anna Perel97b8c222012-01-18 10:08:14 +0200655 port->gr->in->driver_data = NULL;
656 port->gr->out->driver_data = NULL;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200657}
658
/*
 * Connect worker for the bam_dmux path: once both USB is attached and
 * the BAM channel has probed (BAM_CH_READY), open the channel and start
 * IO.  Queued both from gbam_connect and from gbam_data_ch_probe().
 */
static void gbam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	int ret;
	unsigned long flags;

	/* bail if the gadget detached before this work ran */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	if (!port->port_usb) {
		spin_unlock(&port->port_lock_dl);
		spin_unlock_irqrestore(&port->port_lock_ul, flags);
		return;
	}
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	/* channel not probed yet; probe will re-queue this work */
	if (!test_bit(BAM_CH_READY, &d->flags))
		return;

	ret = msm_bam_dmux_open(d->id, port, gbam_notify);
	if (ret) {
		pr_err("%s: unable open bam ch:%d err:%d\n",
				__func__, d->id, ret);
		return;
	}
	set_bit(BAM_CH_OPENED, &d->flags);

	gbam_start_io(port);

	pr_debug("%s: done\n", __func__);
}
691
/*
 * Connect worker for the bam2bam (hardware-to-hardware) path: enable
 * both endpoints, connect the USB BAM pipes, then set up and queue one
 * zero-length "endless" request per direction, flagged via udc_priv for
 * SPS mode so the UDC streams directly between BAM pipes.
 */
static void gbam2bam_connect_work(struct work_struct *w)
{
	struct gbam_port *port = container_of(w, struct gbam_port, connect_w);
	struct bam_ch_info *d = &port->data_ch;
	u32 sps_params;
	int ret;
	unsigned long flags;

	ret = usb_ep_enable(port->gr->in, port->gr->in_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
				__func__, port->gr->in);
		return;
	}
	port->gr->in->driver_data = port;

	ret = usb_ep_enable(port->gr->out, port->gr->out_desc);
	if (ret) {
		pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
				__func__, port->gr->out);
		/* undo the IN binding before bailing */
		port->gr->in->driver_data = 0;
		return;
	}
	port->gr->out->driver_data = port;
	/* lock order: ul outer, dl inner */
	spin_lock_irqsave(&port->port_lock_ul, flags);
	spin_lock(&port->port_lock_dl);
	port->port_usb = port->gr;
	spin_unlock(&port->port_lock_dl);
	spin_unlock_irqrestore(&port->port_lock_ul, flags);

	ret = usb_bam_connect(d->connection_idx, &d->src_pipe_idx,
			&d->dst_pipe_idx);
	if (ret) {
		pr_err("%s: usb_bam_connect failed: err:%d\n",
				__func__, ret);
		/* NOTE(review): eps stay enabled and port_usb stays set
		 * on this and the later error returns — confirm callers
		 * recover via the disconnect path. */
		return;
	}

	d->rx_req = usb_ep_alloc_request(port->port_usb->out, GFP_KERNEL);
	if (!d->rx_req)
		return;

	d->rx_req->context = port;
	d->rx_req->complete = gbam_endless_rx_complete;
	d->rx_req->length = 0;
	/* SPS mode, source pipe id, transfer-before-end cleared */
	sps_params = (SPS_PARAMS_SPS_MODE | d->src_pipe_idx |
				MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
	d->rx_req->udc_priv = sps_params;
	d->tx_req = usb_ep_alloc_request(port->port_usb->in, GFP_KERNEL);
	if (!d->tx_req)
		return;

	d->tx_req->context = port;
	d->tx_req->complete = gbam_endless_tx_complete;
	d->tx_req->length = 0;
	sps_params = (SPS_PARAMS_SPS_MODE | d->dst_pipe_idx |
				MSM_VENDOR_ID) & ~SPS_PARAMS_TBE;
	d->tx_req->udc_priv = sps_params;

	/* queue in & out requests */
	gbam_start_endless_rx(port);
	gbam_start_endless_tx(port);

	pr_debug("%s: done\n", __func__);
}
757
/* BAM data channel ready, allow attempt to open */
static int gbam_data_ch_probe(struct platform_device *pdev)
{
	struct gbam_port *port;
	struct bam_ch_info *d;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, pdev->name);

	/* match the probed platform device to its port by channel name */
	for (i = 0; i < n_bam_ports; i++) {
		port = bam_ports[i].port;
		d = &port->data_ch;

		if (!strncmp(bam_ch_names[i], pdev->name,
					BAM_DMUX_CH_NAME_MAX_LEN)) {
			set_bit(BAM_CH_READY, &d->flags);

			/* if usb is online, try opening bam_ch */
			spin_lock_irqsave(&port->port_lock_ul, flags);
			spin_lock(&port->port_lock_dl);
			if (port->port_usb)
				queue_work(gbam_wq, &port->connect_w);
			spin_unlock(&port->port_lock_dl);
			spin_unlock_irqrestore(&port->port_lock_ul, flags);

			break;
		}
	}

	return 0;
}
790
791/* BAM data channel went inactive, so close it */
792static int gbam_data_ch_remove(struct platform_device *pdev)
793{
794 struct gbam_port *port;
795 struct bam_ch_info *d;
796 struct usb_ep *ep_in = NULL;
797 struct usb_ep *ep_out = NULL;
798 unsigned long flags;
799 int i;
800
801 pr_debug("%s: name:%s\n", __func__, pdev->name);
802
803 for (i = 0; i < n_bam_ports; i++) {
804 if (!strncmp(bam_ch_names[i], pdev->name,
805 BAM_DMUX_CH_NAME_MAX_LEN)) {
806 port = bam_ports[i].port;
807 d = &port->data_ch;
808
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530809 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800810 spin_lock(&port->port_lock_dl);
Jack Phameffd4ae2011-08-03 16:49:36 -0700811 if (port->port_usb) {
812 ep_in = port->port_usb->in;
813 ep_out = port->port_usb->out;
814 }
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800815 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530816 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Jack Phameffd4ae2011-08-03 16:49:36 -0700817
818 if (ep_in)
819 usb_ep_fifo_flush(ep_in);
820 if (ep_out)
821 usb_ep_fifo_flush(ep_out);
822
823 gbam_free_buffers(port);
824
825 msm_bam_dmux_close(d->id);
826
Vamsi Krishna7658bd12012-01-13 10:32:00 -0800827 /* bam dmux will free all pending skbs */
828 d->pending_with_bam = 0;
829
Jack Phameffd4ae2011-08-03 16:49:36 -0700830 clear_bit(BAM_CH_READY, &d->flags);
831 clear_bit(BAM_CH_OPENED, &d->flags);
832 }
833 }
834
835 return 0;
836}
837
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700838static void gbam_port_free(int portno)
839{
840 struct gbam_port *port = bam_ports[portno].port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700841 struct platform_driver *pdrv = &bam_ports[portno].pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700842
Jack Phameffd4ae2011-08-03 16:49:36 -0700843 if (port) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844 kfree(port);
Jack Phameffd4ae2011-08-03 16:49:36 -0700845 platform_driver_unregister(pdrv);
846 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700847}
848
Ofir Cohena1c2a872011-12-14 10:26:34 +0200849static void gbam2bam_port_free(int portno)
850{
851 struct gbam_port *port = bam2bam_ports[portno];
852
853 kfree(port);
854}
855
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700856static int gbam_port_alloc(int portno)
857{
858 struct gbam_port *port;
859 struct bam_ch_info *d;
Jack Phameffd4ae2011-08-03 16:49:36 -0700860 struct platform_driver *pdrv;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700861
862 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
863 if (!port)
864 return -ENOMEM;
865
866 port->port_num = portno;
867
868 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530869 spin_lock_init(&port->port_lock_ul);
870 spin_lock_init(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700871 INIT_WORK(&port->connect_w, gbam_connect_work);
Vamsi Krishna1ad076d2011-11-10 15:03:30 -0800872 INIT_WORK(&port->disconnect_w, gbam_disconnect_work);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700873
874 /* data ch */
875 d = &port->data_ch;
876 d->port = port;
877 INIT_LIST_HEAD(&d->tx_idle);
878 INIT_LIST_HEAD(&d->rx_idle);
879 INIT_WORK(&d->write_tobam_w, gbam_data_write_tobam);
Vijayavardhan Vennapusa929e5792011-12-12 17:34:53 +0530880 INIT_WORK(&d->write_tohost_w, gbam_write_data_tohost);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700881 skb_queue_head_init(&d->tx_skb_q);
882 skb_queue_head_init(&d->rx_skb_q);
883 d->id = bam_ch_ids[portno];
884
885 bam_ports[portno].port = port;
886
Jack Phameffd4ae2011-08-03 16:49:36 -0700887 pdrv = &bam_ports[portno].pdrv;
888 pdrv->probe = gbam_data_ch_probe;
889 pdrv->remove = gbam_data_ch_remove;
890 pdrv->driver.name = bam_ch_names[portno];
891 pdrv->driver.owner = THIS_MODULE;
892
893 platform_driver_register(pdrv);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200894 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
895
896 return 0;
897}
898
899static int gbam2bam_port_alloc(int portno)
900{
901 struct gbam_port *port;
902 struct bam_ch_info *d;
903
904 port = kzalloc(sizeof(struct gbam_port), GFP_KERNEL);
905 if (!port)
906 return -ENOMEM;
907
908 port->port_num = portno;
909
910 /* port initialization */
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530911 spin_lock_init(&port->port_lock_ul);
912 spin_lock_init(&port->port_lock_dl);
Ofir Cohena1c2a872011-12-14 10:26:34 +0200913
914 INIT_WORK(&port->connect_w, gbam2bam_connect_work);
915 INIT_WORK(&port->disconnect_w, gbam2bam_disconnect_work);
916
917 /* data ch */
918 d = &port->data_ch;
919 d->port = port;
920 bam2bam_ports[portno] = port;
Jack Phameffd4ae2011-08-03 16:49:36 -0700921
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700922 pr_debug("%s: port:%p portno:%d\n", __func__, port, portno);
923
924 return 0;
925}
926
927#if defined(CONFIG_DEBUG_FS)
928#define DEBUG_BUF_SIZE 1024
929static ssize_t gbam_read_stats(struct file *file, char __user *ubuf,
930 size_t count, loff_t *ppos)
931{
932 struct gbam_port *port;
933 struct bam_ch_info *d;
934 char *buf;
935 unsigned long flags;
936 int ret;
937 int i;
938 int temp = 0;
939
940 buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
941 if (!buf)
942 return -ENOMEM;
943
944 for (i = 0; i < n_bam_ports; i++) {
945 port = bam_ports[i].port;
946 if (!port)
947 continue;
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530948 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800949 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700950
951 d = &port->data_ch;
952
953 temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
954 "#PORT:%d port:%p data_ch:%p#\n"
955 "dpkts_to_usbhost: %lu\n"
956 "dpkts_to_modem: %lu\n"
957 "dpkts_pwith_bam: %u\n"
958 "to_usbhost_dcnt: %u\n"
959 "tomodem__dcnt: %u\n"
960 "tx_buf_len: %u\n"
Vamsi Krishna84579552011-11-09 15:33:22 -0800961 "rx_buf_len: %u\n"
Jack Phameffd4ae2011-08-03 16:49:36 -0700962 "data_ch_open: %d\n"
963 "data_ch_ready: %d\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700964 i, port, &port->data_ch,
965 d->to_host, d->to_modem,
966 d->pending_with_bam,
967 d->tohost_drp_cnt, d->tomodem_drp_cnt,
Vamsi Krishna84579552011-11-09 15:33:22 -0800968 d->tx_skb_q.qlen, d->rx_skb_q.qlen,
Jack Phameffd4ae2011-08-03 16:49:36 -0700969 test_bit(BAM_CH_OPENED, &d->flags),
970 test_bit(BAM_CH_READY, &d->flags));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800972 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530973 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700974 }
975
976 ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
977
978 kfree(buf);
979
980 return ret;
981}
982
983static ssize_t gbam_reset_stats(struct file *file, const char __user *buf,
984 size_t count, loff_t *ppos)
985{
986 struct gbam_port *port;
987 struct bam_ch_info *d;
988 int i;
989 unsigned long flags;
990
991 for (i = 0; i < n_bam_ports; i++) {
992 port = bam_ports[i].port;
993 if (!port)
994 continue;
995
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +0530996 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -0800997 spin_lock(&port->port_lock_dl);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700998
999 d = &port->data_ch;
1000
1001 d->to_host = 0;
1002 d->to_modem = 0;
1003 d->pending_with_bam = 0;
1004 d->tohost_drp_cnt = 0;
1005 d->tomodem_drp_cnt = 0;
1006
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001007 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301008 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001009 }
1010 return count;
1011}
1012
/* debugfs "status" file ops: read dumps per-port stats, write resets them */
const struct file_operations gbam_stats_ops = {
	.read = gbam_read_stats,
	.write = gbam_reset_stats,
};
1017
1018static void gbam_debugfs_init(void)
1019{
1020 struct dentry *dent;
1021 struct dentry *dfile;
1022
1023 dent = debugfs_create_dir("usb_rmnet", 0);
1024 if (IS_ERR(dent))
1025 return;
1026
1027 /* TODO: Implement cleanup function to remove created file */
1028 dfile = debugfs_create_file("status", 0444, dent, 0, &gbam_stats_ops);
1029 if (!dfile || IS_ERR(dfile))
1030 debugfs_remove(dent);
1031}
1032#else
/* Fix typo "gam_" -> "gbam_": gbam_setup() calls gbam_debugfs_init(),
 * so the misnamed stub broke the !CONFIG_DEBUG_FS build. */
static void gbam_debugfs_init(void) { }
1034#endif
1035
Ofir Cohena1c2a872011-12-14 10:26:34 +02001036void gbam_disconnect(struct grmnet *gr, u8 port_num, enum transport_type trans)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001037{
1038 struct gbam_port *port;
1039 unsigned long flags;
1040 struct bam_ch_info *d;
1041
1042 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
1043
Ofir Cohena1c2a872011-12-14 10:26:34 +02001044 if (trans == USB_GADGET_XPORT_BAM &&
1045 port_num >= n_bam_ports) {
1046 pr_err("%s: invalid bam portno#%d\n",
1047 __func__, port_num);
1048 return;
1049 }
1050
1051 if (trans == USB_GADGET_XPORT_BAM2BAM &&
1052 port_num >= n_bam2bam_ports) {
1053 pr_err("%s: invalid bam2bam portno#%d\n",
1054 __func__, port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001055 return;
1056 }
1057
1058 if (!gr) {
1059 pr_err("%s: grmnet port is null\n", __func__);
1060 return;
1061 }
Ofir Cohena1c2a872011-12-14 10:26:34 +02001062 if (trans == USB_GADGET_XPORT_BAM)
1063 port = bam_ports[port_num].port;
1064 else
1065 port = bam2bam_ports[port_num];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001066
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001067 d = &port->data_ch;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001068 port->gr = gr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001069
Ofir Cohena1c2a872011-12-14 10:26:34 +02001070 if (trans == USB_GADGET_XPORT_BAM) {
1071 gbam_free_buffers(port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001072
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301073 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001074 spin_lock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301075 port->port_usb = 0;
Vijayavardhan Vennapusa08c31252011-12-21 13:02:49 +05301076 n_tx_req_queued = 0;
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001077 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301078 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001079
Ofir Cohena1c2a872011-12-14 10:26:34 +02001080 /* disable endpoints */
1081 usb_ep_disable(gr->out);
1082 usb_ep_disable(gr->in);
1083 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001084
Vamsi Krishna1ad076d2011-11-10 15:03:30 -08001085 queue_work(gbam_wq, &port->disconnect_w);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001086}
1087
Ofir Cohena1c2a872011-12-14 10:26:34 +02001088int gbam_connect(struct grmnet *gr, u8 port_num,
1089 enum transport_type trans, u8 connection_idx)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001090{
1091 struct gbam_port *port;
1092 struct bam_ch_info *d;
1093 int ret;
1094 unsigned long flags;
1095
1096 pr_debug("%s: grmnet:%p port#%d\n", __func__, gr, port_num);
1097
Ofir Cohena1c2a872011-12-14 10:26:34 +02001098 if (trans == USB_GADGET_XPORT_BAM && port_num >= n_bam_ports) {
1099 pr_err("%s: invalid portno#%d\n", __func__, port_num);
1100 return -ENODEV;
1101 }
1102
1103 if (trans == USB_GADGET_XPORT_BAM2BAM && port_num >= n_bam2bam_ports) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001104 pr_err("%s: invalid portno#%d\n", __func__, port_num);
1105 return -ENODEV;
1106 }
1107
1108 if (!gr) {
1109 pr_err("%s: grmnet port is null\n", __func__);
1110 return -ENODEV;
1111 }
1112
Ofir Cohena1c2a872011-12-14 10:26:34 +02001113 if (trans == USB_GADGET_XPORT_BAM)
1114 port = bam_ports[port_num].port;
1115 else
1116 port = bam2bam_ports[port_num];
1117
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001118 d = &port->data_ch;
1119
Ofir Cohena1c2a872011-12-14 10:26:34 +02001120 if (trans == USB_GADGET_XPORT_BAM) {
Ofir Cohen4da266f2012-01-03 10:19:29 +02001121 ret = usb_ep_enable(gr->in, gr->in_desc);
1122 if (ret) {
1123 pr_err("%s: usb_ep_enable failed eptype:IN ep:%p",
1124 __func__, gr->in);
1125 return ret;
1126 }
1127 gr->in->driver_data = port;
1128
1129 ret = usb_ep_enable(gr->out, gr->out_desc);
1130 if (ret) {
1131 pr_err("%s: usb_ep_enable failed eptype:OUT ep:%p",
1132 __func__, gr->out);
1133 gr->in->driver_data = 0;
1134 return ret;
1135 }
1136 gr->out->driver_data = port;
1137
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301138 spin_lock_irqsave(&port->port_lock_ul, flags);
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001139 spin_lock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301140 port->port_usb = gr;
Ofir Cohen4da266f2012-01-03 10:19:29 +02001141
Ofir Cohena1c2a872011-12-14 10:26:34 +02001142 d->to_host = 0;
1143 d->to_modem = 0;
1144 d->pending_with_bam = 0;
1145 d->tohost_drp_cnt = 0;
1146 d->tomodem_drp_cnt = 0;
Jeff Ohlstein6f8c5fc2012-01-17 13:08:39 -08001147 spin_unlock(&port->port_lock_dl);
Vijayavardhan Vennapusaa8808532012-01-09 15:24:02 +05301148 spin_unlock_irqrestore(&port->port_lock_ul, flags);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001149 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001150
Ofir Cohen4da266f2012-01-03 10:19:29 +02001151 if (trans == USB_GADGET_XPORT_BAM2BAM) {
1152 port->gr = gr;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001153 d->connection_idx = connection_idx;
Ofir Cohen4da266f2012-01-03 10:19:29 +02001154 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001155
1156 queue_work(gbam_wq, &port->connect_w);
1157
1158 return 0;
1159}
1160
Ofir Cohena1c2a872011-12-14 10:26:34 +02001161int gbam_setup(unsigned int no_bam_port, unsigned int no_bam2bam_port)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001162{
1163 int i;
1164 int ret;
1165
Ofir Cohena1c2a872011-12-14 10:26:34 +02001166 pr_debug("%s: requested BAM ports:%d and BAM2BAM ports:%d\n",
1167 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001168
Ofir Cohena1c2a872011-12-14 10:26:34 +02001169 if ((!no_bam_port && !no_bam2bam_port) || no_bam_port > BAM_N_PORTS
1170 || no_bam2bam_port > BAM2BAM_N_PORTS) {
1171 pr_err("%s: Invalid num of ports count:%d,%d\n",
1172 __func__, no_bam_port, no_bam2bam_port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001173 return -EINVAL;
1174 }
1175
1176 gbam_wq = alloc_workqueue("k_gbam", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1177 if (!gbam_wq) {
1178 pr_err("%s: Unable to create workqueue gbam_wq\n",
1179 __func__);
1180 return -ENOMEM;
1181 }
1182
Ofir Cohena1c2a872011-12-14 10:26:34 +02001183 for (i = 0; i < no_bam_port; i++) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301184 n_bam_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001185 ret = gbam_port_alloc(i);
1186 if (ret) {
Manu Gautamd59b5d32011-09-09 14:47:08 +05301187 n_bam_ports--;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001188 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1189 goto free_bam_ports;
1190 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001191 }
1192
Ofir Cohena1c2a872011-12-14 10:26:34 +02001193 for (i = 0; i < no_bam2bam_port; i++) {
1194 n_bam2bam_ports++;
1195 ret = gbam2bam_port_alloc(i);
1196 if (ret) {
1197 n_bam2bam_ports--;
1198 pr_err("%s: Unable to alloc port:%d\n", __func__, i);
1199 goto free_bam_ports;
1200 }
1201 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001202 gbam_debugfs_init();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001203 return 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001204
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001205free_bam_ports:
1206 for (i = 0; i < n_bam_ports; i++)
1207 gbam_port_free(i);
Ofir Cohena1c2a872011-12-14 10:26:34 +02001208 for (i = 0; i < n_bam2bam_ports; i++)
1209 gbam2bam_port_free(i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001210 destroy_workqueue(gbam_wq);
1211
1212 return ret;
1213}