blob: 2c6a31c024d14a4581497f79150b10d152b16b44 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/usb/android_composite.h>
18#include <linux/spinlock.h>
19
Hemant Kumar1b820d52011-11-03 15:08:28 -070020#include <mach/usb_gadget_xport.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021#include "u_rmnet.h"
22#include "gadget_chips.h"
23
24
/* bInterval exponent for the interrupt notify endpoint */
#define RMNET_NOTIFY_INTERVAL	5
/* notify transfers carry exactly one CDC notification header */
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

/* One set of endpoint-descriptor pointers per speed (fs/hs). */
struct rmnet_descs {
	struct usb_endpoint_descriptor	*in;
	struct usb_endpoint_descriptor	*out;
	struct usb_endpoint_descriptor	*notify;
};

/* DTR bit in wValue of SET_CONTROL_LINE_STATE (CDC ACM encoding) */
#define ACM_CTRL_DTR	(1 << 0)
35
/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet			port;		/* hooks shared with ctrl/data transports */
	int				ifc_id;		/* interface number from usb_interface_id() */
	u8				port_num;	/* index into rmnet_ports[] */
	atomic_t			online;		/* set in set_alt(), cleared in disable() */
	atomic_t			ctrl_online;	/* modem ctrl channel up (frmnet_connect/disconnect) */
	struct usb_composite_dev	*cdev;

	spinlock_t			lock;		/* guards cpkt_resp_q */

	/* usb descriptors */
	struct rmnet_descs		fs;
	struct rmnet_descs		hs;

	/* usb eps*/
	struct usb_ep			*notify;
	struct usb_endpoint_descriptor	*notify_desc;
	struct usb_request		*notify_req;

	/* control info */
	struct list_head		cpkt_resp_q;	/* responses awaiting GET_ENCAPSULATED_RESPONSE */
	atomic_t			notify_count;	/* outstanding RESPONSE_AVAILABLE notifications */
	unsigned long			cpkts_len;
};
63
#define NR_RMNET_PORTS	1
/* number of ports registered via frmnet_init_port() */
static unsigned int nr_rmnet_ports;
/* per-transport instance counters, used to assign transport port ids */
static unsigned int no_ctrl_smd_ports;
static unsigned int no_data_bam_ports;
/* Static per-port bookkeeping: which transports back the control and
 * data paths, the per-transport port indices, and the device itself.
 */
static struct rmnet_ports {
	enum transport_type	data_xport;
	enum transport_type	ctrl_xport;
	unsigned		data_xport_num;
	unsigned		ctrl_xport_num;
	unsigned		port_num;
	struct f_rmnet		*port;
} rmnet_ports[NR_RMNET_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070076
/* Vendor-specific interface with three endpoints: bulk IN/OUT for the
 * data path and an interrupt IN endpoint for CDC-style notifications.
 */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};
86
/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* fs interrupt interval is in milliseconds (frames) */
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

/* descriptor set advertised when running at full speed */
static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};
120
/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* hs interrupt interval is 2^(bInterval-1) microframes */
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

/* descriptor set advertised when running at high speed */
static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};
154
/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",	/* iInterface; id assigned in frmnet_bind_config() */
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};
171
/* ------- misc functions --------------------*/

/* Map a struct usb_function back to its enclosing f_rmnet instance. */
static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}
178
/* Map a struct grmnet (transport-facing handle) back to its f_rmnet. */
static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}
183
184static struct usb_request *
185frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
186{
187 struct usb_request *req;
188
189 req = usb_ep_alloc_request(ep, flags);
190 if (!req)
191 return ERR_PTR(-ENOMEM);
192
193 req->buf = kmalloc(len, flags);
194 if (!req->buf) {
195 usb_ep_free_request(ep, req);
196 return ERR_PTR(-ENOMEM);
197 }
198
199 req->length = len;
200
201 return req;
202}
203
/* Release a request (and its buffer) allocated by frmnet_alloc_req(). */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
209
210static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
211{
212 struct rmnet_ctrl_pkt *pkt;
213
214 pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
215 if (!pkt)
216 return ERR_PTR(-ENOMEM);
217
218 pkt->buf = kmalloc(len, flags);
219 if (!pkt->buf) {
220 kfree(pkt);
221 return ERR_PTR(-ENOMEM);
222 }
223 pkt->len = len;
224
225 return pkt;
226}
227
/* Free a control packet and its payload buffer. */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}
233
234/* -------------------------------------------*/
235
Hemant Kumar1b820d52011-11-03 15:08:28 -0700236static int rmnet_gport_setup(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237{
238 int ret;
239
Hemant Kumar1b820d52011-11-03 15:08:28 -0700240 pr_debug("%s: bam ports: %u smd ports: %u nr_rmnet_ports: %u\n",
241 __func__, no_data_bam_ports, no_ctrl_smd_ports,
242 nr_rmnet_ports);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700243
Hemant Kumar1b820d52011-11-03 15:08:28 -0700244 if (no_data_bam_ports) {
245 ret = gbam_setup(no_data_bam_ports);
246 if (ret)
247 return ret;
248 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700249
Hemant Kumar1b820d52011-11-03 15:08:28 -0700250 if (no_ctrl_smd_ports) {
251 ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
252 if (ret)
253 return ret;
254 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255
256 return 0;
257}
258
Manu Gautam2b0234a2011-09-07 16:47:52 +0530259static int gport_rmnet_connect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700261 int ret;
262 unsigned port_num;
263 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
264 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265
Hemant Kumar1b820d52011-11-03 15:08:28 -0700266 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
267 __func__, xport_to_str(cxport), xport_to_str(dxport),
268 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700269
Hemant Kumar1b820d52011-11-03 15:08:28 -0700270 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
271 switch (cxport) {
272 case USB_GADGET_XPORT_SMD:
273 ret = gsmd_ctrl_connect(&dev->port, port_num);
274 if (ret) {
275 pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
276 __func__, ret);
277 return ret;
278 }
279 break;
280 case USB_GADGET_XPORT_NONE:
281 break;
282 default:
283 pr_err("%s: Un-supported transport: %s\n", __func__,
284 xport_to_str(cxport));
285 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700286 }
287
Hemant Kumar1b820d52011-11-03 15:08:28 -0700288 port_num = rmnet_ports[dev->port_num].data_xport_num;
289 switch (dxport) {
290 case USB_GADGET_XPORT_BAM:
291 ret = gbam_connect(&dev->port, port_num);
292 if (ret) {
293 pr_err("%s: gbam_connect failed: err:%d\n",
294 __func__, ret);
295 gsmd_ctrl_disconnect(&dev->port, port_num);
296 return ret;
297 }
298 break;
299 case USB_GADGET_XPORT_NONE:
300 break;
301 default:
302 pr_err("%s: Un-supported transport: %s\n", __func__,
303 xport_to_str(dxport));
304 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700305 }
306
307 return 0;
308}
309
Manu Gautam2b0234a2011-09-07 16:47:52 +0530310static int gport_rmnet_disconnect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700311{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700312 unsigned port_num;
313 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
314 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700315
Hemant Kumar1b820d52011-11-03 15:08:28 -0700316 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
317 __func__, xport_to_str(cxport), xport_to_str(dxport),
318 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700319
Hemant Kumar1b820d52011-11-03 15:08:28 -0700320 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
321 switch (cxport) {
322 case USB_GADGET_XPORT_SMD:
323 gsmd_ctrl_disconnect(&dev->port, port_num);
324 break;
325 case USB_GADGET_XPORT_NONE:
326 break;
327 default:
328 pr_err("%s: Un-supported transport: %s\n", __func__,
329 xport_to_str(cxport));
330 return -ENODEV;
331 }
332
333 port_num = rmnet_ports[dev->port_num].data_xport_num;
334 switch (dxport) {
335 case USB_GADGET_XPORT_BAM:
336 gbam_disconnect(&dev->port, port_num);
337 break;
338 case USB_GADGET_XPORT_NONE:
339 break;
340 default:
341 pr_err("%s: Un-supported transport: %s\n", __func__,
342 xport_to_str(dxport));
343 return -ENODEV;
344 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700345
346 return 0;
347}
348
/* Unbind callback: free the descriptor copies, the notify request, and
 * the kasprintf()'d function name allocated in frmnet_bind_config().
 */
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	/* hs descriptors only exist on dual-speed gadgets (see frmnet_bind) */
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}
363
/* Disable callback (cable pulled / alt-setting torn down): take the
 * port offline, drop any queued control responses and disconnect the
 * transports.
 */
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);

	atomic_set(&dev->online, 0);

	/* flush queued responses: the host can no longer fetch them */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	/* no notifications outstanding once the ep is disabled */
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}
389
390static int
391frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
392{
393 struct f_rmnet *dev = func_to_rmnet(f);
394 struct usb_composite_dev *cdev = dev->cdev;
395 int ret;
396
397 pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
398
399 if (dev->notify->driver_data) {
400 pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
401 usb_ep_disable(dev->notify);
402 }
403 dev->notify_desc = ep_choose(cdev->gadget,
404 dev->hs.notify,
405 dev->fs.notify);
406 ret = usb_ep_enable(dev->notify, dev->notify_desc);
407 if (ret) {
408 pr_err("%s: usb ep#%s enable failed, err#%d\n",
409 __func__, dev->notify->name, ret);
410 return ret;
411 }
412 dev->notify->driver_data = dev;
413
414 if (dev->port.in->driver_data) {
415 pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
Manu Gautam2b0234a2011-09-07 16:47:52 +0530416 gport_rmnet_disconnect(dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700417 }
418
419 dev->port.in_desc = ep_choose(cdev->gadget,
420 dev->hs.in, dev->fs.in);
421 dev->port.out_desc = ep_choose(cdev->gadget,
422 dev->hs.out, dev->fs.out);
423
Manu Gautam2b0234a2011-09-07 16:47:52 +0530424 ret = gport_rmnet_connect(dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700425
426 atomic_set(&dev->online, 1);
427
428 return ret;
429}
430
/* Tell the host a control response is waiting by queuing a CDC
 * RESPONSE_AVAILABLE notification on the interrupt endpoint.
 * notify_count coalesces notifications: only the 0 -> 1 transition
 * queues a request; frmnet_notify_complete() re-queues while the count
 * stays above zero.
 */
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	/* a notification is already in flight; it will be re-queued on
	 * completion while notify_count > 0
	 */
	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}
466
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700467static void frmnet_connect(struct grmnet *gr)
468{
469 struct f_rmnet *dev;
470
471 if (!gr) {
472 pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
473 return;
474 }
475
476 dev = port_to_rmnet(gr);
477
478 atomic_set(&dev->ctrl_online, 1);
479}
480
/* Transport callback: the modem-side control channel went down.  If the
 * USB side is still up, tell the host via a NETWORK_CONNECTION
 * notification (wValue 0 = disconnected) and drop any queued responses.
 */
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet			*dev;
	unsigned long			flags;
	struct usb_cdc_notification	*event;
	int				status;
	struct rmnet_ctrl_pkt		*cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	/* abandon any in-flight notification before reusing notify_req */
	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		/* ep may have been disabled concurrently; only an error
		 * if we are still online
		 */
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	/* flush responses queued for the now-dead control session */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}
533
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700534static int
Hemant Kumarf60c0252011-11-03 12:37:07 -0700535frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700536{
537 struct f_rmnet *dev;
Hemant Kumarf60c0252011-11-03 12:37:07 -0700538 struct rmnet_ctrl_pkt *cpkt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700539 unsigned long flags;
540
Hemant Kumarf60c0252011-11-03 12:37:07 -0700541 if (!gr || !buf) {
542 pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
543 __func__, gr, buf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700544 return -ENODEV;
545 }
Hemant Kumarf60c0252011-11-03 12:37:07 -0700546 cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
547 if (IS_ERR(cpkt)) {
548 pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
549 return -ENOMEM;
550 }
551 memcpy(cpkt->buf, buf, len);
552 cpkt->len = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553
554 dev = port_to_rmnet(gr);
555
556 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
557
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700558 if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700559 rmnet_free_ctrl_pkt(cpkt);
560 return 0;
561 }
562
563 spin_lock_irqsave(&dev->lock, flags);
Chiranjeevi Velempati263e6c82011-11-11 23:07:36 +0530564 list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700565 spin_unlock_irqrestore(&dev->lock, flags);
566
567 frmnet_ctrl_response_available(dev);
568
569 return 0;
570}
571
572static void
573frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
574{
575 struct f_rmnet *dev = req->context;
576 struct usb_composite_dev *cdev;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700577 unsigned port_num;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700578
579 if (!dev) {
580 pr_err("%s: rmnet dev is null\n", __func__);
581 return;
582 }
583
584 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
585
586 cdev = dev->cdev;
587
Hemant Kumar1b820d52011-11-03 15:08:28 -0700588 if (dev->port.send_encap_cmd) {
589 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
590 dev->port.send_encap_cmd(port_num, req->buf, req->actual);
591 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700592}
593
/* Completion handler for the interrupt notify endpoint.  While
 * notify_count is above zero (more responses queued since the last
 * notification), keep re-queuing the request so the host is poked once
 * per outstanding response.
 */
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		/* control session ended; don't keep notifying */
		if (!atomic_read(&dev->ctrl_online))
			break;

		if (atomic_dec_and_test(&dev->notify_count))
			break;

		/* more responses pending: notify again */
		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}
625
/* ep0 class-request handler.  Implements the CDC-style encapsulated
 * command channel: SEND_ENCAPSULATED_COMMAND (host -> modem),
 * GET_ENCAPSULATED_RESPONSE (modem -> host, fed from cpkt_resp_q) and
 * SET_CONTROL_LINE_STATE (DTR forwarding).  Returns the data-stage
 * length (>= 0) after queuing the ep0 request, or a negative errno.
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	struct usb_request		*req = cdev->req;
	unsigned			port_num;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	int				ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		/* payload arrives in the OUT data stage; forward it to
		 * the modem from frmnet_cmd_complete()
		 */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			/* NOTE(review): plain spin_lock here vs
			 * spin_lock_irqsave elsewhere -- presumably
			 * setup always runs in a consistent context;
			 * confirm against the UDC driver.
			 */
			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			/* truncate to what the host asked for */
			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* wValue carries the line state bits (e.g. ACM_CTRL_DTR) */
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
716
717static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
718{
719 struct f_rmnet *dev = func_to_rmnet(f);
720 struct usb_ep *ep;
721 struct usb_composite_dev *cdev = c->cdev;
722 int ret = -ENODEV;
723
724 dev->ifc_id = usb_interface_id(c, f);
725 if (dev->ifc_id < 0) {
726 pr_err("%s: unable to allocate ifc id, err:%d",
727 __func__, dev->ifc_id);
728 return dev->ifc_id;
729 }
730 rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
731
732 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
733 if (!ep) {
734 pr_err("%s: usb epin autoconfig failed\n", __func__);
735 return -ENODEV;
736 }
737 dev->port.in = ep;
738 ep->driver_data = cdev;
739
740 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
741 if (!ep) {
742 pr_err("%s: usb epout autoconfig failed\n", __func__);
743 ret = -ENODEV;
744 goto ep_auto_out_fail;
745 }
746 dev->port.out = ep;
747 ep->driver_data = cdev;
748
749 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
750 if (!ep) {
751 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
752 ret = -ENODEV;
753 goto ep_auto_notify_fail;
754 }
755 dev->notify = ep;
756 ep->driver_data = cdev;
757
758 dev->notify_req = frmnet_alloc_req(ep,
Hemant Kumarfbf113d2011-09-16 18:24:45 -0700759 sizeof(struct usb_cdc_notification),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700760 GFP_KERNEL);
761 if (IS_ERR(dev->notify_req)) {
762 pr_err("%s: unable to allocate memory for notify req\n",
763 __func__);
764 ret = -ENOMEM;
765 goto ep_notify_alloc_fail;
766 }
767
768 dev->notify_req->complete = frmnet_notify_complete;
769 dev->notify_req->context = dev;
770
771 f->descriptors = usb_copy_descriptors(rmnet_fs_function);
772
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530773 if (!f->descriptors)
774 goto fail;
775
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700776 dev->fs.in = usb_find_endpoint(rmnet_fs_function,
777 f->descriptors,
778 &rmnet_fs_in_desc);
779 dev->fs.out = usb_find_endpoint(rmnet_fs_function,
780 f->descriptors,
781 &rmnet_fs_out_desc);
782 dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
783 f->descriptors,
784 &rmnet_fs_notify_desc);
785
786 if (gadget_is_dualspeed(cdev->gadget)) {
787 rmnet_hs_in_desc.bEndpointAddress =
788 rmnet_fs_in_desc.bEndpointAddress;
789 rmnet_hs_out_desc.bEndpointAddress =
790 rmnet_fs_out_desc.bEndpointAddress;
791 rmnet_hs_notify_desc.bEndpointAddress =
792 rmnet_fs_notify_desc.bEndpointAddress;
793
794 /* copy descriptors, and track endpoint copies */
795 f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
796
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530797 if (!f->hs_descriptors)
798 goto fail;
799
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700800 dev->hs.in = usb_find_endpoint(rmnet_hs_function,
801 f->hs_descriptors, &rmnet_hs_in_desc);
802 dev->hs.out = usb_find_endpoint(rmnet_hs_function,
803 f->hs_descriptors, &rmnet_hs_out_desc);
804 dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
805 f->hs_descriptors, &rmnet_hs_notify_desc);
806 }
807
808 pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
809 __func__, dev->port_num,
810 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
811 dev->port.in->name, dev->port.out->name);
812
813 return 0;
814
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530815fail:
816 if (f->descriptors)
817 usb_free_descriptors(f->descriptors);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700818ep_notify_alloc_fail:
819 dev->notify->driver_data = NULL;
820 dev->notify = NULL;
821ep_auto_notify_fail:
822 dev->port.out->driver_data = NULL;
823 dev->port.out = NULL;
824ep_auto_out_fail:
825 dev->port.in->driver_data = NULL;
826 dev->port.in = NULL;
827
828 return ret;
829}
830
/* Add rmnet port @portno as a usb_function on configuration @c:
 * allocate the string id (once), name the function, wire up the
 * function ops and transport callbacks, and register with the
 * composite framework.  Returns 0 or a negative errno.
 */
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int			status;
	struct f_rmnet		*dev;
	struct usb_function	*f;
	unsigned long		flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	/* allocate the interface string id on first use only */
	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	/* GFP_ATOMIC: allocating while holding the spinlock */
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}
890
Manu Gautame3e897c2011-09-12 17:18:46 +0530891static void frmnet_cleanup(void)
892{
893 int i;
894
895 for (i = 0; i < nr_rmnet_ports; i++)
896 kfree(rmnet_ports[i].port);
897
898 nr_rmnet_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700899 no_ctrl_smd_ports = 0;
900 no_data_bam_ports = 0;
Manu Gautame3e897c2011-09-12 17:18:46 +0530901}
902
Hemant Kumar1b820d52011-11-03 15:08:28 -0700903static int frmnet_init_port(const char *ctrl_name, const char *data_name)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700904{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700905 struct f_rmnet *dev;
906 struct rmnet_ports *rmnet_port;
907 int ret;
908 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700909
Hemant Kumar1b820d52011-11-03 15:08:28 -0700910 if (nr_rmnet_ports >= NR_RMNET_PORTS) {
911 pr_err("%s: Max-%d instances supported\n",
912 __func__, NR_RMNET_PORTS);
Manu Gautame3e897c2011-09-12 17:18:46 +0530913 return -EINVAL;
914 }
915
Hemant Kumar1b820d52011-11-03 15:08:28 -0700916 pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
917 __func__, nr_rmnet_ports, ctrl_name, data_name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700918
Hemant Kumar1b820d52011-11-03 15:08:28 -0700919 dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
920 if (!dev) {
921 pr_err("%s: Unable to allocate rmnet device\n", __func__);
922 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700923 }
924
Hemant Kumar1b820d52011-11-03 15:08:28 -0700925 dev->port_num = nr_rmnet_ports;
926 spin_lock_init(&dev->lock);
927 INIT_LIST_HEAD(&dev->cpkt_resp_q);
928
929 rmnet_port = &rmnet_ports[nr_rmnet_ports];
930 rmnet_port->port = dev;
931 rmnet_port->port_num = nr_rmnet_ports;
932 rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
933 rmnet_port->data_xport = str_to_xport(data_name);
934
935 switch (rmnet_port->ctrl_xport) {
936 case USB_GADGET_XPORT_SMD:
937 rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
938 no_ctrl_smd_ports++;
939 break;
940 case USB_GADGET_XPORT_NONE:
941 break;
942 default:
943 pr_err("%s: Un-supported transport: %u\n", __func__,
944 rmnet_port->ctrl_xport);
945 ret = -ENODEV;
946 goto fail_probe;
947 }
948
949 switch (rmnet_port->data_xport) {
950 case USB_GADGET_XPORT_BAM:
951 rmnet_port->data_xport_num = no_data_bam_ports;
952 no_data_bam_ports++;
953 break;
954 case USB_GADGET_XPORT_NONE:
955 break;
956 default:
957 pr_err("%s: Un-supported transport: %u\n", __func__,
958 rmnet_port->data_xport);
959 ret = -ENODEV;
960 goto fail_probe;
961 }
962 nr_rmnet_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700963
964 return 0;
965
966fail_probe:
Manu Gautam2b0234a2011-09-07 16:47:52 +0530967 for (i = 0; i < nr_rmnet_ports; i++)
968 kfree(rmnet_ports[i].port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700969
Hemant Kumar1b820d52011-11-03 15:08:28 -0700970 nr_rmnet_ports = 0;
971 no_ctrl_smd_ports = 0;
972 no_data_bam_ports = 0;
973
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700974 return ret;
975}