blob: 32791d99316bd0afc249abc3db29414983ffbdf1 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/usb/android_composite.h>
18#include <linux/spinlock.h>
19
Hemant Kumar1b820d52011-11-03 15:08:28 -070020#include <mach/usb_gadget_xport.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070021#include "u_rmnet.h"
22#include "gadget_chips.h"
23
24
/* Interrupt-endpoint polling parameters for the notify pipe */
#define RMNET_NOTIFY_INTERVAL	5
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

/* Per-speed set of endpoint descriptor pointers for one rmnet instance. */
struct rmnet_descs {
	struct usb_endpoint_descriptor	*in;
	struct usb_endpoint_descriptor	*out;
	struct usb_endpoint_descriptor	*notify;
};
33
/* DTR bit in the SET_CONTROL_LINE_STATE wValue */
#define ACM_CTRL_DTR	(1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet		port;		/* control/data port shared with transport */
	int			ifc_id;		/* interface id from usb_interface_id() */
	u8			port_num;	/* index into rmnet_ports[] */
	atomic_t		online;		/* set in set_alt, cleared in disable */
	atomic_t		ctrl_online;	/* modem ctrl channel up (frmnet_connect) */
	struct usb_composite_dev *cdev;

	spinlock_t		lock;		/* guards cpkt_resp_q */

	/* usb descriptors */
	struct rmnet_descs	fs;
	struct rmnet_descs	hs;

	/* usb eps*/
	struct usb_ep		*notify;
	struct usb_endpoint_descriptor	*notify_desc;
	struct usb_request	*notify_req;	/* single pre-allocated notify request */

	/* control info */
	struct list_head	cpkt_resp_q;	/* responses queued for the host */
	atomic_t		notify_count;	/* pending RESPONSE_AVAILABLE notifies */
	unsigned long		cpkts_len;
};
63
#define NR_RMNET_PORTS	1

/* number of ports configured so far via frmnet_init_port() */
static unsigned int nr_rmnet_ports;
/* per-transport instance counters; used to hand out transport port numbers */
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_hsic_ports;

/* Maps each rmnet function instance to its control and data transports.
 * Filled in by frmnet_init_port() before the gadget binds.
 */
static struct rmnet_ports {
	enum transport_type	data_xport;
	enum transport_type	ctrl_xport;
	unsigned		data_xport_num;	/* index within the data transport */
	unsigned		ctrl_xport_num;	/* index within the ctrl transport */
	unsigned		port_num;	/* index within rmnet_ports[] */
	struct f_rmnet		*port;
} rmnet_ports[NR_RMNET_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070078
/* Vendor-specific interface with one interrupt (notify) endpoint and a
 * bulk IN/OUT pair.  iInterface is assigned at bind time.
 */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* full-speed interrupt interval is in ms: 1 << 5 = 32 ms */
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};
122
/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* high-speed interval is 2^(bInterval-1) microframes: 2^8 * 125us = 32 ms */
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};
173
174/* ------- misc functions --------------------*/
175
/* Map a usb_function back to its containing f_rmnet instance. */
static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}
180
/* Map a grmnet port back to its containing f_rmnet instance. */
static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}
185
186static struct usb_request *
187frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
188{
189 struct usb_request *req;
190
191 req = usb_ep_alloc_request(ep, flags);
192 if (!req)
193 return ERR_PTR(-ENOMEM);
194
195 req->buf = kmalloc(len, flags);
196 if (!req->buf) {
197 usb_ep_free_request(ep, req);
198 return ERR_PTR(-ENOMEM);
199 }
200
201 req->length = len;
202
203 return req;
204}
205
/* Release a request obtained from frmnet_alloc_req() and its buffer. */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
211
212static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
213{
214 struct rmnet_ctrl_pkt *pkt;
215
216 pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
217 if (!pkt)
218 return ERR_PTR(-ENOMEM);
219
220 pkt->buf = kmalloc(len, flags);
221 if (!pkt->buf) {
222 kfree(pkt);
223 return ERR_PTR(-ENOMEM);
224 }
225 pkt->len = len;
226
227 return pkt;
228}
229
/* Free a packet allocated by rmnet_alloc_ctrl_pkt() (buffer first). */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}
235
236/* -------------------------------------------*/
237
/*
 * One-time setup of every transport referenced by the configured rmnet
 * ports.  BAM data and SMD control size themselves from the global
 * counters; the HSIC setup calls return a base port index, which is
 * then handed out sequentially to each port that selected HSIC.
 *
 * Returns 0 on success or the first transport setup error.
 */
static int rmnet_gport_setup(void)
{
	int ret;
	int port_idx;
	int i;

	pr_debug("%s: bam ports: %u data hsic ports: %u smd ports: %u"
		" ctrl hsic ports: %u nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_hsic_ports,
		no_ctrl_smd_ports, no_ctrl_hsic_ports, nr_rmnet_ports);

	if (no_data_bam_ports) {
		ret = gbam_setup(no_data_bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
		if (ret)
			return ret;
	}

	if (no_data_hsic_ports) {
		port_idx = ghsic_data_setup(no_data_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		/* distribute consecutive hsic data port numbers */
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		/* distribute consecutive hsic control port numbers */
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}
291
Manu Gautam2b0234a2011-09-07 16:47:52 +0530292static int gport_rmnet_connect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700293{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700294 int ret;
295 unsigned port_num;
296 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
297 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700298
Hemant Kumar1b820d52011-11-03 15:08:28 -0700299 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
300 __func__, xport_to_str(cxport), xport_to_str(dxport),
301 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700302
Hemant Kumar1b820d52011-11-03 15:08:28 -0700303 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
304 switch (cxport) {
305 case USB_GADGET_XPORT_SMD:
306 ret = gsmd_ctrl_connect(&dev->port, port_num);
307 if (ret) {
308 pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
309 __func__, ret);
310 return ret;
311 }
312 break;
Jack Pham427f6922011-11-23 19:42:00 -0800313 case USB_GADGET_XPORT_HSIC:
314 ret = ghsic_ctrl_connect(&dev->port, port_num);
315 if (ret) {
316 pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
317 __func__, ret);
318 return ret;
319 }
320 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700321 case USB_GADGET_XPORT_NONE:
322 break;
323 default:
324 pr_err("%s: Un-supported transport: %s\n", __func__,
325 xport_to_str(cxport));
326 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700327 }
328
Hemant Kumar1b820d52011-11-03 15:08:28 -0700329 port_num = rmnet_ports[dev->port_num].data_xport_num;
330 switch (dxport) {
331 case USB_GADGET_XPORT_BAM:
332 ret = gbam_connect(&dev->port, port_num);
333 if (ret) {
334 pr_err("%s: gbam_connect failed: err:%d\n",
335 __func__, ret);
336 gsmd_ctrl_disconnect(&dev->port, port_num);
337 return ret;
338 }
339 break;
Jack Pham427f6922011-11-23 19:42:00 -0800340 case USB_GADGET_XPORT_HSIC:
341 ret = ghsic_data_connect(&dev->port, port_num);
342 if (ret) {
343 pr_err("%s: ghsic_data_connect failed: err:%d\n",
344 __func__, ret);
345 ghsic_ctrl_disconnect(&dev->port, port_num);
346 return ret;
347 }
348 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700349 case USB_GADGET_XPORT_NONE:
350 break;
351 default:
352 pr_err("%s: Un-supported transport: %s\n", __func__,
353 xport_to_str(dxport));
354 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355 }
356
357 return 0;
358}
359
Manu Gautam2b0234a2011-09-07 16:47:52 +0530360static int gport_rmnet_disconnect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700362 unsigned port_num;
363 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
364 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700365
Hemant Kumar1b820d52011-11-03 15:08:28 -0700366 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
367 __func__, xport_to_str(cxport), xport_to_str(dxport),
368 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700369
Hemant Kumar1b820d52011-11-03 15:08:28 -0700370 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
371 switch (cxport) {
372 case USB_GADGET_XPORT_SMD:
373 gsmd_ctrl_disconnect(&dev->port, port_num);
374 break;
Jack Pham427f6922011-11-23 19:42:00 -0800375 case USB_GADGET_XPORT_HSIC:
376 ghsic_ctrl_disconnect(&dev->port, port_num);
377 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700378 case USB_GADGET_XPORT_NONE:
379 break;
380 default:
381 pr_err("%s: Un-supported transport: %s\n", __func__,
382 xport_to_str(cxport));
383 return -ENODEV;
384 }
385
386 port_num = rmnet_ports[dev->port_num].data_xport_num;
387 switch (dxport) {
388 case USB_GADGET_XPORT_BAM:
389 gbam_disconnect(&dev->port, port_num);
390 break;
Jack Pham427f6922011-11-23 19:42:00 -0800391 case USB_GADGET_XPORT_HSIC:
392 ghsic_data_disconnect(&dev->port, port_num);
393 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700394 case USB_GADGET_XPORT_NONE:
395 break;
396 default:
397 pr_err("%s: Un-supported transport: %s\n", __func__,
398 xport_to_str(dxport));
399 return -ENODEV;
400 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700401
402 return 0;
403}
404
/*
 * Undo frmnet_bind(): free the copied descriptor sets, the
 * pre-allocated notify request and the kasprintf()'d function name.
 */
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	/* hs_descriptors exist only on dual-speed gadgets */
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}
419
/*
 * Host dropped this configuration (or cable pulled): disable the
 * notify endpoint, mark offline, discard queued control responses and
 * tear down the transports.
 */
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);

	atomic_set(&dev->online, 0);

	/* nobody will poll for these responses anymore */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}
445
/*
 * SET_INTERFACE handler: (re)enable the notify endpoint with the
 * speed-appropriate descriptor, then (re)connect the data/control
 * transports and mark the device online.
 */
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	/* non-NULL driver_data means the ep is already enabled: a reset */
	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}
	dev->notify_desc = ep_choose(cdev->gadget,
				dev->hs.notify,
				dev->fs.notify);
	ret = usb_ep_enable(dev->notify, dev->notify_desc);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	if (dev->port.in->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		gport_rmnet_disconnect(dev);
	}

	dev->port.in_desc = ep_choose(cdev->gadget,
			dev->hs.in, dev->fs.in);
	dev->port.out_desc = ep_choose(cdev->gadget,
			dev->hs.out, dev->fs.out);

	ret = gport_rmnet_connect(dev);

	/* NOTE(review): online is set even when the connect above failed
	 * — confirm whether that is intentional */
	atomic_set(&dev->online, 1);

	return ret;
}
486
/*
 * Raise a CDC RESPONSE_AVAILABLE interrupt notification so the host
 * issues GET_ENCAPSULATED_RESPONSE.  notify_count coalesces requests:
 * only the 0 -> 1 transition actually queues on the endpoint; while
 * the count stays positive the completion handler re-queues.
 */
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	if (atomic_inc_return(&dev->notify_count) != 1) {
		/* a notification is already in flight; it will re-queue */
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}
522
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700523static void frmnet_connect(struct grmnet *gr)
524{
525 struct f_rmnet *dev;
526
527 if (!gr) {
528 pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
529 return;
530 }
531
532 dev = port_to_rmnet(gr);
533
534 atomic_set(&dev->ctrl_online, 1);
535}
536
/*
 * Transport callback: the control channel to the modem went down.
 * Notifies the host with NETWORK_CONNECTION and flushes every queued
 * control response.  Does nothing further if the USB side is already
 * offline.
 */
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet			*dev;
	unsigned long			flags;
	struct usb_cdc_notification	*event;
	int				status;
	struct rmnet_ctrl_pkt		*cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	/* drop any in-flight RESPONSE_AVAILABLE notification */
	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	/* the channel is gone; queued responses will never be fetched */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}
589
/*
 * Transport callback: queue a control response of @len bytes for the
 * host and raise RESPONSE_AVAILABLE.  The payload is copied, so the
 * caller keeps ownership of @buf.  Responses are silently dropped
 * (returning 0) while either the cable or the control channel is down.
 */
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet		*dev;
	struct rmnet_ctrl_pkt	*cpkt;
	unsigned long		flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}
	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(cpkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(cpkt->buf, buf, len);
	cpkt->len = len;

	dev = port_to_rmnet(gr);

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(cpkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);

	return 0;
}
627
628static void
629frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
630{
631 struct f_rmnet *dev = req->context;
632 struct usb_composite_dev *cdev;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700633 unsigned port_num;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700634
635 if (!dev) {
636 pr_err("%s: rmnet dev is null\n", __func__);
637 return;
638 }
639
640 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
641
642 cdev = dev->cdev;
643
Hemant Kumar1b820d52011-11-03 15:08:28 -0700644 if (dev->port.send_encap_cmd) {
645 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
646 dev->port.send_encap_cmd(port_num, req->buf, req->actual);
647 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700648}
649
/*
 * Completion handler for the interrupt notify endpoint.  While
 * notify_count remains positive another RESPONSE_AVAILABLE is queued
 * so the host keeps polling; disconnect-type errors clear the count.
 */
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		/* count reached zero: no more pending notifications */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}
681
/*
 * ep0 class-request handler for the rmnet interface:
 *  - SEND_ENCAPSULATED_COMMAND: accept the OUT data stage; the payload
 *    is forwarded to the control transport in frmnet_cmd_complete().
 *  - GET_ENCAPSULATED_RESPONSE: return the oldest queued response.
 *  - SET_CONTROL_LINE_STATE: pass DTR state to the modem.
 * Returns the data-stage length queued on ep0, or a negative error.
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	struct usb_request		*req = cdev->req;
	unsigned			port_num;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	int				ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		/* data arrives in the OUT stage; handled on completion */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			/* pop the oldest queued response under the lock */
			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			/* truncate to what the host asked for */
			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
772
773static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
774{
775 struct f_rmnet *dev = func_to_rmnet(f);
776 struct usb_ep *ep;
777 struct usb_composite_dev *cdev = c->cdev;
778 int ret = -ENODEV;
779
780 dev->ifc_id = usb_interface_id(c, f);
781 if (dev->ifc_id < 0) {
782 pr_err("%s: unable to allocate ifc id, err:%d",
783 __func__, dev->ifc_id);
784 return dev->ifc_id;
785 }
786 rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
787
788 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
789 if (!ep) {
790 pr_err("%s: usb epin autoconfig failed\n", __func__);
791 return -ENODEV;
792 }
793 dev->port.in = ep;
794 ep->driver_data = cdev;
795
796 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
797 if (!ep) {
798 pr_err("%s: usb epout autoconfig failed\n", __func__);
799 ret = -ENODEV;
800 goto ep_auto_out_fail;
801 }
802 dev->port.out = ep;
803 ep->driver_data = cdev;
804
805 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
806 if (!ep) {
807 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
808 ret = -ENODEV;
809 goto ep_auto_notify_fail;
810 }
811 dev->notify = ep;
812 ep->driver_data = cdev;
813
814 dev->notify_req = frmnet_alloc_req(ep,
Hemant Kumarfbf113d2011-09-16 18:24:45 -0700815 sizeof(struct usb_cdc_notification),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700816 GFP_KERNEL);
817 if (IS_ERR(dev->notify_req)) {
818 pr_err("%s: unable to allocate memory for notify req\n",
819 __func__);
820 ret = -ENOMEM;
821 goto ep_notify_alloc_fail;
822 }
823
824 dev->notify_req->complete = frmnet_notify_complete;
825 dev->notify_req->context = dev;
826
827 f->descriptors = usb_copy_descriptors(rmnet_fs_function);
828
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530829 if (!f->descriptors)
830 goto fail;
831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832 dev->fs.in = usb_find_endpoint(rmnet_fs_function,
833 f->descriptors,
834 &rmnet_fs_in_desc);
835 dev->fs.out = usb_find_endpoint(rmnet_fs_function,
836 f->descriptors,
837 &rmnet_fs_out_desc);
838 dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
839 f->descriptors,
840 &rmnet_fs_notify_desc);
841
842 if (gadget_is_dualspeed(cdev->gadget)) {
843 rmnet_hs_in_desc.bEndpointAddress =
844 rmnet_fs_in_desc.bEndpointAddress;
845 rmnet_hs_out_desc.bEndpointAddress =
846 rmnet_fs_out_desc.bEndpointAddress;
847 rmnet_hs_notify_desc.bEndpointAddress =
848 rmnet_fs_notify_desc.bEndpointAddress;
849
850 /* copy descriptors, and track endpoint copies */
851 f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
852
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530853 if (!f->hs_descriptors)
854 goto fail;
855
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700856 dev->hs.in = usb_find_endpoint(rmnet_hs_function,
857 f->hs_descriptors, &rmnet_hs_in_desc);
858 dev->hs.out = usb_find_endpoint(rmnet_hs_function,
859 f->hs_descriptors, &rmnet_hs_out_desc);
860 dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
861 f->hs_descriptors, &rmnet_hs_notify_desc);
862 }
863
864 pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
865 __func__, dev->port_num,
866 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
867 dev->port.in->name, dev->port.out->name);
868
869 return 0;
870
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530871fail:
872 if (f->descriptors)
873 usb_free_descriptors(f->descriptors);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700874ep_notify_alloc_fail:
875 dev->notify->driver_data = NULL;
876 dev->notify = NULL;
877ep_auto_notify_fail:
878 dev->port.out->driver_data = NULL;
879 dev->port.out = NULL;
880ep_auto_out_fail:
881 dev->port.in->driver_data = NULL;
882 dev->port.in = NULL;
883
884 return ret;
885}
886
/*
 * Register rmnet instance @portno with configuration @c: allocate the
 * interface string id (once for all instances), name the function and
 * wire up the usb_function and grmnet callbacks before adding it.
 */
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int			status;
	struct f_rmnet		*dev;
	struct usb_function	*f;
	unsigned long		flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	/* the "RmNet" string id is shared by every instance */
	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	/* GFP_ATOMIC: allocating while holding dev->lock */
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}
946
Manu Gautame3e897c2011-09-12 17:18:46 +0530947static void frmnet_cleanup(void)
948{
949 int i;
950
951 for (i = 0; i < nr_rmnet_ports; i++)
952 kfree(rmnet_ports[i].port);
953
954 nr_rmnet_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700955 no_ctrl_smd_ports = 0;
956 no_data_bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -0800957 no_ctrl_hsic_ports = 0;
958 no_data_hsic_ports = 0;
Manu Gautame3e897c2011-09-12 17:18:46 +0530959}
960
Hemant Kumar1b820d52011-11-03 15:08:28 -0700961static int frmnet_init_port(const char *ctrl_name, const char *data_name)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700962{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700963 struct f_rmnet *dev;
964 struct rmnet_ports *rmnet_port;
965 int ret;
966 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700967
Hemant Kumar1b820d52011-11-03 15:08:28 -0700968 if (nr_rmnet_ports >= NR_RMNET_PORTS) {
969 pr_err("%s: Max-%d instances supported\n",
970 __func__, NR_RMNET_PORTS);
Manu Gautame3e897c2011-09-12 17:18:46 +0530971 return -EINVAL;
972 }
973
Hemant Kumar1b820d52011-11-03 15:08:28 -0700974 pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
975 __func__, nr_rmnet_ports, ctrl_name, data_name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700976
Hemant Kumar1b820d52011-11-03 15:08:28 -0700977 dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
978 if (!dev) {
979 pr_err("%s: Unable to allocate rmnet device\n", __func__);
980 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700981 }
982
Hemant Kumar1b820d52011-11-03 15:08:28 -0700983 dev->port_num = nr_rmnet_ports;
984 spin_lock_init(&dev->lock);
985 INIT_LIST_HEAD(&dev->cpkt_resp_q);
986
987 rmnet_port = &rmnet_ports[nr_rmnet_ports];
988 rmnet_port->port = dev;
989 rmnet_port->port_num = nr_rmnet_ports;
990 rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
991 rmnet_port->data_xport = str_to_xport(data_name);
992
993 switch (rmnet_port->ctrl_xport) {
994 case USB_GADGET_XPORT_SMD:
995 rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
996 no_ctrl_smd_ports++;
997 break;
Jack Pham427f6922011-11-23 19:42:00 -0800998 case USB_GADGET_XPORT_HSIC:
999 rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
1000 no_ctrl_hsic_ports++;
1001 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001002 case USB_GADGET_XPORT_NONE:
1003 break;
1004 default:
1005 pr_err("%s: Un-supported transport: %u\n", __func__,
1006 rmnet_port->ctrl_xport);
1007 ret = -ENODEV;
1008 goto fail_probe;
1009 }
1010
1011 switch (rmnet_port->data_xport) {
1012 case USB_GADGET_XPORT_BAM:
1013 rmnet_port->data_xport_num = no_data_bam_ports;
1014 no_data_bam_ports++;
1015 break;
Jack Pham427f6922011-11-23 19:42:00 -08001016 case USB_GADGET_XPORT_HSIC:
1017 rmnet_port->data_xport_num = no_data_hsic_ports;
1018 no_data_hsic_ports++;
1019 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001020 case USB_GADGET_XPORT_NONE:
1021 break;
1022 default:
1023 pr_err("%s: Un-supported transport: %u\n", __func__,
1024 rmnet_port->data_xport);
1025 ret = -ENODEV;
1026 goto fail_probe;
1027 }
1028 nr_rmnet_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001029
1030 return 0;
1031
1032fail_probe:
Manu Gautam2b0234a2011-09-07 16:47:52 +05301033 for (i = 0; i < nr_rmnet_ports; i++)
1034 kfree(rmnet_ports[i].port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001035
Hemant Kumar1b820d52011-11-03 15:08:28 -07001036 nr_rmnet_ports = 0;
1037 no_ctrl_smd_ports = 0;
1038 no_data_bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -08001039 no_ctrl_hsic_ports = 0;
1040 no_data_hsic_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001041
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001042 return ret;
1043}