/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/usb/android_composite.h>
#include <linux/spinlock.h>

#include <mach/usb_gadget_xport.h>

#include "u_rmnet.h"
#include "gadget_chips.h"

#define RMNET_NOTIFY_INTERVAL	5
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

struct rmnet_descs {
	struct usb_endpoint_descriptor	*in;
	struct usb_endpoint_descriptor	*out;
	struct usb_endpoint_descriptor	*notify;
};

#define ACM_CTRL_DTR	(1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet			port;
	int				ifc_id;
	u8				port_num;
	atomic_t			online;
	atomic_t			ctrl_online;
	struct usb_composite_dev	*cdev;

	spinlock_t			lock;

	/* usb descriptors */
	struct rmnet_descs		fs;
	struct rmnet_descs		hs;

	/* usb eps */
	struct usb_ep			*notify;
	struct usb_endpoint_descriptor	*notify_desc;
	struct usb_request		*notify_req;

	/* control info */
	struct list_head		cpkt_resp_q;
	atomic_t			notify_count;
	unsigned long			cpkts_len;
};

#define NR_RMNET_PORTS	1
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
static struct rmnet_ports {
	enum transport_type	data_xport;
	enum transport_type	ctrl_xport;
	unsigned		data_xport_num;
	unsigned		ctrl_xport_num;
	unsigned		port_num;
	struct f_rmnet		*port;
} rmnet_ports[NR_RMNET_PORTS];

static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{ } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};

/* ------- misc functions --------------------*/

static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}

static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}

static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, flags);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->buf = kmalloc(len, flags);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return ERR_PTR(-ENOMEM);
	}

	req->length = len;

	return req;
}

void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
{
	struct rmnet_ctrl_pkt *pkt;

	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
	if (!pkt)
		return ERR_PTR(-ENOMEM);

	pkt->buf = kmalloc(len, flags);
	if (!pkt->buf) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->len = len;

	return pkt;
}

static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}

/* -------------------------------------------*/

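/*
 * Set up the underlying data (BAM/BAM2BAM, HSIC) and control (SMD, HSIC)
 * transports for all configured rmnet ports.
 */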
static int rmnet_gport_setup(void)
{
	int ret;
	int port_idx;
	int i;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u"
		" smd ports: %u ctrl hsic ports: %u"
		" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, nr_rmnet_ports);

	if (no_data_bam_ports || no_data_bam2bam_ports) {
		ret = gbam_setup(no_data_bam_ports,
				no_data_bam2bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
		if (ret)
			return ret;
	}

	if (no_data_hsic_ports) {
		port_idx = ghsic_data_setup(no_data_hsic_ports,
					USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
					USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}

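/* Bring up the control and data transports bound to this port. */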
static int gport_rmnet_connect(struct f_rmnet *dev)
{
	int ret;
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		ret = gsmd_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		/* currently only one connection (idx 0)
		 * is supported */
		ret = gbam_connect(&dev->port, port_num,
					dxport, 0);
		if (ret) {
			pr_err("%s: gbam_connect failed: err:%d\n",
					__func__, ret);
			gsmd_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_data_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_data_connect failed: err:%d\n",
					__func__, ret);
			ghsic_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

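/* Tear down the control and data transports bound to this port. */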
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		gsmd_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		gbam_disconnect(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

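/* Free the copied descriptors, the notify request and the function name. */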
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}

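/*
 * The host deselected the function or the cable was disconnected: disable
 * the notify endpoint, drop queued control responses and disconnect the
 * transports.
 */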
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);
	dev->notify->driver_data = NULL;

	atomic_set(&dev->online, 0);

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}

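/*
 * The host selected the interface: enable the notify endpoint with the
 * speed-appropriate descriptor and (re)connect the transports.
 */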
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}
	dev->notify_desc = ep_choose(cdev->gadget,
				dev->hs.notify,
				dev->fs.notify);
	ret = usb_ep_enable(dev->notify, dev->notify_desc);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	if (dev->port.in->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		gport_rmnet_disconnect(dev);
	}

	dev->port.in_desc = ep_choose(cdev->gadget,
			dev->hs.in, dev->fs.in);
	dev->port.out_desc = ep_choose(cdev->gadget,
			dev->hs.out, dev->fs.out);

	ret = gport_rmnet_connect(dev);

	atomic_set(&dev->online, 1);

	return ret;
}

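/*
 * Queue a RESPONSE_AVAILABLE notification on the interrupt endpoint so the
 * host issues GET_ENCAPSULATED_RESPONSE; notify_count coalesces repeated
 * notifications while one is still in flight.
 */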
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event;
	unsigned long flags;
	int ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}

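/* Control transport callback: the control channel is up. */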
static void frmnet_connect(struct grmnet *gr)
{
	struct f_rmnet *dev;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 1);
}

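/*
 * Control transport callback: the control channel went down. Report
 * NETWORK_CONNECTION to the host and drop any pending control responses.
 */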
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet *dev;
	unsigned long flags;
	struct usb_cdc_notification *event;
	int status;
	struct rmnet_ctrl_pkt *cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}

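/*
 * Queue a response received from the control transport and notify the host
 * that it is available.
 */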
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet *dev;
	struct rmnet_ctrl_pkt *cpkt;
	unsigned long flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}
	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(cpkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(cpkt->buf, buf, len);
	cpkt->len = len;

	dev = port_to_rmnet(gr);

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(cpkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);

	return 0;
}

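/*
 * Completion of the ep0 data stage for SEND_ENCAPSULATED_COMMAND: pass the
 * received command on to the control transport.
 */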
static void
frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	struct usb_composite_dev *cdev;
	unsigned port_num;

	if (!dev) {
		pr_err("%s: rmnet dev is null\n", __func__);
		return;
	}

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	cdev = dev->cdev;

	if (dev->port.send_encap_cmd) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.send_encap_cmd(port_num, req->buf, req->actual);
	}
}

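/*
 * Notify endpoint completion: re-queue the notification while responses are
 * still pending; stop on connection reset/shutdown.
 */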
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}

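/*
 * Handle class-specific ep0 requests: SEND_ENCAPSULATED_COMMAND,
 * GET_ENCAPSULATED_RESPONSE and SET_CONTROL_LINE_STATE (DTR).
 */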
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = cdev->req;
	unsigned port_num;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	int ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}

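/*
 * Allocate the interface number, auto-configure the bulk IN/OUT and
 * interrupt notify endpoints, and build the full- and high-speed
 * descriptor sets.
 */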
static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_ep *ep;
	struct usb_composite_dev *cdev = c->cdev;
	int ret = -ENODEV;

	dev->ifc_id = usb_interface_id(c, f);
	if (dev->ifc_id < 0) {
		pr_err("%s: unable to allocate ifc id, err:%d",
				__func__, dev->ifc_id);
		return dev->ifc_id;
	}
	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
	if (!ep) {
		pr_err("%s: usb epin autoconfig failed\n", __func__);
		return -ENODEV;
	}
	dev->port.in = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
	if (!ep) {
		pr_err("%s: usb epout autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_out_fail;
	}
	dev->port.out = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
	if (!ep) {
		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_notify_fail;
	}
	dev->notify = ep;
	ep->driver_data = cdev;

	dev->notify_req = frmnet_alloc_req(ep,
				sizeof(struct usb_cdc_notification),
				GFP_KERNEL);
	if (IS_ERR(dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		ret = -ENOMEM;
		goto ep_notify_alloc_fail;
	}

	dev->notify_req->complete = frmnet_notify_complete;
	dev->notify_req->context = dev;

	f->descriptors = usb_copy_descriptors(rmnet_fs_function);

	if (!f->descriptors)
		goto fail;

	dev->fs.in = usb_find_endpoint(rmnet_fs_function,
					f->descriptors,
					&rmnet_fs_in_desc);
	dev->fs.out = usb_find_endpoint(rmnet_fs_function,
					f->descriptors,
					&rmnet_fs_out_desc);
	dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
					f->descriptors,
					&rmnet_fs_notify_desc);

	if (gadget_is_dualspeed(cdev->gadget)) {
		rmnet_hs_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_hs_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_hs_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);

		if (!f->hs_descriptors)
			goto fail;

		dev->hs.in = usb_find_endpoint(rmnet_hs_function,
				f->hs_descriptors, &rmnet_hs_in_desc);
		dev->hs.out = usb_find_endpoint(rmnet_hs_function,
				f->hs_descriptors, &rmnet_hs_out_desc);
		dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
				f->hs_descriptors, &rmnet_hs_notify_desc);
	}

	pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
			__func__, dev->port_num,
			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
			dev->port.in->name, dev->port.out->name);

	return 0;

fail:
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
ep_notify_alloc_fail:
	dev->notify->driver_data = NULL;
	dev->notify = NULL;
ep_auto_notify_fail:
	dev->port.out->driver_data = NULL;
	dev->port.out = NULL;
ep_auto_out_fail:
	dev->port.in->driver_data = NULL;
	dev->port.in = NULL;

	return ret;
}

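/*
 * Add one rmnet function instance to the configuration and hook up its
 * usb_function operations and grmnet callbacks.
 */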
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int status;
	struct f_rmnet *dev;
	struct usb_function *f;
	unsigned long flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}

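/* Free all port instances and reset the per-transport port counters. */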
static void frmnet_cleanup(void)
{
	int i;

	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_data_bam_ports = 0;
	no_data_bam2bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;
}

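/*
 * Allocate one rmnet port and map its control/data transport names onto
 * transport types and per-transport indices.
 */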
static int frmnet_init_port(const char *ctrl_name, const char *data_name)
{
	struct f_rmnet *dev;
	struct rmnet_ports *rmnet_port;
	int ret;
	int i;

	if (nr_rmnet_ports >= NR_RMNET_PORTS) {
		pr_err("%s: Max-%d instances supported\n",
				__func__, NR_RMNET_PORTS);
		return -EINVAL;
	}

	pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
			__func__, nr_rmnet_ports, ctrl_name, data_name);

	dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
	if (!dev) {
		pr_err("%s: Unable to allocate rmnet device\n", __func__);
		return -ENOMEM;
	}

	dev->port_num = nr_rmnet_ports;
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->cpkt_resp_q);

	rmnet_port = &rmnet_ports[nr_rmnet_ports];
	rmnet_port->port = dev;
	rmnet_port->port_num = nr_rmnet_ports;
	rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
	rmnet_port->data_xport = str_to_xport(data_name);

	switch (rmnet_port->ctrl_xport) {
	case USB_GADGET_XPORT_SMD:
		rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
		no_ctrl_smd_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
		no_ctrl_hsic_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->ctrl_xport);
		ret = -ENODEV;
		goto fail_probe;
	}

	switch (rmnet_port->data_xport) {
	case USB_GADGET_XPORT_BAM:
		rmnet_port->data_xport_num = no_data_bam_ports;
		no_data_bam_ports++;
		break;
	case USB_GADGET_XPORT_BAM2BAM:
		rmnet_port->data_xport_num = no_data_bam2bam_ports;
		no_data_bam2bam_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		rmnet_port->data_xport_num = no_data_hsic_ports;
		no_data_hsic_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->data_xport);
		ret = -ENODEV;
		goto fail_probe;
	}
	nr_rmnet_ports++;

	return 0;

fail_probe:
	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_data_bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;

	return ret;
}