/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

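/*
 * f_rmnet - RmNet USB gadget function driver
 *
 * Exposes a vendor-specific USB interface with one interrupt (notify)
 * endpoint and a bulk IN/OUT endpoint pair, and bridges it to on-device
 * transports: control traffic over SMD or HSIC, data traffic over BAM,
 * BAM2BAM or HSIC.
 */
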
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/usb/android_composite.h>
#include <linux/spinlock.h>

#include <mach/usb_gadget_xport.h>

#include "u_rmnet.h"
#include "gadget_chips.h"

#define RMNET_NOTIFY_INTERVAL	5
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

struct rmnet_descs {
	struct usb_endpoint_descriptor *in;
	struct usb_endpoint_descriptor *out;
	struct usb_endpoint_descriptor *notify;
};

#define ACM_CTRL_DTR	(1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet port;
	int ifc_id;
	u8 port_num;
	atomic_t online;
	atomic_t ctrl_online;
	struct usb_composite_dev *cdev;

	spinlock_t lock;

	/* usb descriptors */
	struct rmnet_descs fs;
	struct rmnet_descs hs;

	/* usb eps */
	struct usb_ep *notify;
	struct usb_endpoint_descriptor *notify_desc;
	struct usb_request *notify_req;

	/* control info */
	struct list_head cpkt_resp_q;
	atomic_t notify_count;
	unsigned long cpkts_len;
};

#define NR_RMNET_PORTS	1
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
static struct rmnet_ports {
	enum transport_type data_xport;
	enum transport_type ctrl_xport;
	unsigned data_xport_num;
	unsigned ctrl_xport_num;
	unsigned port_num;
	struct f_rmnet *port;
} rmnet_ports[NR_RMNET_PORTS];

static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	.bNumEndpoints = 3,
	.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval = 1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval = RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{ } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language = 0x0409, /* en-us */
	.strings = rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};

/* ------- misc functions --------------------*/

static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}

static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}

static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, flags);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->buf = kmalloc(len, flags);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return ERR_PTR(-ENOMEM);
	}

	req->length = len;

	return req;
}

void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
{
	struct rmnet_ctrl_pkt *pkt;

	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
	if (!pkt)
		return ERR_PTR(-ENOMEM);

	pkt->buf = kmalloc(len, flags);
	if (!pkt->buf) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->len = len;

	return pkt;
}

static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}

/* -------------------------------------------*/

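/*
 * rmnet_gport_setup - initialize the backing transports once the total
 * number of control/data ports of each type is known. For HSIC, the
 * setup call returns the first allocated port index, which is then
 * distributed across the rmnet_ports[] entries that use HSIC.
 */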
static int rmnet_gport_setup(void)
{
	int ret;
	int port_idx;
	int i;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u"
		" smd ports: %u ctrl hsic ports: %u"
		" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, nr_rmnet_ports);

	if (no_data_bam_ports || no_data_bam2bam_ports) {
		ret = gbam_setup(no_data_bam_ports,
			no_data_bam2bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
		if (ret)
			return ret;
	}

	if (no_data_hsic_ports) {
		port_idx = ghsic_data_setup(no_data_hsic_ports,
					USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
					USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}

static int gport_rmnet_connect(struct f_rmnet *dev)
{
	int ret;
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
		__func__, xport_to_str(cxport), xport_to_str(dxport),
		dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		ret = gsmd_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
				__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
				__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		/* currently only one connection (idx 0)
		   is supported */
		ret = gbam_connect(&dev->port, port_num,
					dxport, 0);
		if (ret) {
			pr_err("%s: gbam_connect failed: err:%d\n",
				__func__, ret);
			gsmd_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_data_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_data_connect failed: err:%d\n",
				__func__, ret);
			ghsic_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
		__func__, xport_to_str(cxport), xport_to_str(dxport),
		dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		gsmd_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		gbam_disconnect(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}

static void frmnet_suspend(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;

	if (!atomic_read(&dev->online))
		return;
	/* This is a workaround for a bug in Windows 7/XP hosts in which
	 * the DTR bit is not set low when going into suspend. Hence force it
	 * low here when this function driver is suspended.
	 */
	if (dev->port.notify_modem) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.notify_modem(&dev->port, port_num, ~ACM_CTRL_DTR);
	}
}

static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);
	dev->notify->driver_data = NULL;

	atomic_set(&dev->online, 0);

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}

static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}
	dev->notify_desc = ep_choose(cdev->gadget,
				dev->hs.notify,
				dev->fs.notify);
	ret = usb_ep_enable(dev->notify, dev->notify_desc);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	if (dev->port.in->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		gport_rmnet_disconnect(dev);
	}

	dev->port.in_desc = ep_choose(cdev->gadget,
			dev->hs.in, dev->fs.in);
	dev->port.out_desc = ep_choose(cdev->gadget,
			dev->hs.out, dev->fs.out);

	ret = gport_rmnet_connect(dev);

	atomic_set(&dev->online, 1);

	return ret;
}

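/*
 * Queue a CDC RESPONSE_AVAILABLE notification on the interrupt endpoint.
 * notify_count tracks how many responses are pending; only the first
 * increment actually queues the request, the completion handler re-queues
 * it until the count drops back to zero.
 */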
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event;
	unsigned long flags;
	int ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}

static void frmnet_connect(struct grmnet *gr)
{
	struct f_rmnet *dev;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 1);
}

static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet *dev;
	unsigned long flags;
	struct usb_cdc_notification *event;
	int status;
	struct rmnet_ctrl_pkt *cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);
}

static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet *dev;
	struct rmnet_ctrl_pkt *cpkt;
	unsigned long flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}
	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(cpkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(cpkt->buf, buf, len);
	cpkt->len = len;

	dev = port_to_rmnet(gr);

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(cpkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);

	return 0;
}

static void
frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	struct usb_composite_dev *cdev;
	unsigned port_num;

	if (!dev) {
		pr_err("%s: rmnet dev is null\n", __func__);
		return;
	}

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	cdev = dev->cdev;

	if (dev->port.send_encap_cmd) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.send_encap_cmd(port_num, req->buf, req->actual);
	}
}

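/*
 * Completion handler for the notify request: on connection loss the
 * pending-notification counter is reset; otherwise the request is
 * re-queued as long as more responses are waiting to be read.
 */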
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}

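/*
 * Handle interface-specific control requests on ep0:
 * SEND_ENCAPSULATED_COMMAND forwards host commands to the control
 * transport, GET_ENCAPSULATED_RESPONSE returns the oldest queued
 * response packet, and SET_CONTROL_LINE_STATE passes DTR changes on
 * to the modem.
 */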
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = cdev->req;
	unsigned port_num;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	int ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}

static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_ep *ep;
	struct usb_composite_dev *cdev = c->cdev;
	int ret = -ENODEV;

	dev->ifc_id = usb_interface_id(c, f);
	if (dev->ifc_id < 0) {
		pr_err("%s: unable to allocate ifc id, err:%d",
				__func__, dev->ifc_id);
		return dev->ifc_id;
	}
	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
	if (!ep) {
		pr_err("%s: usb epin autoconfig failed\n", __func__);
		return -ENODEV;
	}
	dev->port.in = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
	if (!ep) {
		pr_err("%s: usb epout autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_out_fail;
	}
	dev->port.out = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
	if (!ep) {
		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_notify_fail;
	}
	dev->notify = ep;
	ep->driver_data = cdev;

	dev->notify_req = frmnet_alloc_req(ep,
				sizeof(struct usb_cdc_notification),
				GFP_KERNEL);
	if (IS_ERR(dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		ret = -ENOMEM;
		goto ep_notify_alloc_fail;
	}

	dev->notify_req->complete = frmnet_notify_complete;
	dev->notify_req->context = dev;

	f->descriptors = usb_copy_descriptors(rmnet_fs_function);

	if (!f->descriptors)
		goto fail;

	dev->fs.in = usb_find_endpoint(rmnet_fs_function,
					f->descriptors,
					&rmnet_fs_in_desc);
	dev->fs.out = usb_find_endpoint(rmnet_fs_function,
					f->descriptors,
					&rmnet_fs_out_desc);
	dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
					f->descriptors,
					&rmnet_fs_notify_desc);

	if (gadget_is_dualspeed(cdev->gadget)) {
		rmnet_hs_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_hs_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_hs_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);

		if (!f->hs_descriptors)
			goto fail;

		dev->hs.in = usb_find_endpoint(rmnet_hs_function,
				f->hs_descriptors, &rmnet_hs_in_desc);
		dev->hs.out = usb_find_endpoint(rmnet_hs_function,
				f->hs_descriptors, &rmnet_hs_out_desc);
		dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
				f->hs_descriptors, &rmnet_hs_notify_desc);
	}

	pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
			__func__, dev->port_num,
			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
			dev->port.in->name, dev->port.out->name);

	return 0;

fail:
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
ep_notify_alloc_fail:
	dev->notify->driver_data = NULL;
	dev->notify = NULL;
ep_auto_notify_fail:
	dev->port.out->driver_data = NULL;
	dev->port.out = NULL;
ep_auto_out_fail:
	dev->port.in->driver_data = NULL;
	dev->port.in = NULL;

	return ret;
}

static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int status;
	struct f_rmnet *dev;
	struct usb_function *f;
	unsigned long flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	f->suspend = frmnet_suspend;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}

static void frmnet_cleanup(void)
{
	int i;

	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_data_bam_ports = 0;
	no_data_bam2bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;
}

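/*
 * frmnet_init_port - register one rmnet instance for a "ctrl,data"
 * transport pair. Records the transport types and per-transport port
 * indices; at most NR_RMNET_PORTS instances are supported.
 */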
static int frmnet_init_port(const char *ctrl_name, const char *data_name)
{
	struct f_rmnet *dev;
	struct rmnet_ports *rmnet_port;
	int ret;
	int i;

	if (nr_rmnet_ports >= NR_RMNET_PORTS) {
		pr_err("%s: Max-%d instances supported\n",
				__func__, NR_RMNET_PORTS);
		return -EINVAL;
	}

	pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
			__func__, nr_rmnet_ports, ctrl_name, data_name);

	dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
	if (!dev) {
		pr_err("%s: Unable to allocate rmnet device\n", __func__);
		return -ENOMEM;
	}

	dev->port_num = nr_rmnet_ports;
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->cpkt_resp_q);

	rmnet_port = &rmnet_ports[nr_rmnet_ports];
	rmnet_port->port = dev;
	rmnet_port->port_num = nr_rmnet_ports;
	rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
	rmnet_port->data_xport = str_to_xport(data_name);

	switch (rmnet_port->ctrl_xport) {
	case USB_GADGET_XPORT_SMD:
		rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
		no_ctrl_smd_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
		no_ctrl_hsic_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->ctrl_xport);
		ret = -ENODEV;
		goto fail_probe;
	}

	switch (rmnet_port->data_xport) {
	case USB_GADGET_XPORT_BAM:
		rmnet_port->data_xport_num = no_data_bam_ports;
		no_data_bam_ports++;
		break;
	case USB_GADGET_XPORT_BAM2BAM:
		rmnet_port->data_xport_num = no_data_bam2bam_ports;
		no_data_bam2bam_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		rmnet_port->data_xport_num = no_data_hsic_ports;
		no_data_hsic_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->data_xport);
		ret = -ENODEV;
		goto fail_probe;
	}
	nr_rmnet_ports++;

	return 0;

fail_probe:
	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_data_bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;

	return ret;
}