blob: d846c4ebac41b81ed262591a41721d11b97aa3b5 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
Anna Perel97b8c222012-01-18 10:08:14 +02002 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/usb/android_composite.h>
18#include <linux/spinlock.h>
19
Hemant Kumar1b820d52011-11-03 15:08:28 -070020#include <mach/usb_gadget_xport.h>
Ofir Cohena1c2a872011-12-14 10:26:34 +020021
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include "u_rmnet.h"
23#include "gadget_chips.h"
24
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#define RMNET_NOTIFY_INTERVAL 5
26#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
27
/* Per-speed (FS/HS) set of endpoint descriptor pointers, filled in at
 * bind time from the copied descriptor arrays.
 */
struct rmnet_descs {
	struct usb_endpoint_descriptor	*in;	/* bulk IN (device -> host) */
	struct usb_endpoint_descriptor	*out;	/* bulk OUT (host -> device) */
	struct usb_endpoint_descriptor	*notify;	/* interrupt IN notifications */
};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033
/* DTR bit of the ACM-style control-line state (wValue of
 * USB_CDC_REQ_SET_CONTROL_LINE_STATE); forced low in frmnet_suspend().
 */
#define ACM_CTRL_DTR	(1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet		port;		/* embedded port; function is port.func */
	int			ifc_id;		/* USB interface number from usb_interface_id() */
	u8			port_num;	/* index into rmnet_ports[] */
	atomic_t		online;		/* set in set_alt, cleared in disable */
	atomic_t		ctrl_online;	/* control channel up (frmnet_connect/disconnect) */
	struct usb_composite_dev *cdev;

	spinlock_t		lock;		/* protects cpkt_resp_q */

	/* usb descriptors */
	struct rmnet_descs	fs;		/* full-speed endpoint descriptor set */
	struct rmnet_descs	hs;		/* high-speed endpoint descriptor set */

	/* usb eps*/
	struct usb_ep		*notify;	/* interrupt IN endpoint */
	struct usb_endpoint_descriptor *notify_desc;	/* active (speed-chosen) notify desc */
	struct usb_request	*notify_req;	/* single reusable notify request */

	/* control info */
	struct list_head	cpkt_resp_q;	/* queued ctrl responses for GET_ENCAPSULATED */
	atomic_t		notify_count;	/* pending RESPONSE_AVAILABLE notifications */
	unsigned long		cpkts_len;
};
63
/* Maximum number of rmnet function instances supported. */
#define NR_RMNET_PORTS	3

/* Counts of ports per transport type; filled in by frmnet_init_port()
 * and consumed by rmnet_gport_setup(), reset in frmnet_cleanup().
 */
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;

/* Per-port routing table: which transport backs the control and data
 * paths, and the transport-local port indices assigned at setup time.
 */
static struct rmnet_ports {
	enum transport_type	data_xport;	/* data path transport (BAM/BAM2BAM/HSIC/...) */
	enum transport_type	ctrl_xport;	/* control path transport (SMD/HSIC/...) */
	unsigned		data_xport_num;	/* index within the data transport */
	unsigned		ctrl_xport_num;	/* index within the ctrl transport */
	unsigned		port_num;	/* logical rmnet port number */
	struct f_rmnet		*port;
} rmnet_ports[NR_RMNET_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070079
/* Vendor-specific interface with three endpoints: bulk IN/OUT for data
 * and an interrupt IN endpoint for CDC-style notifications.
 */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* FS interrupt interval is in ms, hence the 2^n scaling */
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

/* Descriptor list handed to usb_copy_descriptors() for full speed. */
static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};
123
/* High speed support; endpoint addresses are copied from the FS
 * descriptors after autoconfig in frmnet_bind().
 */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* HS interval is 2^(bInterval-1) microframes */
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

/* Descriptor list handed to usb_copy_descriptors() for high speed. */
static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};
157
/* String descriptors */

/* Index 0 is the interface name; its string id is allocated lazily in
 * frmnet_bind_config().
 */
static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};
174
175/* ------- misc functions --------------------*/
176
/* Map a composite usb_function back to its enclosing f_rmnet instance. */
static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}
181
/* Map a grmnet port back to its enclosing f_rmnet instance. */
static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}
186
187static struct usb_request *
188frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
189{
190 struct usb_request *req;
191
192 req = usb_ep_alloc_request(ep, flags);
193 if (!req)
194 return ERR_PTR(-ENOMEM);
195
196 req->buf = kmalloc(len, flags);
197 if (!req->buf) {
198 usb_ep_free_request(ep, req);
199 return ERR_PTR(-ENOMEM);
200 }
201
202 req->length = len;
203
204 return req;
205}
206
/* Free a request allocated by frmnet_alloc_req(): buffer first, then
 * the request itself.
 */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
212
213static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
214{
215 struct rmnet_ctrl_pkt *pkt;
216
217 pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
218 if (!pkt)
219 return ERR_PTR(-ENOMEM);
220
221 pkt->buf = kmalloc(len, flags);
222 if (!pkt->buf) {
223 kfree(pkt);
224 return ERR_PTR(-ENOMEM);
225 }
226 pkt->len = len;
227
228 return pkt;
229}
230
/* Release a control packet and its payload buffer. */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}
236
237/* -------------------------------------------*/
238
/* One-time setup of every backing transport that at least one rmnet
 * port uses (BAM/BAM2BAM data, SMD control, HSIC data/control), and
 * assignment of per-transport port indices into rmnet_ports[].
 * Returns 0 on success or the first transport setup error.
 */
static int rmnet_gport_setup(void)
{
	int ret;
	int port_idx;
	int i;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u"
		" smd ports: %u ctrl hsic ports: %u"
		" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, nr_rmnet_ports);

	if (no_data_bam_ports || no_data_bam2bam_ports) {
		ret = gbam_setup(no_data_bam_ports,
			no_data_bam2bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
		if (ret)
			return ret;
	}

	if (no_data_hsic_ports) {
		/* ghsic_data_setup returns the first allocated index;
		 * hand out consecutive indices to each HSIC data port.
		 */
		port_idx = ghsic_data_setup(no_data_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		/* same scheme for the HSIC control ports */
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}
295
Manu Gautam2b0234a2011-09-07 16:47:52 +0530296static int gport_rmnet_connect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700297{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700298 int ret;
299 unsigned port_num;
300 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
301 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700302
Hemant Kumar1b820d52011-11-03 15:08:28 -0700303 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
304 __func__, xport_to_str(cxport), xport_to_str(dxport),
305 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700306
Hemant Kumar1b820d52011-11-03 15:08:28 -0700307 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
308 switch (cxport) {
309 case USB_GADGET_XPORT_SMD:
310 ret = gsmd_ctrl_connect(&dev->port, port_num);
311 if (ret) {
312 pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
313 __func__, ret);
314 return ret;
315 }
316 break;
Jack Pham427f6922011-11-23 19:42:00 -0800317 case USB_GADGET_XPORT_HSIC:
318 ret = ghsic_ctrl_connect(&dev->port, port_num);
319 if (ret) {
320 pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
321 __func__, ret);
322 return ret;
323 }
324 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700325 case USB_GADGET_XPORT_NONE:
326 break;
327 default:
328 pr_err("%s: Un-supported transport: %s\n", __func__,
329 xport_to_str(cxport));
330 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700331 }
332
Hemant Kumar1b820d52011-11-03 15:08:28 -0700333 port_num = rmnet_ports[dev->port_num].data_xport_num;
334 switch (dxport) {
335 case USB_GADGET_XPORT_BAM:
Ofir Cohena1c2a872011-12-14 10:26:34 +0200336 case USB_GADGET_XPORT_BAM2BAM:
Ofir Cohena1c2a872011-12-14 10:26:34 +0200337 ret = gbam_connect(&dev->port, port_num,
Anna Perel21515162012-02-02 20:50:02 +0200338 dxport, port_num);
Hemant Kumar1b820d52011-11-03 15:08:28 -0700339 if (ret) {
340 pr_err("%s: gbam_connect failed: err:%d\n",
341 __func__, ret);
342 gsmd_ctrl_disconnect(&dev->port, port_num);
343 return ret;
344 }
345 break;
Jack Pham427f6922011-11-23 19:42:00 -0800346 case USB_GADGET_XPORT_HSIC:
347 ret = ghsic_data_connect(&dev->port, port_num);
348 if (ret) {
349 pr_err("%s: ghsic_data_connect failed: err:%d\n",
350 __func__, ret);
351 ghsic_ctrl_disconnect(&dev->port, port_num);
352 return ret;
353 }
354 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700355 case USB_GADGET_XPORT_NONE:
356 break;
357 default:
358 pr_err("%s: Un-supported transport: %s\n", __func__,
359 xport_to_str(dxport));
360 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361 }
362
363 return 0;
364}
365
/* Tear down the control and data transports for @dev's port; mirror of
 * gport_rmnet_connect(). Returns 0, or -ENODEV for an unknown
 * transport type.
 */
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	/* control path first */
	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		gsmd_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	/* then the data path */
	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		gbam_disconnect(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}
411
/* Composite unbind callback: release the descriptor copies, the notify
 * request, and the kasprintf'd function name from frmnet_bind_config().
 */
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	/* hs_descriptors only exist on dual-speed gadgets */
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}
426
/* Suspend callback: tell the modem DTR dropped, even though the host
 * did not actually clear it (see comment below).
 */
static void frmnet_suspend(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;

	if (!atomic_read(&dev->online))
		return;
	/* This is a workaround for a bug in Windows 7/XP hosts in which
	 * the DTR bit is not set low when going into suspend. Hence force it
	 * low here when this function driver is suspended.
	 */
	if (dev->port.notify_modem) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.notify_modem(&dev->port, port_num, ~ACM_CTRL_DTR);
	}
}
443
/* Disable callback (host deconfigured / cable pulled): shut the notify
 * endpoint, drop queued control responses, and tear down the transports.
 */
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);
	dev->notify->driver_data = NULL;

	atomic_set(&dev->online, 0);

	/* flush any control responses still waiting for the host */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}
470
/* set_alt callback: (re)enable the notify endpoint with the
 * speed-appropriate descriptor, pick the data endpoint descriptors,
 * and (re)connect the backing transports. Returns 0 or a negative errno.
 */
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	/* non-NULL driver_data means the ep was already enabled: reset it */
	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}
	dev->notify_desc = ep_choose(cdev->gadget,
				dev->hs.notify,
				dev->fs.notify);
	ret = usb_ep_enable(dev->notify, dev->notify_desc);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	/* a live data path means this is a reconfiguration: drop it first */
	if (dev->port.in->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		gport_rmnet_disconnect(dev);
	}

	dev->port.in_desc = ep_choose(cdev->gadget,
			dev->hs.in, dev->fs.in);
	dev->port.out_desc = ep_choose(cdev->gadget,
			dev->hs.out, dev->fs.out);

	ret = gport_rmnet_connect(dev);

	atomic_set(&dev->online, 1);

	return ret;
}
511
/* Tell the host a control response is waiting by queuing a CDC
 * RESPONSE_AVAILABLE notification. notify_count coalesces requests:
 * only the first pending one actually queues the interrupt transfer;
 * frmnet_notify_complete() re-queues for the rest.
 */
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	/* a notification is already in flight: just bump the count */
	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}
547
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700548static void frmnet_connect(struct grmnet *gr)
549{
550 struct f_rmnet *dev;
551
552 if (!gr) {
553 pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
554 return;
555 }
556
557 dev = port_to_rmnet(gr);
558
559 atomic_set(&dev->ctrl_online, 1);
560}
561
/* Transport callback: the modem-side control channel went down. Send a
 * NETWORK_CONNECTION notification to the host and purge any queued
 * control responses.
 */
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet			*dev;
	unsigned long			flags;
	struct usb_cdc_notification	*event;
	int				status;
	struct rmnet_ctrl_pkt		*cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	/* drop any notification already sitting in the endpoint FIFO */
	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		/* a cable-disconnect race is expected; only log real errors */
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	/* discard responses the host can no longer fetch */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}
614
/* Transport callback: queue a @len-byte control response from the modem
 * for the host to fetch via GET_ENCAPSULATED_RESPONSE, then signal
 * RESPONSE_AVAILABLE. Silently drops the packet (returning 0) when the
 * function or control path is offline. Returns -ENODEV/-ENOMEM on
 * argument/allocation failure.
 */
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet		*dev;
	struct rmnet_ctrl_pkt	*cpkt;
	unsigned long		flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}
	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(cpkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(cpkt->buf, buf, len);
	cpkt->len = len;

	dev = port_to_rmnet(gr);

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(cpkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);

	return 0;
}
652
653static void
654frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
655{
656 struct f_rmnet *dev = req->context;
657 struct usb_composite_dev *cdev;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700658 unsigned port_num;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700659
660 if (!dev) {
661 pr_err("%s: rmnet dev is null\n", __func__);
662 return;
663 }
664
665 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
666
667 cdev = dev->cdev;
668
Hemant Kumar1b820d52011-11-03 15:08:28 -0700669 if (dev->port.send_encap_cmd) {
670 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
671 dev->port.send_encap_cmd(port_num, req->buf, req->actual);
672 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700673}
674
/* Completion for the interrupt-IN notify request. Re-queues itself
 * while notify_count indicates further pending RESPONSE_AVAILABLE
 * notifications (coalesced in frmnet_ctrl_response_available()).
 */
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		/* reached zero: nothing more to notify */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}
706
/* ep0 setup handler for the class requests this function implements:
 * SEND_ENCAPSULATED_COMMAND (host -> modem command, data phase handled
 * by frmnet_cmd_complete), GET_ENCAPSULATED_RESPONSE (return the oldest
 * queued control packet), and SET_CONTROL_LINE_STATE (DTR forwarding).
 * Returns the data-phase length queued, 0 for status-only, or a
 * negative errno.
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	struct usb_request		*req = cdev->req;
	unsigned			port_num;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	int				ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		/* accept the OUT data phase; payload arrives in
		 * frmnet_cmd_complete()
		 */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			/* truncate to what the host asked for */
			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* forward DTR/RTS bits (w_value) to the modem */
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
797
798static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
799{
800 struct f_rmnet *dev = func_to_rmnet(f);
801 struct usb_ep *ep;
802 struct usb_composite_dev *cdev = c->cdev;
803 int ret = -ENODEV;
804
805 dev->ifc_id = usb_interface_id(c, f);
806 if (dev->ifc_id < 0) {
807 pr_err("%s: unable to allocate ifc id, err:%d",
808 __func__, dev->ifc_id);
809 return dev->ifc_id;
810 }
811 rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
812
813 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
814 if (!ep) {
815 pr_err("%s: usb epin autoconfig failed\n", __func__);
816 return -ENODEV;
817 }
818 dev->port.in = ep;
819 ep->driver_data = cdev;
820
821 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
822 if (!ep) {
823 pr_err("%s: usb epout autoconfig failed\n", __func__);
824 ret = -ENODEV;
825 goto ep_auto_out_fail;
826 }
827 dev->port.out = ep;
828 ep->driver_data = cdev;
829
830 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
831 if (!ep) {
832 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
833 ret = -ENODEV;
834 goto ep_auto_notify_fail;
835 }
836 dev->notify = ep;
837 ep->driver_data = cdev;
838
839 dev->notify_req = frmnet_alloc_req(ep,
Hemant Kumarfbf113d2011-09-16 18:24:45 -0700840 sizeof(struct usb_cdc_notification),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700841 GFP_KERNEL);
842 if (IS_ERR(dev->notify_req)) {
843 pr_err("%s: unable to allocate memory for notify req\n",
844 __func__);
845 ret = -ENOMEM;
846 goto ep_notify_alloc_fail;
847 }
848
849 dev->notify_req->complete = frmnet_notify_complete;
850 dev->notify_req->context = dev;
851
852 f->descriptors = usb_copy_descriptors(rmnet_fs_function);
853
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530854 if (!f->descriptors)
855 goto fail;
856
David Brownac5d1542012-02-06 10:37:22 -0800857 dev->fs.in = usb_find_endpoint(rmnet_fs_function,
858 f->descriptors,
859 &rmnet_fs_in_desc);
860 dev->fs.out = usb_find_endpoint(rmnet_fs_function,
861 f->descriptors,
862 &rmnet_fs_out_desc);
863 dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
864 f->descriptors,
865 &rmnet_fs_notify_desc);
866
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700867 if (gadget_is_dualspeed(cdev->gadget)) {
868 rmnet_hs_in_desc.bEndpointAddress =
869 rmnet_fs_in_desc.bEndpointAddress;
870 rmnet_hs_out_desc.bEndpointAddress =
871 rmnet_fs_out_desc.bEndpointAddress;
872 rmnet_hs_notify_desc.bEndpointAddress =
873 rmnet_fs_notify_desc.bEndpointAddress;
874
875 /* copy descriptors, and track endpoint copies */
876 f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
877
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530878 if (!f->hs_descriptors)
879 goto fail;
David Brownac5d1542012-02-06 10:37:22 -0800880
881 dev->hs.in = usb_find_endpoint(rmnet_hs_function,
882 f->hs_descriptors, &rmnet_hs_in_desc);
883 dev->hs.out = usb_find_endpoint(rmnet_hs_function,
884 f->hs_descriptors, &rmnet_hs_out_desc);
885 dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
886 f->hs_descriptors, &rmnet_hs_notify_desc);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700887 }
888
889 pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
890 __func__, dev->port_num,
891 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
892 dev->port.in->name, dev->port.out->name);
893
894 return 0;
895
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530896fail:
897 if (f->descriptors)
898 usb_free_descriptors(f->descriptors);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700899ep_notify_alloc_fail:
900 dev->notify->driver_data = NULL;
901 dev->notify = NULL;
902ep_auto_notify_fail:
903 dev->port.out->driver_data = NULL;
904 dev->port.out = NULL;
905ep_auto_out_fail:
906 dev->port.in->driver_data = NULL;
907 dev->port.in = NULL;
908
909 return ret;
910}
911
/* Add the rmnet function instance for logical port @portno to
 * configuration @c: allocate the interface string id on first use, name
 * the function "rmnet<portno>", wire up the usb_function and grmnet
 * callbacks, and register with the composite core. Returns 0 or a
 * negative errno.
 */
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int			status;
	struct f_rmnet		*dev;
	struct usb_function	*f;
	unsigned long		flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	/* allocate the "RmNet" string id once, shared by all ports */
	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	/* GFP_ATOMIC because we are under the spinlock */
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	f->suspend = frmnet_suspend;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}
972
Manu Gautame3e897c2011-09-12 17:18:46 +0530973static void frmnet_cleanup(void)
974{
975 int i;
976
977 for (i = 0; i < nr_rmnet_ports; i++)
978 kfree(rmnet_ports[i].port);
979
980 nr_rmnet_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700981 no_ctrl_smd_ports = 0;
982 no_data_bam_ports = 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200983 no_data_bam2bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -0800984 no_ctrl_hsic_ports = 0;
985 no_data_hsic_ports = 0;
Manu Gautame3e897c2011-09-12 17:18:46 +0530986}
987
Hemant Kumar1b820d52011-11-03 15:08:28 -0700988static int frmnet_init_port(const char *ctrl_name, const char *data_name)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700989{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700990 struct f_rmnet *dev;
991 struct rmnet_ports *rmnet_port;
992 int ret;
993 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700994
Hemant Kumar1b820d52011-11-03 15:08:28 -0700995 if (nr_rmnet_ports >= NR_RMNET_PORTS) {
996 pr_err("%s: Max-%d instances supported\n",
997 __func__, NR_RMNET_PORTS);
Manu Gautame3e897c2011-09-12 17:18:46 +0530998 return -EINVAL;
999 }
1000
Hemant Kumar1b820d52011-11-03 15:08:28 -07001001 pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
1002 __func__, nr_rmnet_ports, ctrl_name, data_name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001003
Hemant Kumar1b820d52011-11-03 15:08:28 -07001004 dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
1005 if (!dev) {
1006 pr_err("%s: Unable to allocate rmnet device\n", __func__);
1007 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001008 }
1009
Hemant Kumar1b820d52011-11-03 15:08:28 -07001010 dev->port_num = nr_rmnet_ports;
1011 spin_lock_init(&dev->lock);
1012 INIT_LIST_HEAD(&dev->cpkt_resp_q);
1013
1014 rmnet_port = &rmnet_ports[nr_rmnet_ports];
1015 rmnet_port->port = dev;
1016 rmnet_port->port_num = nr_rmnet_ports;
1017 rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
1018 rmnet_port->data_xport = str_to_xport(data_name);
1019
1020 switch (rmnet_port->ctrl_xport) {
1021 case USB_GADGET_XPORT_SMD:
1022 rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
1023 no_ctrl_smd_ports++;
1024 break;
Jack Pham427f6922011-11-23 19:42:00 -08001025 case USB_GADGET_XPORT_HSIC:
1026 rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
1027 no_ctrl_hsic_ports++;
1028 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001029 case USB_GADGET_XPORT_NONE:
1030 break;
1031 default:
1032 pr_err("%s: Un-supported transport: %u\n", __func__,
1033 rmnet_port->ctrl_xport);
1034 ret = -ENODEV;
1035 goto fail_probe;
1036 }
1037
1038 switch (rmnet_port->data_xport) {
1039 case USB_GADGET_XPORT_BAM:
1040 rmnet_port->data_xport_num = no_data_bam_ports;
1041 no_data_bam_ports++;
1042 break;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001043 case USB_GADGET_XPORT_BAM2BAM:
1044 rmnet_port->data_xport_num = no_data_bam2bam_ports;
1045 no_data_bam2bam_ports++;
1046 break;
Jack Pham427f6922011-11-23 19:42:00 -08001047 case USB_GADGET_XPORT_HSIC:
1048 rmnet_port->data_xport_num = no_data_hsic_ports;
1049 no_data_hsic_ports++;
1050 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001051 case USB_GADGET_XPORT_NONE:
1052 break;
1053 default:
1054 pr_err("%s: Un-supported transport: %u\n", __func__,
1055 rmnet_port->data_xport);
1056 ret = -ENODEV;
1057 goto fail_probe;
1058 }
1059 nr_rmnet_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060
1061 return 0;
1062
1063fail_probe:
Manu Gautam2b0234a2011-09-07 16:47:52 +05301064 for (i = 0; i < nr_rmnet_ports; i++)
1065 kfree(rmnet_ports[i].port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001066
Hemant Kumar1b820d52011-11-03 15:08:28 -07001067 nr_rmnet_ports = 0;
1068 no_ctrl_smd_ports = 0;
1069 no_data_bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -08001070 no_ctrl_hsic_ports = 0;
1071 no_data_hsic_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001072
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001073 return ret;
1074}