blob: 72e8f8fce8160ed3df4a15f9fd5c800bc1b77551 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
Anna Perel97b8c222012-01-18 10:08:14 +02002 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/usb/android_composite.h>
18#include <linux/spinlock.h>
19
Hemant Kumar1b820d52011-11-03 15:08:28 -070020#include <mach/usb_gadget_xport.h>
Ofir Cohena1c2a872011-12-14 10:26:34 +020021
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include "u_rmnet.h"
23#include "gadget_chips.h"
24
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#define RMNET_NOTIFY_INTERVAL 5
26#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
27
/* Endpoint descriptor pointers for one speed (FS or HS), resolved at
 * bind time via usb_find_endpoint() so set_alt can pick per-speed descs.
 */
struct rmnet_descs {
	struct usb_endpoint_descriptor	*in;
	struct usb_endpoint_descriptor	*out;
	struct usb_endpoint_descriptor	*notify;
};
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033
/* DTR bit of the CDC SET_CONTROL_LINE_STATE bitmap (wValue) */
#define ACM_CTRL_DTR	(1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet			port;		/* hooks shared with the transport layer */
	int				ifc_id;		/* USB interface number from composite core */
	u8				port_num;	/* index into rmnet_ports[] */
	atomic_t			online;		/* non-zero while function is configured */
	atomic_t			ctrl_online;	/* non-zero while ctrl channel is up */
	struct usb_composite_dev	*cdev;

	spinlock_t			lock;		/* protects cpkt_resp_q */

	/* usb descriptors */
	struct rmnet_descs		fs;
	struct rmnet_descs		hs;

	/* usb eps*/
	struct usb_ep			*notify;
	struct usb_request		*notify_req;

	/* control info */
	struct list_head		cpkt_resp_q;	/* responses pending GET_ENCAPSULATED_RESPONSE */
	atomic_t			notify_count;	/* outstanding RESPONSE_AVAILABLE notifications */
	unsigned long			cpkts_len;
};
62
#define NR_RMNET_PORTS	3
static unsigned int nr_rmnet_ports;		/* total instantiated rmnet ports */
static unsigned int no_ctrl_smd_ports;		/* ports using SMD for control */
static unsigned int no_ctrl_hsic_ports;		/* ports using HSIC for control */
static unsigned int no_data_bam_ports;		/* ports using BAM for data */
static unsigned int no_data_bam2bam_ports;	/* ports using BAM2BAM for data */
static unsigned int no_data_hsic_ports;		/* ports using HSIC for data */
/* Per-port transport routing table, populated before rmnet_gport_setup(). */
static struct rmnet_ports {
	enum transport_type	data_xport;	/* which data transport backs this port */
	enum transport_type	ctrl_xport;	/* which control transport backs this port */
	unsigned		data_xport_num;	/* instance index within the data transport */
	unsigned		ctrl_xport_num;	/* instance index within the ctrl transport */
	unsigned		port_num;	/* rmnet function instance number */
	struct f_rmnet		*port;
} rmnet_ports[NR_RMNET_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070078
/* Single vendor-specific interface with three endpoints:
 * interrupt-IN notify plus bulk IN/OUT for data.
 */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* FS interrupt interval is in ms: 2^5 = 32 ms polling */
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* HS interval is exponential (microframes): 2^(9-1) * 125us = 32 ms */
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};
173
/* ------- misc functions --------------------*/

/* Map a composite usb_function back to its enclosing f_rmnet instance. */
static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}

/* Map a grmnet control port back to its enclosing f_rmnet instance. */
static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}
185
186static struct usb_request *
187frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
188{
189 struct usb_request *req;
190
191 req = usb_ep_alloc_request(ep, flags);
192 if (!req)
193 return ERR_PTR(-ENOMEM);
194
195 req->buf = kmalloc(len, flags);
196 if (!req->buf) {
197 usb_ep_free_request(ep, req);
198 return ERR_PTR(-ENOMEM);
199 }
200
201 req->length = len;
202
203 return req;
204}
205
206void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
207{
208 kfree(req->buf);
209 usb_ep_free_request(ep, req);
210}
211
212static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
213{
214 struct rmnet_ctrl_pkt *pkt;
215
216 pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
217 if (!pkt)
218 return ERR_PTR(-ENOMEM);
219
220 pkt->buf = kmalloc(len, flags);
221 if (!pkt->buf) {
222 kfree(pkt);
223 return ERR_PTR(-ENOMEM);
224 }
225 pkt->len = len;
226
227 return pkt;
228}
229
230static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
231{
232 kfree(pkt->buf);
233 kfree(pkt);
234}
235
236/* -------------------------------------------*/
237
/*
 * One-time bring-up of every transport backend in use (BAM/BAM2BAM and
 * SMD by count; HSIC additionally gets its base port index distributed
 * across the rmnet_ports[] entries that use it).
 * Returns 0 on success or the backend's negative error code.
 */
static int rmnet_gport_setup(void)
{
	int	ret;
	int	port_idx;
	int	i;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u"
		" smd ports: %u ctrl hsic ports: %u"
		" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, nr_rmnet_ports);

	if (no_data_bam_ports || no_data_bam2bam_ports) {
		ret = gbam_setup(no_data_bam_ports,
			no_data_bam2bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
		if (ret)
			return ret;
	}

	if (no_data_hsic_ports) {
		/* ghsic_data_setup returns the first allocated port index;
		 * hand out consecutive indices to the HSIC data users.
		 */
		port_idx = ghsic_data_setup(no_data_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		/* same scheme for the HSIC control side */
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}
294
Manu Gautam2b0234a2011-09-07 16:47:52 +0530295static int gport_rmnet_connect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700296{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700297 int ret;
298 unsigned port_num;
299 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
300 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700301
Hemant Kumar1b820d52011-11-03 15:08:28 -0700302 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
303 __func__, xport_to_str(cxport), xport_to_str(dxport),
304 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700305
Hemant Kumar1b820d52011-11-03 15:08:28 -0700306 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
307 switch (cxport) {
308 case USB_GADGET_XPORT_SMD:
309 ret = gsmd_ctrl_connect(&dev->port, port_num);
310 if (ret) {
311 pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
312 __func__, ret);
313 return ret;
314 }
315 break;
Jack Pham427f6922011-11-23 19:42:00 -0800316 case USB_GADGET_XPORT_HSIC:
317 ret = ghsic_ctrl_connect(&dev->port, port_num);
318 if (ret) {
319 pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
320 __func__, ret);
321 return ret;
322 }
323 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700324 case USB_GADGET_XPORT_NONE:
325 break;
326 default:
327 pr_err("%s: Un-supported transport: %s\n", __func__,
328 xport_to_str(cxport));
329 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700330 }
331
Hemant Kumar1b820d52011-11-03 15:08:28 -0700332 port_num = rmnet_ports[dev->port_num].data_xport_num;
333 switch (dxport) {
334 case USB_GADGET_XPORT_BAM:
Ofir Cohena1c2a872011-12-14 10:26:34 +0200335 case USB_GADGET_XPORT_BAM2BAM:
Ofir Cohena1c2a872011-12-14 10:26:34 +0200336 ret = gbam_connect(&dev->port, port_num,
Anna Perel21515162012-02-02 20:50:02 +0200337 dxport, port_num);
Hemant Kumar1b820d52011-11-03 15:08:28 -0700338 if (ret) {
339 pr_err("%s: gbam_connect failed: err:%d\n",
340 __func__, ret);
341 gsmd_ctrl_disconnect(&dev->port, port_num);
342 return ret;
343 }
344 break;
Jack Pham427f6922011-11-23 19:42:00 -0800345 case USB_GADGET_XPORT_HSIC:
346 ret = ghsic_data_connect(&dev->port, port_num);
347 if (ret) {
348 pr_err("%s: ghsic_data_connect failed: err:%d\n",
349 __func__, ret);
350 ghsic_ctrl_disconnect(&dev->port, port_num);
351 return ret;
352 }
353 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700354 case USB_GADGET_XPORT_NONE:
355 break;
356 default:
357 pr_err("%s: Un-supported transport: %s\n", __func__,
358 xport_to_str(dxport));
359 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700360 }
361
362 return 0;
363}
364
/*
 * Disconnect the control and data transports for @dev, mirroring
 * gport_rmnet_connect().  Returns 0, or -ENODEV for an unknown
 * transport type (the other side is still torn down in the control
 * case only, since the function returns early).
 */
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
	unsigned		port_num;
	enum transport_type	cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type	dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		gsmd_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		gbam_disconnect(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}
410
/*
 * Undo frmnet_bind(): free the per-speed descriptor copies, the notify
 * request (with its buffer), and the kasprintf()'d function name from
 * frmnet_bind_config().
 */
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	/* hs_descriptors are only copied for dual-speed gadgets */
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}
425
/*
 * Bus-suspend callback: if the function is online, report DTR low to
 * the modem-side control transport.
 */
static void frmnet_suspend(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;

	if (!atomic_read(&dev->online))
		return;
	/* This is a workaround for a bug in Windows 7/XP hosts in which
	 * the DTR bit is not set low when going into suspend. Hence force it
	 * low here when this function driver is suspended.
	 */
	if (dev->port.notify_modem) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.notify_modem(&dev->port, port_num, ~ACM_CTRL_DTR);
	}
}
442
/*
 * Composite "disable" callback: quiesce the notify endpoint, mark the
 * function offline, drop all queued control responses, and disconnect
 * the transports.
 */
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);
	dev->notify->driver_data = NULL;

	/* stop new work before draining the response queue */
	atomic_set(&dev->online, 0);

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}
469
/*
 * Composite "set_alt" callback: (re)enable the notify endpoint with the
 * speed-appropriate descriptor, (re)connect the transports, and mark the
 * function online.  Returns 0 or a negative errno.
 */
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	int				ret;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	/* driver_data set means the ep is still enabled from a prior alt */
	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}
	dev->notify->desc = ep_choose(cdev->gadget,
				dev->hs.notify,
				dev->fs.notify);
	ret = usb_ep_enable(dev->notify);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	/* a live data path means this is a reset, not a first enable */
	if (dev->port.in->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		gport_rmnet_disconnect(dev);
	}

	dev->port.in->desc = ep_choose(cdev->gadget,
			dev->hs.in, dev->fs.in);
	dev->port.out->desc = ep_choose(cdev->gadget,
			dev->hs.out, dev->fs.out);

	ret = gport_rmnet_connect(dev);

	atomic_set(&dev->online, 1);

	return ret;
}
510
/*
 * Notify the host that a control response is ready (CDC
 * RESPONSE_AVAILABLE on the interrupt ep).  notify_count coalesces
 * notifications: only the 1->0 transition in frmnet_notify_complete()
 * stops the chain, and only the 0->1 transition here queues a request.
 */
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	/* a notify request is already in flight; it will re-queue itself */
	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}
546
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700547static void frmnet_connect(struct grmnet *gr)
548{
549 struct f_rmnet *dev;
550
551 if (!gr) {
552 pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
553 return;
554 }
555
556 dev = port_to_rmnet(gr);
557
558 atomic_set(&dev->ctrl_online, 1);
559}
560
/*
 * Transport callback: the modem control channel went down.  If the USB
 * side is still online, send a NETWORK_CONNECTION notification and
 * flush all pending control responses.
 */
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet			*dev;
	unsigned long			flags;
	struct usb_cdc_notification	*event;
	int				status;
	struct rmnet_ctrl_pkt		*cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	/* drop any in-flight notify before reusing notify_req below */
	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		/* a cable disconnect may have raced us; stay quiet then */
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}
613
/*
 * Queue a control response of @len bytes (copied from @buf) for the
 * host to fetch via GET_ENCAPSULATED_RESPONSE, then raise a
 * RESPONSE_AVAILABLE notification.  Silently drops the packet (returns
 * 0) when either the USB or the control side is offline.
 * Returns 0, -ENODEV on bad arguments, or -ENOMEM.
 */
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet		*dev;
	struct rmnet_ctrl_pkt	*cpkt;
	unsigned long		flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}
	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(cpkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(cpkt->buf, buf, len);
	cpkt->len = len;

	dev = port_to_rmnet(gr);

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(cpkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);

	return 0;
}
651
652static void
653frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
654{
655 struct f_rmnet *dev = req->context;
656 struct usb_composite_dev *cdev;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700657 unsigned port_num;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700658
659 if (!dev) {
660 pr_err("%s: rmnet dev is null\n", __func__);
661 return;
662 }
663
664 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
665
666 cdev = dev->cdev;
667
Hemant Kumar1b820d52011-11-03 15:08:28 -0700668 if (dev->port.send_encap_cmd) {
669 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
670 dev->port.send_encap_cmd(port_num, req->buf, req->actual);
671 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700672}
673
/*
 * Completion handler for the interrupt notify endpoint.  Keeps sending
 * RESPONSE_AVAILABLE notifications while notify_count stays above zero
 * (see frmnet_ctrl_response_available()); clears the count when the
 * connection is gone.
 */
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		/* last outstanding notification consumed; stop the chain */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}
705
/*
 * ep0 class-request handler.  Supports the CDC encapsulated-command
 * protocol: SEND_ENCAPSULATED_COMMAND (host -> modem),
 * GET_ENCAPSULATED_RESPONSE (pop one queued response), and
 * SET_CONTROL_LINE_STATE (DTR etc. forwarded to the modem).
 * Returns the data-stage length queued on ep0, 0 for status-only,
 * or a negative errno.
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	struct usb_request		*req = cdev->req;
	unsigned			port_num;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	int				ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		/* payload arrives in the OUT data stage; handled on complete */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			/* truncate to what the host asked for */
			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
796
797static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
798{
799 struct f_rmnet *dev = func_to_rmnet(f);
800 struct usb_ep *ep;
801 struct usb_composite_dev *cdev = c->cdev;
802 int ret = -ENODEV;
803
804 dev->ifc_id = usb_interface_id(c, f);
805 if (dev->ifc_id < 0) {
806 pr_err("%s: unable to allocate ifc id, err:%d",
807 __func__, dev->ifc_id);
808 return dev->ifc_id;
809 }
810 rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
811
812 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
813 if (!ep) {
814 pr_err("%s: usb epin autoconfig failed\n", __func__);
815 return -ENODEV;
816 }
817 dev->port.in = ep;
818 ep->driver_data = cdev;
819
820 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
821 if (!ep) {
822 pr_err("%s: usb epout autoconfig failed\n", __func__);
823 ret = -ENODEV;
824 goto ep_auto_out_fail;
825 }
826 dev->port.out = ep;
827 ep->driver_data = cdev;
828
829 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
830 if (!ep) {
831 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
832 ret = -ENODEV;
833 goto ep_auto_notify_fail;
834 }
835 dev->notify = ep;
836 ep->driver_data = cdev;
837
838 dev->notify_req = frmnet_alloc_req(ep,
Hemant Kumarfbf113d2011-09-16 18:24:45 -0700839 sizeof(struct usb_cdc_notification),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700840 GFP_KERNEL);
841 if (IS_ERR(dev->notify_req)) {
842 pr_err("%s: unable to allocate memory for notify req\n",
843 __func__);
844 ret = -ENOMEM;
845 goto ep_notify_alloc_fail;
846 }
847
848 dev->notify_req->complete = frmnet_notify_complete;
849 dev->notify_req->context = dev;
850
851 f->descriptors = usb_copy_descriptors(rmnet_fs_function);
852
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530853 if (!f->descriptors)
854 goto fail;
855
David Brownac5d1542012-02-06 10:37:22 -0800856 dev->fs.in = usb_find_endpoint(rmnet_fs_function,
857 f->descriptors,
858 &rmnet_fs_in_desc);
859 dev->fs.out = usb_find_endpoint(rmnet_fs_function,
860 f->descriptors,
861 &rmnet_fs_out_desc);
862 dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
863 f->descriptors,
864 &rmnet_fs_notify_desc);
865
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700866 if (gadget_is_dualspeed(cdev->gadget)) {
867 rmnet_hs_in_desc.bEndpointAddress =
868 rmnet_fs_in_desc.bEndpointAddress;
869 rmnet_hs_out_desc.bEndpointAddress =
870 rmnet_fs_out_desc.bEndpointAddress;
871 rmnet_hs_notify_desc.bEndpointAddress =
872 rmnet_fs_notify_desc.bEndpointAddress;
873
874 /* copy descriptors, and track endpoint copies */
875 f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
876
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530877 if (!f->hs_descriptors)
878 goto fail;
David Brownac5d1542012-02-06 10:37:22 -0800879
880 dev->hs.in = usb_find_endpoint(rmnet_hs_function,
881 f->hs_descriptors, &rmnet_hs_in_desc);
882 dev->hs.out = usb_find_endpoint(rmnet_hs_function,
883 f->hs_descriptors, &rmnet_hs_out_desc);
884 dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
885 f->hs_descriptors, &rmnet_hs_notify_desc);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700886 }
887
888 pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
889 __func__, dev->port_num,
890 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
891 dev->port.in->name, dev->port.out->name);
892
893 return 0;
894
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530895fail:
896 if (f->descriptors)
897 usb_free_descriptors(f->descriptors);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700898ep_notify_alloc_fail:
899 dev->notify->driver_data = NULL;
900 dev->notify = NULL;
901ep_auto_notify_fail:
902 dev->port.out->driver_data = NULL;
903 dev->port.out = NULL;
904ep_auto_out_fail:
905 dev->port.in->driver_data = NULL;
906 dev->port.in = NULL;
907
908 return ret;
909}
910
/*
 * Register rmnet port @portno with configuration @c: allocate the
 * function name, install the usb_function and grmnet callbacks, and add
 * the function to the configuration.  Returns 0 or a negative errno.
 */
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int			status;
	struct f_rmnet		*dev;
	struct usb_function	*f;
	unsigned long		flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	/* allocate the interface string id once, shared by all ports */
	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	/* GFP_ATOMIC: the name is allocated under dev->lock */
	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	f->suspend = frmnet_suspend;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}
971
Manu Gautame3e897c2011-09-12 17:18:46 +0530972static void frmnet_cleanup(void)
973{
974 int i;
975
976 for (i = 0; i < nr_rmnet_ports; i++)
977 kfree(rmnet_ports[i].port);
978
979 nr_rmnet_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700980 no_ctrl_smd_ports = 0;
981 no_data_bam_ports = 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200982 no_data_bam2bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -0800983 no_ctrl_hsic_ports = 0;
984 no_data_hsic_ports = 0;
Manu Gautame3e897c2011-09-12 17:18:46 +0530985}
986
Hemant Kumar1b820d52011-11-03 15:08:28 -0700987static int frmnet_init_port(const char *ctrl_name, const char *data_name)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700988{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700989 struct f_rmnet *dev;
990 struct rmnet_ports *rmnet_port;
991 int ret;
992 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700993
Hemant Kumar1b820d52011-11-03 15:08:28 -0700994 if (nr_rmnet_ports >= NR_RMNET_PORTS) {
995 pr_err("%s: Max-%d instances supported\n",
996 __func__, NR_RMNET_PORTS);
Manu Gautame3e897c2011-09-12 17:18:46 +0530997 return -EINVAL;
998 }
999
Hemant Kumar1b820d52011-11-03 15:08:28 -07001000 pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
1001 __func__, nr_rmnet_ports, ctrl_name, data_name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001002
Hemant Kumar1b820d52011-11-03 15:08:28 -07001003 dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
1004 if (!dev) {
1005 pr_err("%s: Unable to allocate rmnet device\n", __func__);
1006 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001007 }
1008
Hemant Kumar1b820d52011-11-03 15:08:28 -07001009 dev->port_num = nr_rmnet_ports;
1010 spin_lock_init(&dev->lock);
1011 INIT_LIST_HEAD(&dev->cpkt_resp_q);
1012
1013 rmnet_port = &rmnet_ports[nr_rmnet_ports];
1014 rmnet_port->port = dev;
1015 rmnet_port->port_num = nr_rmnet_ports;
1016 rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
1017 rmnet_port->data_xport = str_to_xport(data_name);
1018
1019 switch (rmnet_port->ctrl_xport) {
1020 case USB_GADGET_XPORT_SMD:
1021 rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
1022 no_ctrl_smd_ports++;
1023 break;
Jack Pham427f6922011-11-23 19:42:00 -08001024 case USB_GADGET_XPORT_HSIC:
1025 rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
1026 no_ctrl_hsic_ports++;
1027 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001028 case USB_GADGET_XPORT_NONE:
1029 break;
1030 default:
1031 pr_err("%s: Un-supported transport: %u\n", __func__,
1032 rmnet_port->ctrl_xport);
1033 ret = -ENODEV;
1034 goto fail_probe;
1035 }
1036
1037 switch (rmnet_port->data_xport) {
1038 case USB_GADGET_XPORT_BAM:
1039 rmnet_port->data_xport_num = no_data_bam_ports;
1040 no_data_bam_ports++;
1041 break;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001042 case USB_GADGET_XPORT_BAM2BAM:
1043 rmnet_port->data_xport_num = no_data_bam2bam_ports;
1044 no_data_bam2bam_ports++;
1045 break;
Jack Pham427f6922011-11-23 19:42:00 -08001046 case USB_GADGET_XPORT_HSIC:
1047 rmnet_port->data_xport_num = no_data_hsic_ports;
1048 no_data_hsic_ports++;
1049 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001050 case USB_GADGET_XPORT_NONE:
1051 break;
1052 default:
1053 pr_err("%s: Un-supported transport: %u\n", __func__,
1054 rmnet_port->data_xport);
1055 ret = -ENODEV;
1056 goto fail_probe;
1057 }
1058 nr_rmnet_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001059
1060 return 0;
1061
1062fail_probe:
Manu Gautam2b0234a2011-09-07 16:47:52 +05301063 for (i = 0; i < nr_rmnet_ports; i++)
1064 kfree(rmnet_ports[i].port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001065
Hemant Kumar1b820d52011-11-03 15:08:28 -07001066 nr_rmnet_ports = 0;
1067 no_ctrl_smd_ports = 0;
1068 no_data_bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -08001069 no_ctrl_hsic_ports = 0;
1070 no_data_hsic_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001071
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001072 return ret;
1073}