blob: cc26c85b0042f573789e144eadc935280e0b0f5a [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
Anna Perel97b8c222012-01-18 10:08:14 +02002 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/usb/android_composite.h>
18#include <linux/spinlock.h>
19
Hemant Kumar1b820d52011-11-03 15:08:28 -070020#include <mach/usb_gadget_xport.h>
Ofir Cohena1c2a872011-12-14 10:26:34 +020021
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include "u_rmnet.h"
23#include "gadget_chips.h"
24
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#define RMNET_NOTIFY_INTERVAL 5
26#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
27
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028
29#define ACM_CTRL_DTR (1 << 0)
30
/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet		port;		/* embeds the usb_function; shared with transports */
	int			ifc_id;		/* interface id from usb_interface_id() */
	u8			port_num;	/* index into rmnet_ports[] */
	atomic_t		online;		/* set in set_alt, cleared in disable */
	atomic_t		ctrl_online;	/* modem control channel up (frmnet_connect) */
	struct usb_composite_dev *cdev;

	spinlock_t		lock;		/* protects cpkt_resp_q */

	/* usb eps*/
	struct usb_ep		*notify;	/* interrupt-IN notification endpoint */
	struct usb_request	*notify_req;	/* single reusable notify request */

	/* control info */
	struct list_head	cpkt_resp_q;	/* queued encapsulated responses for host */
	atomic_t		notify_count;	/* outstanding RESPONSE_AVAILABLE notifications */
	unsigned long		cpkts_len;
};
53
Anna Perel21515162012-02-02 20:50:02 +020054#define NR_RMNET_PORTS 3
Manu Gautam2b0234a2011-09-07 16:47:52 +053055static unsigned int nr_rmnet_ports;
Hemant Kumar1b820d52011-11-03 15:08:28 -070056static unsigned int no_ctrl_smd_ports;
Jack Pham427f6922011-11-23 19:42:00 -080057static unsigned int no_ctrl_hsic_ports;
Hemant Kumar1b820d52011-11-03 15:08:28 -070058static unsigned int no_data_bam_ports;
Ofir Cohena1c2a872011-12-14 10:26:34 +020059static unsigned int no_data_bam2bam_ports;
Jack Pham427f6922011-11-23 19:42:00 -080060static unsigned int no_data_hsic_ports;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061static struct rmnet_ports {
Hemant Kumar1b820d52011-11-03 15:08:28 -070062 enum transport_type data_xport;
63 enum transport_type ctrl_xport;
64 unsigned data_xport_num;
65 unsigned ctrl_xport_num;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070066 unsigned port_num;
67 struct f_rmnet *port;
Manu Gautam2b0234a2011-09-07 16:47:52 +053068} rmnet_ports[NR_RMNET_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070069
/* Vendor-specific interface: one interrupt (notify) + bulk IN/OUT pair. */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* FS interrupt interval is in ms, hence the 2^n scaling */
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* HS interval is 2^(n-1) microframes */
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};
164
165/* ------- misc functions --------------------*/
166
167static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
168{
169 return container_of(f, struct f_rmnet, port.func);
170}
171
172static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
173{
174 return container_of(r, struct f_rmnet, port);
175}
176
177static struct usb_request *
178frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
179{
180 struct usb_request *req;
181
182 req = usb_ep_alloc_request(ep, flags);
183 if (!req)
184 return ERR_PTR(-ENOMEM);
185
186 req->buf = kmalloc(len, flags);
187 if (!req->buf) {
188 usb_ep_free_request(ep, req);
189 return ERR_PTR(-ENOMEM);
190 }
191
192 req->length = len;
193
194 return req;
195}
196
/* Free a request allocated by frmnet_alloc_req() (buffer first, then req). */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
202
203static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
204{
205 struct rmnet_ctrl_pkt *pkt;
206
207 pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
208 if (!pkt)
209 return ERR_PTR(-ENOMEM);
210
211 pkt->buf = kmalloc(len, flags);
212 if (!pkt->buf) {
213 kfree(pkt);
214 return ERR_PTR(-ENOMEM);
215 }
216 pkt->len = len;
217
218 return pkt;
219}
220
/* Release a control packet and its payload buffer. */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}
226
227/* -------------------------------------------*/
228
Hemant Kumar1b820d52011-11-03 15:08:28 -0700229static int rmnet_gport_setup(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700230{
Jack Pham427f6922011-11-23 19:42:00 -0800231 int ret;
232 int port_idx;
233 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700234
Ofir Cohena1c2a872011-12-14 10:26:34 +0200235 pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u"
236 " smd ports: %u ctrl hsic ports: %u"
237 " nr_rmnet_ports: %u\n",
238 __func__, no_data_bam_ports, no_data_bam2bam_ports,
239 no_data_hsic_ports, no_ctrl_smd_ports,
240 no_ctrl_hsic_ports, nr_rmnet_ports);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241
Ofir Cohena1c2a872011-12-14 10:26:34 +0200242 if (no_data_bam_ports || no_data_bam2bam_ports) {
243 ret = gbam_setup(no_data_bam_ports,
244 no_data_bam2bam_ports);
Hemant Kumar1b820d52011-11-03 15:08:28 -0700245 if (ret)
246 return ret;
247 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700248
Hemant Kumar1b820d52011-11-03 15:08:28 -0700249 if (no_ctrl_smd_ports) {
250 ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
251 if (ret)
252 return ret;
253 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254
Jack Pham427f6922011-11-23 19:42:00 -0800255 if (no_data_hsic_ports) {
256 port_idx = ghsic_data_setup(no_data_hsic_ports,
257 USB_GADGET_RMNET);
258 if (port_idx < 0)
259 return port_idx;
260 for (i = 0; i < nr_rmnet_ports; i++) {
261 if (rmnet_ports[i].data_xport ==
262 USB_GADGET_XPORT_HSIC) {
263 rmnet_ports[i].data_xport_num = port_idx;
264 port_idx++;
265 }
266 }
267 }
268
269 if (no_ctrl_hsic_ports) {
270 port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
271 USB_GADGET_RMNET);
272 if (port_idx < 0)
273 return port_idx;
274 for (i = 0; i < nr_rmnet_ports; i++) {
275 if (rmnet_ports[i].ctrl_xport ==
276 USB_GADGET_XPORT_HSIC) {
277 rmnet_ports[i].ctrl_xport_num = port_idx;
278 port_idx++;
279 }
280 }
281 }
282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 return 0;
284}
285
Manu Gautam2b0234a2011-09-07 16:47:52 +0530286static int gport_rmnet_connect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700288 int ret;
289 unsigned port_num;
290 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
291 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700292
Hemant Kumar1b820d52011-11-03 15:08:28 -0700293 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
294 __func__, xport_to_str(cxport), xport_to_str(dxport),
295 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700296
Hemant Kumar1b820d52011-11-03 15:08:28 -0700297 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
298 switch (cxport) {
299 case USB_GADGET_XPORT_SMD:
300 ret = gsmd_ctrl_connect(&dev->port, port_num);
301 if (ret) {
302 pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
303 __func__, ret);
304 return ret;
305 }
306 break;
Jack Pham427f6922011-11-23 19:42:00 -0800307 case USB_GADGET_XPORT_HSIC:
308 ret = ghsic_ctrl_connect(&dev->port, port_num);
309 if (ret) {
310 pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
311 __func__, ret);
312 return ret;
313 }
314 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700315 case USB_GADGET_XPORT_NONE:
316 break;
317 default:
318 pr_err("%s: Un-supported transport: %s\n", __func__,
319 xport_to_str(cxport));
320 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700321 }
322
Hemant Kumar1b820d52011-11-03 15:08:28 -0700323 port_num = rmnet_ports[dev->port_num].data_xport_num;
324 switch (dxport) {
325 case USB_GADGET_XPORT_BAM:
Ofir Cohena1c2a872011-12-14 10:26:34 +0200326 case USB_GADGET_XPORT_BAM2BAM:
Ofir Cohena1c2a872011-12-14 10:26:34 +0200327 ret = gbam_connect(&dev->port, port_num,
Anna Perel21515162012-02-02 20:50:02 +0200328 dxport, port_num);
Hemant Kumar1b820d52011-11-03 15:08:28 -0700329 if (ret) {
330 pr_err("%s: gbam_connect failed: err:%d\n",
331 __func__, ret);
332 gsmd_ctrl_disconnect(&dev->port, port_num);
333 return ret;
334 }
335 break;
Jack Pham427f6922011-11-23 19:42:00 -0800336 case USB_GADGET_XPORT_HSIC:
337 ret = ghsic_data_connect(&dev->port, port_num);
338 if (ret) {
339 pr_err("%s: ghsic_data_connect failed: err:%d\n",
340 __func__, ret);
341 ghsic_ctrl_disconnect(&dev->port, port_num);
342 return ret;
343 }
344 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700345 case USB_GADGET_XPORT_NONE:
346 break;
347 default:
348 pr_err("%s: Un-supported transport: %s\n", __func__,
349 xport_to_str(dxport));
350 return -ENODEV;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351 }
352
353 return 0;
354}
355
Manu Gautam2b0234a2011-09-07 16:47:52 +0530356static int gport_rmnet_disconnect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700358 unsigned port_num;
359 enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
360 enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700361
Hemant Kumar1b820d52011-11-03 15:08:28 -0700362 pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
363 __func__, xport_to_str(cxport), xport_to_str(dxport),
364 dev, dev->port_num);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700365
Hemant Kumar1b820d52011-11-03 15:08:28 -0700366 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
367 switch (cxport) {
368 case USB_GADGET_XPORT_SMD:
369 gsmd_ctrl_disconnect(&dev->port, port_num);
370 break;
Jack Pham427f6922011-11-23 19:42:00 -0800371 case USB_GADGET_XPORT_HSIC:
372 ghsic_ctrl_disconnect(&dev->port, port_num);
373 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700374 case USB_GADGET_XPORT_NONE:
375 break;
376 default:
377 pr_err("%s: Un-supported transport: %s\n", __func__,
378 xport_to_str(cxport));
379 return -ENODEV;
380 }
381
382 port_num = rmnet_ports[dev->port_num].data_xport_num;
383 switch (dxport) {
384 case USB_GADGET_XPORT_BAM:
Ofir Cohena1c2a872011-12-14 10:26:34 +0200385 case USB_GADGET_XPORT_BAM2BAM:
386 gbam_disconnect(&dev->port, port_num, dxport);
Hemant Kumar1b820d52011-11-03 15:08:28 -0700387 break;
Jack Pham427f6922011-11-23 19:42:00 -0800388 case USB_GADGET_XPORT_HSIC:
389 ghsic_data_disconnect(&dev->port, port_num);
390 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700391 case USB_GADGET_XPORT_NONE:
392 break;
393 default:
394 pr_err("%s: Un-supported transport: %s\n", __func__,
395 xport_to_str(dxport));
396 return -ENODEV;
397 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700398
399 return 0;
400}
401
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700402static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
403{
404 struct f_rmnet *dev = func_to_rmnet(f);
405
406 pr_debug("%s: portno:%d\n", __func__, dev->port_num);
407
408 if (gadget_is_dualspeed(c->cdev->gadget))
409 usb_free_descriptors(f->hs_descriptors);
410 usb_free_descriptors(f->descriptors);
411
412 frmnet_free_req(dev->notify, dev->notify_req);
413
Manu Gautamdd4222b2011-09-09 15:06:05 +0530414 kfree(f->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700415}
416
Chiranjeevi Velempatie007bed2012-01-24 09:54:11 +0530417static void frmnet_suspend(struct usb_function *f)
418{
419 struct f_rmnet *dev = func_to_rmnet(f);
420 unsigned port_num;
421
422 if (!atomic_read(&dev->online))
423 return;
424 /* This is a workaround for a bug in Windows 7/XP hosts in which
425 * the DTR bit is not set low when going into suspend. Hence force it
426 * low here when this function driver is suspended.
427 */
428 if (dev->port.notify_modem) {
429 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
430 dev->port.notify_modem(&dev->port, port_num, ~ACM_CTRL_DTR);
431 }
432}
433
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700434static void frmnet_disable(struct usb_function *f)
435{
436 struct f_rmnet *dev = func_to_rmnet(f);
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700437 unsigned long flags;
438 struct rmnet_ctrl_pkt *cpkt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700439
440 pr_debug("%s: port#%d\n", __func__, dev->port_num);
441
442 usb_ep_disable(dev->notify);
Anna Perel97b8c222012-01-18 10:08:14 +0200443 dev->notify->driver_data = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700444
445 atomic_set(&dev->online, 0);
446
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700447 spin_lock_irqsave(&dev->lock, flags);
448 while (!list_empty(&dev->cpkt_resp_q)) {
449 cpkt = list_first_entry(&dev->cpkt_resp_q,
450 struct rmnet_ctrl_pkt, list);
451
452 list_del(&cpkt->list);
453 rmnet_free_ctrl_pkt(cpkt);
454 }
455 atomic_set(&dev->notify_count, 0);
456 spin_unlock_irqrestore(&dev->lock, flags);
457
Manu Gautam2b0234a2011-09-07 16:47:52 +0530458 gport_rmnet_disconnect(dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700459}
460
/*
 * usb_function.set_alt: host selected the interface. (Re)enable the notify
 * endpoint, configure the bulk endpoints for the active speed and connect
 * the backing transports.
 *
 * NOTE(review): dev->online is set to 1 even when gport_rmnet_connect()
 * returned an error (ret is propagated but online is not rolled back) —
 * looks intentional only if the composite core resets on failure; confirm.
 */
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	int				ret;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	/* non-NULL driver_data means the ep is still enabled from before */
	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}

	ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
	if (ret) {
		dev->notify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed failes for ep %s, result %d\n",
					dev->notify->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->notify);

	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	/* handle host-initiated reconfiguration: drop the old connection */
	if (dev->port.in->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		gport_rmnet_disconnect(dev);
	}

	if (config_ep_by_speed(cdev->gadget, f, dev->port.in) ||
		config_ep_by_speed(cdev->gadget, f, dev->port.out)) {
		dev->port.in->desc = NULL;
		dev->port.out->desc = NULL;
		return -EINVAL;
	}

	ret = gport_rmnet_connect(dev);

	atomic_set(&dev->online, 1);

	return ret;
}
509
/*
 * Queue a CDC RESPONSE_AVAILABLE notification on the interrupt endpoint.
 * notify_count coalesces bursts: only the 1 -> first transition actually
 * queues the request; frmnet_notify_complete() re-queues while the count
 * is still non-zero.
 */
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	/* a notification is already in flight; it will be re-queued */
	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}
545
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700546static void frmnet_connect(struct grmnet *gr)
547{
548 struct f_rmnet *dev;
549
550 if (!gr) {
551 pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
552 return;
553 }
554
555 dev = port_to_rmnet(gr);
556
557 atomic_set(&dev->ctrl_online, 1);
558}
559
/*
 * Transport callback: the modem-side control channel went down. Tell the
 * host via a NETWORK_CONNECTION notification and drop any responses that
 * were queued for it.
 */
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet			*dev;
	unsigned long			flags;
	struct usb_cdc_notification	*event;
	int				status;
	struct rmnet_ctrl_pkt		*cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	/* cancel any in-flight RESPONSE_AVAILABLE notification */
	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		/* the cable may have gone away between the checks */
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	/* responses queued for the dead channel are no longer deliverable */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}
612
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700613static int
Hemant Kumarf60c0252011-11-03 12:37:07 -0700614frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700615{
616 struct f_rmnet *dev;
Hemant Kumarf60c0252011-11-03 12:37:07 -0700617 struct rmnet_ctrl_pkt *cpkt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700618 unsigned long flags;
619
Hemant Kumarf60c0252011-11-03 12:37:07 -0700620 if (!gr || !buf) {
621 pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
622 __func__, gr, buf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700623 return -ENODEV;
624 }
Hemant Kumarf60c0252011-11-03 12:37:07 -0700625 cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
626 if (IS_ERR(cpkt)) {
627 pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
628 return -ENOMEM;
629 }
630 memcpy(cpkt->buf, buf, len);
631 cpkt->len = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700632
633 dev = port_to_rmnet(gr);
634
635 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
636
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700637 if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700638 rmnet_free_ctrl_pkt(cpkt);
639 return 0;
640 }
641
642 spin_lock_irqsave(&dev->lock, flags);
Chiranjeevi Velempati263e6c82011-11-11 23:07:36 +0530643 list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644 spin_unlock_irqrestore(&dev->lock, flags);
645
646 frmnet_ctrl_response_available(dev);
647
648 return 0;
649}
650
651static void
652frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
653{
654 struct f_rmnet *dev = req->context;
655 struct usb_composite_dev *cdev;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700656 unsigned port_num;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700657
658 if (!dev) {
659 pr_err("%s: rmnet dev is null\n", __func__);
660 return;
661 }
662
663 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
664
665 cdev = dev->cdev;
666
Hemant Kumar1b820d52011-11-03 15:08:28 -0700667 if (dev->port.send_encap_cmd) {
668 port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
669 dev->port.send_encap_cmd(port_num, req->buf, req->actual);
670 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700671}
672
/*
 * Completion handler for the interrupt-IN notify request. While
 * notify_count stays above zero, keep re-queueing RESPONSE_AVAILABLE so
 * the host polls once per pending response.
 */
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		/* last pending notification was just delivered */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}
704
/*
 * usb_function.setup: handle class-specific ep0 requests.
 *  - SEND_ENCAPSULATED_COMMAND: accept the data stage; frmnet_cmd_complete
 *    forwards it to the modem.
 *  - GET_ENCAPSULATED_RESPONSE: return the oldest queued control response.
 *  - SET_CONTROL_LINE_STATE: forward DTR/RTS bits to the modem.
 * Returns the data-stage length (>= 0) or a negative errno.
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	struct usb_request		*req = cdev->req;
	unsigned			port_num;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	int				ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		/* accept the OUT data stage; completion forwards it */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			/* truncate to what the host asked for */
			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
795
796static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
797{
798 struct f_rmnet *dev = func_to_rmnet(f);
799 struct usb_ep *ep;
800 struct usb_composite_dev *cdev = c->cdev;
801 int ret = -ENODEV;
802
803 dev->ifc_id = usb_interface_id(c, f);
804 if (dev->ifc_id < 0) {
805 pr_err("%s: unable to allocate ifc id, err:%d",
806 __func__, dev->ifc_id);
807 return dev->ifc_id;
808 }
809 rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
810
811 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
812 if (!ep) {
813 pr_err("%s: usb epin autoconfig failed\n", __func__);
814 return -ENODEV;
815 }
816 dev->port.in = ep;
817 ep->driver_data = cdev;
818
819 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
820 if (!ep) {
821 pr_err("%s: usb epout autoconfig failed\n", __func__);
822 ret = -ENODEV;
823 goto ep_auto_out_fail;
824 }
825 dev->port.out = ep;
826 ep->driver_data = cdev;
827
828 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
829 if (!ep) {
830 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
831 ret = -ENODEV;
832 goto ep_auto_notify_fail;
833 }
834 dev->notify = ep;
835 ep->driver_data = cdev;
836
837 dev->notify_req = frmnet_alloc_req(ep,
Hemant Kumarfbf113d2011-09-16 18:24:45 -0700838 sizeof(struct usb_cdc_notification),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700839 GFP_KERNEL);
840 if (IS_ERR(dev->notify_req)) {
841 pr_err("%s: unable to allocate memory for notify req\n",
842 __func__);
843 ret = -ENOMEM;
844 goto ep_notify_alloc_fail;
845 }
846
847 dev->notify_req->complete = frmnet_notify_complete;
848 dev->notify_req->context = dev;
849
850 f->descriptors = usb_copy_descriptors(rmnet_fs_function);
851
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530852 if (!f->descriptors)
853 goto fail;
854
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700855 if (gadget_is_dualspeed(cdev->gadget)) {
856 rmnet_hs_in_desc.bEndpointAddress =
857 rmnet_fs_in_desc.bEndpointAddress;
858 rmnet_hs_out_desc.bEndpointAddress =
859 rmnet_fs_out_desc.bEndpointAddress;
860 rmnet_hs_notify_desc.bEndpointAddress =
861 rmnet_fs_notify_desc.bEndpointAddress;
862
863 /* copy descriptors, and track endpoint copies */
864 f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
865
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530866 if (!f->hs_descriptors)
867 goto fail;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700868 }
869
870 pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
871 __func__, dev->port_num,
872 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
873 dev->port.in->name, dev->port.out->name);
874
875 return 0;
876
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530877fail:
878 if (f->descriptors)
879 usb_free_descriptors(f->descriptors);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700880ep_notify_alloc_fail:
881 dev->notify->driver_data = NULL;
882 dev->notify = NULL;
883ep_auto_notify_fail:
884 dev->port.out->driver_data = NULL;
885 dev->port.out = NULL;
886ep_auto_out_fail:
887 dev->port.in->driver_data = NULL;
888 dev->port.in = NULL;
889
890 return ret;
891}
892
Manu Gautam2b0234a2011-09-07 16:47:52 +0530893static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700895 int status;
896 struct f_rmnet *dev;
897 struct usb_function *f;
898 unsigned long flags;
899
900 pr_debug("%s: usb config:%p\n", __func__, c);
901
Manu Gautam2b0234a2011-09-07 16:47:52 +0530902 if (portno >= nr_rmnet_ports) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700903 pr_err("%s: supporting ports#%u port_id:%u", __func__,
Manu Gautam2b0234a2011-09-07 16:47:52 +0530904 nr_rmnet_ports, portno);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700905 return -ENODEV;
906 }
907
908 if (rmnet_string_defs[0].id == 0) {
909 status = usb_string_id(c->cdev);
910 if (status < 0) {
911 pr_err("%s: failed to get string id, err:%d\n",
912 __func__, status);
913 return status;
914 }
915 rmnet_string_defs[0].id = status;
916 }
917
Manu Gautam2b0234a2011-09-07 16:47:52 +0530918 dev = rmnet_ports[portno].port;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700919
920 spin_lock_irqsave(&dev->lock, flags);
921 dev->cdev = c->cdev;
922 f = &dev->port.func;
Vamsi Krishna188078d2011-10-26 15:09:55 -0700923 f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700924 spin_unlock_irqrestore(&dev->lock, flags);
Vamsi Krishna188078d2011-10-26 15:09:55 -0700925 if (!f->name) {
926 pr_err("%s: cannot allocate memory for name\n", __func__);
927 return -ENOMEM;
928 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700929
930 f->strings = rmnet_strings;
931 f->bind = frmnet_bind;
932 f->unbind = frmnet_unbind;
933 f->disable = frmnet_disable;
934 f->set_alt = frmnet_set_alt;
935 f->setup = frmnet_setup;
Chiranjeevi Velempatie007bed2012-01-24 09:54:11 +0530936 f->suspend = frmnet_suspend;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 dev->port.send_cpkt_response = frmnet_send_cpkt_response;
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700938 dev->port.disconnect = frmnet_disconnect;
939 dev->port.connect = frmnet_connect;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700940
941 status = usb_add_function(c, f);
942 if (status) {
943 pr_err("%s: usb add function failed: %d\n",
944 __func__, status);
Manu Gautam2b0234a2011-09-07 16:47:52 +0530945 kfree(f->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700946 return status;
947 }
948
949 pr_debug("%s: complete\n", __func__);
950
951 return status;
952}
953
Manu Gautame3e897c2011-09-12 17:18:46 +0530954static void frmnet_cleanup(void)
955{
956 int i;
957
958 for (i = 0; i < nr_rmnet_ports; i++)
959 kfree(rmnet_ports[i].port);
960
961 nr_rmnet_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -0700962 no_ctrl_smd_ports = 0;
963 no_data_bam_ports = 0;
Ofir Cohena1c2a872011-12-14 10:26:34 +0200964 no_data_bam2bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -0800965 no_ctrl_hsic_ports = 0;
966 no_data_hsic_ports = 0;
Manu Gautame3e897c2011-09-12 17:18:46 +0530967}
968
Hemant Kumar1b820d52011-11-03 15:08:28 -0700969static int frmnet_init_port(const char *ctrl_name, const char *data_name)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700970{
Hemant Kumar1b820d52011-11-03 15:08:28 -0700971 struct f_rmnet *dev;
972 struct rmnet_ports *rmnet_port;
973 int ret;
974 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700975
Hemant Kumar1b820d52011-11-03 15:08:28 -0700976 if (nr_rmnet_ports >= NR_RMNET_PORTS) {
977 pr_err("%s: Max-%d instances supported\n",
978 __func__, NR_RMNET_PORTS);
Manu Gautame3e897c2011-09-12 17:18:46 +0530979 return -EINVAL;
980 }
981
Hemant Kumar1b820d52011-11-03 15:08:28 -0700982 pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
983 __func__, nr_rmnet_ports, ctrl_name, data_name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700984
Hemant Kumar1b820d52011-11-03 15:08:28 -0700985 dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
986 if (!dev) {
987 pr_err("%s: Unable to allocate rmnet device\n", __func__);
988 return -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700989 }
990
Hemant Kumar1b820d52011-11-03 15:08:28 -0700991 dev->port_num = nr_rmnet_ports;
992 spin_lock_init(&dev->lock);
993 INIT_LIST_HEAD(&dev->cpkt_resp_q);
994
995 rmnet_port = &rmnet_ports[nr_rmnet_ports];
996 rmnet_port->port = dev;
997 rmnet_port->port_num = nr_rmnet_ports;
998 rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
999 rmnet_port->data_xport = str_to_xport(data_name);
1000
1001 switch (rmnet_port->ctrl_xport) {
1002 case USB_GADGET_XPORT_SMD:
1003 rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
1004 no_ctrl_smd_ports++;
1005 break;
Jack Pham427f6922011-11-23 19:42:00 -08001006 case USB_GADGET_XPORT_HSIC:
1007 rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
1008 no_ctrl_hsic_ports++;
1009 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001010 case USB_GADGET_XPORT_NONE:
1011 break;
1012 default:
1013 pr_err("%s: Un-supported transport: %u\n", __func__,
1014 rmnet_port->ctrl_xport);
1015 ret = -ENODEV;
1016 goto fail_probe;
1017 }
1018
1019 switch (rmnet_port->data_xport) {
1020 case USB_GADGET_XPORT_BAM:
1021 rmnet_port->data_xport_num = no_data_bam_ports;
1022 no_data_bam_ports++;
1023 break;
Ofir Cohena1c2a872011-12-14 10:26:34 +02001024 case USB_GADGET_XPORT_BAM2BAM:
1025 rmnet_port->data_xport_num = no_data_bam2bam_ports;
1026 no_data_bam2bam_ports++;
1027 break;
Jack Pham427f6922011-11-23 19:42:00 -08001028 case USB_GADGET_XPORT_HSIC:
1029 rmnet_port->data_xport_num = no_data_hsic_ports;
1030 no_data_hsic_ports++;
1031 break;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001032 case USB_GADGET_XPORT_NONE:
1033 break;
1034 default:
1035 pr_err("%s: Un-supported transport: %u\n", __func__,
1036 rmnet_port->data_xport);
1037 ret = -ENODEV;
1038 goto fail_probe;
1039 }
1040 nr_rmnet_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001041
1042 return 0;
1043
1044fail_probe:
Manu Gautam2b0234a2011-09-07 16:47:52 +05301045 for (i = 0; i < nr_rmnet_ports; i++)
1046 kfree(rmnet_ports[i].port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001047
Hemant Kumar1b820d52011-11-03 15:08:28 -07001048 nr_rmnet_ports = 0;
1049 no_ctrl_smd_ports = 0;
1050 no_data_bam_ports = 0;
Jack Pham427f6922011-11-23 19:42:00 -08001051 no_ctrl_hsic_ports = 0;
1052 no_data_hsic_ports = 0;
Hemant Kumar1b820d52011-11-03 15:08:28 -07001053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001054 return ret;
1055}