/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/usb/android_composite.h>
#include <linux/spinlock.h>

#include <mach/usb_gadget_xport.h>

#include "u_rmnet.h"
#include "gadget_chips.h"

#define RMNET_NOTIFY_INTERVAL	5
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)


#define ACM_CTRL_DTR	(1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet			port;
	int				ifc_id;
	u8				port_num;
	atomic_t			online;
	atomic_t			ctrl_online;
	struct usb_composite_dev	*cdev;

	spinlock_t			lock;

	/* usb eps */
	struct usb_ep			*notify;
	struct usb_request		*notify_req;

	/* control info */
	struct list_head		cpkt_resp_q;
	atomic_t			notify_count;
	unsigned long			cpkts_len;
};

#define NR_RMNET_PORTS	3
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
static struct rmnet_ports {
	enum transport_type	data_xport;
	enum transport_type	ctrl_xport;
	unsigned		data_xport_num;
	unsigned		ctrl_xport_num;
	unsigned		port_num;
	struct f_rmnet		*port;
} rmnet_ports[NR_RMNET_PORTS];

static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};

/* ------- misc functions --------------------*/

static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}

static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}

static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, flags);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->buf = kmalloc(len, flags);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return ERR_PTR(-ENOMEM);
	}

	req->length = len;

	return req;
}

void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
{
	struct rmnet_ctrl_pkt *pkt;

	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
	if (!pkt)
		return ERR_PTR(-ENOMEM);

	pkt->buf = kmalloc(len, flags);
	if (!pkt->buf) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->len = len;

	return pkt;
}

static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}

/* -------------------------------------------*/

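/*
 * One-time setup of the underlying transports (BAM/BAM2BAM data, SMD
 * control, HSIC data/control) for all rmnet ports registered through
 * frmnet_init_port(). For HSIC, the base index returned by the setup
 * call is distributed across the ports using that transport.
 */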
static int rmnet_gport_setup(void)
{
	int ret;
	int port_idx;
	int i;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u"
		" smd ports: %u ctrl hsic ports: %u"
		" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, nr_rmnet_ports);

	if (no_data_bam_ports || no_data_bam2bam_ports) {
		ret = gbam_setup(no_data_bam_ports,
			no_data_bam2bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
		if (ret)
			return ret;
	}

	if (no_data_hsic_ports) {
		port_idx = ghsic_data_setup(no_data_hsic_ports,
					USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
					USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}

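/*
 * Bring up the control and data transports bound to this port when the
 * host activates the interface: SMD or HSIC for control, BAM/BAM2BAM or
 * HSIC for data. Returns a negative errno if any transport fails to
 * connect.
 */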
static int gport_rmnet_connect(struct f_rmnet *dev)
{
	int ret;
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		ret = gsmd_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		ret = gbam_connect(&dev->port, port_num,
						dxport, port_num);
		if (ret) {
			pr_err("%s: gbam_connect failed: err:%d\n",
					__func__, ret);
			gsmd_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_data_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_data_connect failed: err:%d\n",
					__func__, ret);
			ghsic_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

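/* Tear down the control and data transports bound to this port. */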
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		gsmd_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		gbam_disconnect(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}

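/*
 * Bus suspend handler; only acts while the function is online. See the
 * Windows DTR workaround note inside.
 */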
static void frmnet_suspend(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;

	if (!atomic_read(&dev->online))
		return;
	/* This is a workaround for a bug in Windows 7/XP hosts in which
	 * the DTR bit is not set low when going into suspend. Hence force it
	 * low here when this function driver is suspended.
	 */
	if (dev->port.notify_modem) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.notify_modem(&dev->port, port_num, ~ACM_CTRL_DTR);
	}
}

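/*
 * Host dropped the configuration (or cable removed): disable the notify
 * endpoint, flush any queued control responses and disconnect the
 * backing transports.
 */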
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);
	dev->notify->driver_data = NULL;

	atomic_set(&dev->online, 0);

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}

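/*
 * SET_INTERFACE handler: (re)configure and enable the notify endpoint
 * for the current connection speed, and connect the control/data
 * transports the first time the data endpoints are brought up.
 */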
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}

	ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
	if (ret) {
		dev->notify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
				dev->notify->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->notify);

	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	if (!dev->port.in->driver_data) {
		if (config_ep_by_speed(cdev->gadget, f, dev->port.in) ||
			config_ep_by_speed(cdev->gadget, f, dev->port.out)) {
			dev->port.in->desc = NULL;
			dev->port.out->desc = NULL;
			return -EINVAL;
		}
		ret = gport_rmnet_connect(dev);
	}

	atomic_set(&dev->online, 1);

	return ret;
}

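/*
 * Arm a RESPONSE_AVAILABLE notification on the interrupt endpoint.
 * notify_count tracks outstanding notifications so only one request is
 * queued at a time; frmnet_notify_complete() re-queues as needed.
 */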
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}

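/*
 * Callbacks from the control transport when the modem side comes and
 * goes. frmnet_disconnect() additionally queues a NETWORK_CONNECTION
 * notification and drops any pending control responses.
 */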
static void frmnet_connect(struct grmnet *gr)
{
	struct f_rmnet *dev;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 1);
}

static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet *dev;
	unsigned long flags;
	struct usb_cdc_notification *event;
	int status;
	struct rmnet_ctrl_pkt *cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}

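/*
 * Queue an encapsulated response handed up by the control transport and
 * notify the host so it can fetch it with GET_ENCAPSULATED_RESPONSE.
 */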
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet *dev;
	struct rmnet_ctrl_pkt *cpkt;
	unsigned long flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}
	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(cpkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(cpkt->buf, buf, len);
	cpkt->len = len;

	dev = port_to_rmnet(gr);

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(cpkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);

	return 0;
}

static void
frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	struct usb_composite_dev *cdev;
	unsigned port_num;

	if (!dev) {
		pr_err("%s: rmnet dev is null\n", __func__);
		return;
	}

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	cdev = dev->cdev;

	if (dev->port.send_encap_cmd) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.send_encap_cmd(port_num, req->buf, req->actual);
	}
}

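/*
 * Completion handler for the interrupt/notify endpoint: on success, keep
 * re-queuing while responses remain pending; on reset/shutdown, clear
 * the pending count.
 */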
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}

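/*
 * ep0 handler for the CDC-style class requests used by this interface:
 * SEND_ENCAPSULATED_COMMAND, GET_ENCAPSULATED_RESPONSE and
 * SET_CONTROL_LINE_STATE (DTR changes forwarded to the modem).
 */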
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = cdev->req;
	unsigned port_num;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	int ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}

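/*
 * Allocate the interface id, auto-configure the three endpoints (bulk
 * IN/OUT plus interrupt notify), pre-allocate the notification request
 * and build the full/high speed descriptor sets.
 */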
static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_ep *ep;
	struct usb_composite_dev *cdev = c->cdev;
	int ret = -ENODEV;

	dev->ifc_id = usb_interface_id(c, f);
	if (dev->ifc_id < 0) {
		pr_err("%s: unable to allocate ifc id, err:%d",
				__func__, dev->ifc_id);
		return dev->ifc_id;
	}
	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
	if (!ep) {
		pr_err("%s: usb epin autoconfig failed\n", __func__);
		return -ENODEV;
	}
	dev->port.in = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
	if (!ep) {
		pr_err("%s: usb epout autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_out_fail;
	}
	dev->port.out = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
	if (!ep) {
		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_notify_fail;
	}
	dev->notify = ep;
	ep->driver_data = cdev;

	dev->notify_req = frmnet_alloc_req(ep,
				sizeof(struct usb_cdc_notification),
				GFP_KERNEL);
	if (IS_ERR(dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		ret = -ENOMEM;
		goto ep_notify_alloc_fail;
	}

	dev->notify_req->complete = frmnet_notify_complete;
	dev->notify_req->context = dev;

	f->descriptors = usb_copy_descriptors(rmnet_fs_function);

	if (!f->descriptors)
		goto fail;

	if (gadget_is_dualspeed(cdev->gadget)) {
		rmnet_hs_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_hs_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_hs_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);

		if (!f->hs_descriptors)
			goto fail;
	}

	pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
			__func__, dev->port_num,
			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
			dev->port.in->name, dev->port.out->name);

	return 0;

fail:
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
ep_notify_alloc_fail:
	dev->notify->driver_data = NULL;
	dev->notify = NULL;
ep_auto_notify_fail:
	dev->port.out->driver_data = NULL;
	dev->port.out = NULL;
ep_auto_out_fail:
	dev->port.in->driver_data = NULL;
	dev->port.in = NULL;

	return ret;
}

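/*
 * Hook one rmnet port into a configuration: fill in the usb_function
 * ops, name it "rmnet<N>" and register it with the composite layer.
 */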
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int status;
	struct f_rmnet *dev;
	struct usb_function *f;
	unsigned long flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	f->suspend = frmnet_suspend;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}

static void frmnet_cleanup(void)
{
	int i;

	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_data_bam_ports = 0;
	no_data_bam2bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;
}

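/*
 * Reserve one rmnet port and record which control/data transports it
 * should use; the per-transport port counters feed rmnet_gport_setup().
 */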
static int frmnet_init_port(const char *ctrl_name, const char *data_name)
{
	struct f_rmnet *dev;
	struct rmnet_ports *rmnet_port;
	int ret;
	int i;

	if (nr_rmnet_ports >= NR_RMNET_PORTS) {
		pr_err("%s: Max-%d instances supported\n",
				__func__, NR_RMNET_PORTS);
		return -EINVAL;
	}

	pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
			__func__, nr_rmnet_ports, ctrl_name, data_name);

	dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
	if (!dev) {
		pr_err("%s: Unable to allocate rmnet device\n", __func__);
		return -ENOMEM;
	}

	dev->port_num = nr_rmnet_ports;
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->cpkt_resp_q);

	rmnet_port = &rmnet_ports[nr_rmnet_ports];
	rmnet_port->port = dev;
	rmnet_port->port_num = nr_rmnet_ports;
	rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
	rmnet_port->data_xport = str_to_xport(data_name);

	switch (rmnet_port->ctrl_xport) {
	case USB_GADGET_XPORT_SMD:
		rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
		no_ctrl_smd_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
		no_ctrl_hsic_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->ctrl_xport);
		ret = -ENODEV;
		goto fail_probe;
	}

	switch (rmnet_port->data_xport) {
	case USB_GADGET_XPORT_BAM:
		rmnet_port->data_xport_num = no_data_bam_ports;
		no_data_bam_ports++;
		break;
	case USB_GADGET_XPORT_BAM2BAM:
		rmnet_port->data_xport_num = no_data_bam2bam_ports;
		no_data_bam2bam_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		rmnet_port->data_xport_num = no_data_hsic_ports;
		no_data_hsic_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->data_xport);
		ret = -ENODEV;
		goto fail_probe;
	}
	nr_rmnet_ports++;

	return 0;

fail_probe:
	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_data_bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;

	return ret;
}