/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/usb/android_composite.h>
#include <linux/spinlock.h>

#include <mach/usb_gadget_xport.h>

#include "u_rmnet.h"
#include "gadget_chips.h"

#define RMNET_NOTIFY_INTERVAL	5
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)


#define ACM_CTRL_DTR	(1 << 0)

/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet port;
	int ifc_id;
	u8 port_num;
	atomic_t online;
	atomic_t ctrl_online;
	struct usb_composite_dev *cdev;

	spinlock_t lock;

	/* usb eps*/
	struct usb_ep *notify;
	struct usb_request *notify_req;

	/* control info */
	struct list_head cpkt_resp_q;
	atomic_t notify_count;
	unsigned long cpkts_len;
};

#define NR_RMNET_PORTS	3
static unsigned int nr_rmnet_ports;
static unsigned int no_ctrl_smd_ports;
static unsigned int no_ctrl_hsic_ports;
static unsigned int no_ctrl_hsuart_ports;
static unsigned int no_data_bam_ports;
static unsigned int no_data_bam2bam_ports;
static unsigned int no_data_hsic_ports;
static unsigned int no_data_hsuart_ports;
static struct rmnet_ports {
	enum transport_type data_xport;
	enum transport_type ctrl_xport;
	unsigned data_xport_num;
	unsigned ctrl_xport_num;
	unsigned port_num;
	struct f_rmnet *port;
} rmnet_ports[NR_RMNET_PORTS];

static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};

/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",
	{ } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};

static void frmnet_ctrl_response_available(struct f_rmnet *dev);

/* ------- misc functions --------------------*/

static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}

static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}

static struct usb_request *
frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, flags);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->buf = kmalloc(len, flags);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return ERR_PTR(-ENOMEM);
	}

	req->length = len;

	return req;
}

void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}

static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
{
	struct rmnet_ctrl_pkt *pkt;

	pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
	if (!pkt)
		return ERR_PTR(-ENOMEM);

	pkt->buf = kmalloc(len, flags);
	if (!pkt->buf) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->len = len;

	return pkt;
}

static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}

/* -------------------------------------------*/

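/*
 * rmnet_gport_setup - initialize the configured control/data transports.
 * Walks the static port counters filled in by frmnet_init_port() and calls
 * the per-transport setup routines (BAM, SMD, HSIC, HSUART), recording the
 * transport port index assigned to each rmnet instance.
 */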
static int rmnet_gport_setup(void)
{
	int ret;
	int port_idx;
	int i;

	pr_debug("%s: bam ports: %u bam2bam ports: %u data hsic ports: %u data hsuart ports: %u"
		" smd ports: %u ctrl hsic ports: %u ctrl hsuart ports: %u"
		" nr_rmnet_ports: %u\n",
		__func__, no_data_bam_ports, no_data_bam2bam_ports,
		no_data_hsic_ports, no_data_hsuart_ports, no_ctrl_smd_ports,
		no_ctrl_hsic_ports, no_ctrl_hsuart_ports, nr_rmnet_ports);

	if (no_data_bam_ports || no_data_bam2bam_ports) {
		ret = gbam_setup(no_data_bam_ports,
				no_data_bam2bam_ports);
		if (ret)
			return ret;
	}

	if (no_ctrl_smd_ports) {
		ret = gsmd_ctrl_setup(no_ctrl_smd_ports);
		if (ret)
			return ret;
	}

	if (no_data_hsic_ports) {
		port_idx = ghsic_data_setup(no_data_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsic_ports) {
		port_idx = ghsic_ctrl_setup(no_ctrl_hsic_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSIC) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_data_hsuart_ports) {
		port_idx = ghsuart_data_setup(no_data_hsuart_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].data_xport ==
					USB_GADGET_XPORT_HSUART) {
				rmnet_ports[i].data_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	if (no_ctrl_hsuart_ports) {
		port_idx = ghsuart_ctrl_setup(no_ctrl_hsuart_ports,
				USB_GADGET_RMNET);
		if (port_idx < 0)
			return port_idx;
		for (i = 0; i < nr_rmnet_ports; i++) {
			if (rmnet_ports[i].ctrl_xport ==
					USB_GADGET_XPORT_HSUART) {
				rmnet_ports[i].ctrl_xport_num = port_idx;
				port_idx++;
			}
		}
	}

	return 0;
}

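/*
 * gport_rmnet_connect - bring up the control and data transports for one
 * rmnet instance when the interface is activated. Looks up the transport
 * types chosen for this port and calls the matching *_connect() helpers,
 * tearing down the control side again if the data side fails.
 */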
static int gport_rmnet_connect(struct f_rmnet *dev)
{
	int ret;
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		ret = gsmd_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSUART:
		ret = ghsuart_ctrl_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsuart_ctrl_connect failed: err:%d\n",
					__func__, ret);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		ret = gbam_connect(&dev->port, port_num,
					dxport, port_num);
		if (ret) {
			pr_err("%s: gbam_connect failed: err:%d\n",
					__func__, ret);
			gsmd_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSIC:
		ret = ghsic_data_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsic_data_connect failed: err:%d\n",
					__func__, ret);
			ghsic_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_HSUART:
		ret = ghsuart_data_connect(&dev->port, port_num);
		if (ret) {
			pr_err("%s: ghsuart_data_connect failed: err:%d\n",
					__func__, ret);
			ghsuart_ctrl_disconnect(&dev->port, port_num);
			return ret;
		}
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

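/*
 * gport_rmnet_disconnect - tear down the control and data transports for
 * one rmnet instance, mirroring gport_rmnet_connect().
 */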
static int gport_rmnet_disconnect(struct f_rmnet *dev)
{
	unsigned port_num;
	enum transport_type cxport = rmnet_ports[dev->port_num].ctrl_xport;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: ctrl xport: %s data xport: %s dev: %p portno: %d\n",
			__func__, xport_to_str(cxport), xport_to_str(dxport),
			dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
	switch (cxport) {
	case USB_GADGET_XPORT_SMD:
		gsmd_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSUART:
		ghsuart_ctrl_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(cxport));
		return -ENODEV;
	}

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
	case USB_GADGET_XPORT_BAM2BAM:
		gbam_disconnect(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_HSUART:
		ghsuart_data_disconnect(&dev->port, port_num);
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
		return -ENODEV;
	}

	return 0;
}

static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}

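/*
 * frmnet_suspend/frmnet_resume - bus suspend/resume hooks. Only the
 * BAM2BAM data transport gets explicit handling here; the remaining
 * transports are left untouched.
 */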
static void frmnet_suspend(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: data xport: %s dev: %p portno: %d\n",
		__func__, xport_to_str(dxport),
		dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
		break;
	case USB_GADGET_XPORT_BAM2BAM:
		gbam_suspend(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
	}
}

static void frmnet_resume(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned port_num;
	enum transport_type dxport = rmnet_ports[dev->port_num].data_xport;

	pr_debug("%s: data xport: %s dev: %p portno: %d\n",
		__func__, xport_to_str(dxport),
		dev, dev->port_num);

	port_num = rmnet_ports[dev->port_num].data_xport_num;
	switch (dxport) {
	case USB_GADGET_XPORT_BAM:
		break;
	case USB_GADGET_XPORT_BAM2BAM:
		gbam_resume(&dev->port, port_num, dxport);
		break;
	case USB_GADGET_XPORT_HSIC:
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %s\n", __func__,
				xport_to_str(dxport));
	}
}

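/*
 * frmnet_disable - called when the host deselects the interface or the
 * cable is disconnected. Disables the notify endpoint, drops any queued
 * control responses and disconnects the transports.
 */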
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);
	dev->notify->driver_data = NULL;

	atomic_set(&dev->online, 0);

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}

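/*
 * frmnet_set_alt - SET_INTERFACE handling. Re-enables the notify endpoint,
 * configures the bulk endpoints for the current speed, connects the
 * transports when the data endpoints are not yet configured, and replays
 * any control responses queued while notifications were not active.
 */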
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret;
	struct list_head *cpkt;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}

	ret = config_ep_by_speed(cdev->gadget, f, dev->notify);
	if (ret) {
		dev->notify->desc = NULL;
		ERROR(cdev, "config_ep_by_speed fails for ep %s, result %d\n",
				dev->notify->name, ret);
		return ret;
	}
	ret = usb_ep_enable(dev->notify);

	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	if (!dev->port.in->desc || !dev->port.out->desc) {
		if (config_ep_by_speed(cdev->gadget, f, dev->port.in) ||
			config_ep_by_speed(cdev->gadget, f, dev->port.out)) {
				dev->port.in->desc = NULL;
				dev->port.out->desc = NULL;
				return -EINVAL;
		}
		ret = gport_rmnet_connect(dev);
	}

	atomic_set(&dev->online, 1);

	/* In case notifications were aborted, but there are pending control
	   packets in the response queue, re-add the notifications */
	list_for_each(cpkt, &dev->cpkt_resp_q)
		frmnet_ctrl_response_available(dev);

	return ret;
}

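/*
 * frmnet_ctrl_response_available - tell the host that an encapsulated
 * response is waiting by queueing a RESPONSE_AVAILABLE notification on the
 * interrupt endpoint. notify_count tracks notifications in flight so only
 * one request is outstanding at a time; on a queueing error the oldest
 * pending control packet is dropped.
 */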
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event;
	unsigned long flags;
	int ret;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		atomic_dec(&dev->notify_count);
		spin_lock_irqsave(&dev->lock, flags);
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);
		if (cpkt) {
			list_del(&cpkt->list);
			rmnet_free_ctrl_pkt(cpkt);
		}
		spin_unlock_irqrestore(&dev->lock, flags);
		pr_debug("ep enqueue error %d\n", ret);
	}
}

static void frmnet_connect(struct grmnet *gr)
{
	struct f_rmnet *dev;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 1);
}

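/*
 * frmnet_disconnect - the control channel went down on the modem side.
 * Sends a NETWORK_CONNECTION notification to the host and flushes the
 * pending control response queue.
 */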
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet *dev;
	unsigned long flags;
	struct usb_cdc_notification *event;
	int status;
	struct rmnet_ctrl_pkt *cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	usb_ep_fifo_flush(dev->notify);

	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}

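/*
 * frmnet_send_cpkt_response - called by the control transport to forward an
 * encapsulated response toward the host. The payload is copied into a
 * rmnet_ctrl_pkt, queued on cpkt_resp_q and a RESPONSE_AVAILABLE
 * notification is raised; packets are dropped silently while the function
 * is offline.
 */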
static int
frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
{
	struct f_rmnet *dev;
	struct rmnet_ctrl_pkt *cpkt;
	unsigned long flags;

	if (!gr || !buf) {
		pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
				__func__, gr, buf);
		return -ENODEV;
	}
	cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
	if (IS_ERR(cpkt)) {
		pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
		return -ENOMEM;
	}
	memcpy(cpkt->buf, buf, len);
	cpkt->len = len;

	dev = port_to_rmnet(gr);

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
		rmnet_free_ctrl_pkt(cpkt);
		return 0;
	}

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
	spin_unlock_irqrestore(&dev->lock, flags);

	frmnet_ctrl_response_available(dev);

	return 0;
}

static void
frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	struct usb_composite_dev *cdev;
	unsigned port_num;

	if (!dev) {
		pr_err("%s: rmnet dev is null\n", __func__);
		return;
	}

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	cdev = dev->cdev;

	if (dev->port.send_encap_cmd) {
		port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
		dev->port.send_encap_cmd(port_num, req->buf, req->actual);
	}
}

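/*
 * frmnet_notify_complete - completion handler for the interrupt endpoint.
 * On reset/shutdown the notify count is cleared; otherwise the request is
 * re-queued while more notifications are outstanding, dropping the oldest
 * queued control packet if re-queueing fails.
 */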
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			spin_lock_irqsave(&dev->lock, flags);
			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			if (cpkt) {
				list_del(&cpkt->list);
				rmnet_free_ctrl_pkt(cpkt);
			}
			spin_unlock_irqrestore(&dev->lock, flags);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}

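/*
 * frmnet_setup - class-specific EP0 request handling. An encapsulated
 * command from the host is handed to the control transport via
 * frmnet_cmd_complete(), GET_ENCAPSULATED_RESPONSE returns the oldest
 * queued control packet, and SET_CONTROL_LINE_STATE (DTR) is relayed to
 * the modem side.
 */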
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = cdev->req;
	unsigned port_num;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	int ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		if (dev->port.notify_modem) {
			port_num = rmnet_ports[dev->port_num].ctrl_xport_num;
			dev->port.notify_modem(&dev->port, port_num, w_value);
		}
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}

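/*
 * frmnet_bind - allocate the interface id, autoconfigure the bulk and
 * interrupt endpoints, pre-allocate the notify request and build the
 * full- and high-speed descriptor arrays.
 */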
static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_ep *ep;
	struct usb_composite_dev *cdev = c->cdev;
	int ret = -ENODEV;

	dev->ifc_id = usb_interface_id(c, f);
	if (dev->ifc_id < 0) {
		pr_err("%s: unable to allocate ifc id, err:%d",
				__func__, dev->ifc_id);
		return dev->ifc_id;
	}
	rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
	if (!ep) {
		pr_err("%s: usb epin autoconfig failed\n", __func__);
		return -ENODEV;
	}
	dev->port.in = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
	if (!ep) {
		pr_err("%s: usb epout autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_out_fail;
	}
	dev->port.out = ep;
	ep->driver_data = cdev;

	ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
	if (!ep) {
		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
		ret = -ENODEV;
		goto ep_auto_notify_fail;
	}
	dev->notify = ep;
	ep->driver_data = cdev;

	dev->notify_req = frmnet_alloc_req(ep,
				sizeof(struct usb_cdc_notification),
				GFP_KERNEL);
	if (IS_ERR(dev->notify_req)) {
		pr_err("%s: unable to allocate memory for notify req\n",
				__func__);
		ret = -ENOMEM;
		goto ep_notify_alloc_fail;
	}

	dev->notify_req->complete = frmnet_notify_complete;
	dev->notify_req->context = dev;

	f->descriptors = usb_copy_descriptors(rmnet_fs_function);

	if (!f->descriptors)
		goto fail;

	if (gadget_is_dualspeed(cdev->gadget)) {
		rmnet_hs_in_desc.bEndpointAddress =
				rmnet_fs_in_desc.bEndpointAddress;
		rmnet_hs_out_desc.bEndpointAddress =
				rmnet_fs_out_desc.bEndpointAddress;
		rmnet_hs_notify_desc.bEndpointAddress =
				rmnet_fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);

		if (!f->hs_descriptors)
			goto fail;
	}

	pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
			__func__, dev->port_num,
			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
			dev->port.in->name, dev->port.out->name);

	return 0;

fail:
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
ep_notify_alloc_fail:
	dev->notify->driver_data = NULL;
	dev->notify = NULL;
ep_auto_notify_fail:
	dev->port.out->driver_data = NULL;
	dev->port.out = NULL;
ep_auto_out_fail:
	dev->port.in->driver_data = NULL;
	dev->port.in = NULL;

	return ret;
}

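/*
 * frmnet_bind_config - hook one pre-allocated rmnet port into a USB
 * configuration: assigns the string id, names the function, wires up the
 * usb_function callbacks and registers it with the composite framework.
 */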
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int status;
	struct f_rmnet *dev;
	struct usb_function *f;
	unsigned long flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	f->suspend = frmnet_suspend;
	f->resume = frmnet_resume;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}

static void frmnet_cleanup(void)
{
	int i;

	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_data_bam_ports = 0;
	no_data_bam2bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;
	no_ctrl_hsuart_ports = 0;
	no_data_hsuart_ports = 0;
}

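/*
 * frmnet_init_port - allocate one f_rmnet instance and record the requested
 * control and data transport types, bumping the per-transport port counters
 * that rmnet_gport_setup() later consumes. For HSIC transports the platform
 * port name is also registered.
 */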
static int frmnet_init_port(const char *ctrl_name, const char *data_name,
		const char *port_name)
{
	struct f_rmnet *dev;
	struct rmnet_ports *rmnet_port;
	int ret;
	int i;

	if (nr_rmnet_ports >= NR_RMNET_PORTS) {
		pr_err("%s: Max-%d instances supported\n",
				__func__, NR_RMNET_PORTS);
		return -EINVAL;
	}

	pr_debug("%s: port#:%d, ctrl port: %s data port: %s\n",
		__func__, nr_rmnet_ports, ctrl_name, data_name);

	dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
	if (!dev) {
		pr_err("%s: Unable to allocate rmnet device\n", __func__);
		return -ENOMEM;
	}

	dev->port_num = nr_rmnet_ports;
	spin_lock_init(&dev->lock);
	INIT_LIST_HEAD(&dev->cpkt_resp_q);

	rmnet_port = &rmnet_ports[nr_rmnet_ports];
	rmnet_port->port = dev;
	rmnet_port->port_num = nr_rmnet_ports;
	rmnet_port->ctrl_xport = str_to_xport(ctrl_name);
	rmnet_port->data_xport = str_to_xport(data_name);

	switch (rmnet_port->ctrl_xport) {
	case USB_GADGET_XPORT_SMD:
		rmnet_port->ctrl_xport_num = no_ctrl_smd_ports;
		no_ctrl_smd_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_ctrl_set_port_name(port_name, ctrl_name);
		rmnet_port->ctrl_xport_num = no_ctrl_hsic_ports;
		no_ctrl_hsic_ports++;
		break;
	case USB_GADGET_XPORT_HSUART:
		rmnet_port->ctrl_xport_num = no_ctrl_hsuart_ports;
		no_ctrl_hsuart_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->ctrl_xport);
		ret = -ENODEV;
		goto fail_probe;
	}

	switch (rmnet_port->data_xport) {
	case USB_GADGET_XPORT_BAM:
		rmnet_port->data_xport_num = no_data_bam_ports;
		no_data_bam_ports++;
		break;
	case USB_GADGET_XPORT_BAM2BAM:
		rmnet_port->data_xport_num = no_data_bam2bam_ports;
		no_data_bam2bam_ports++;
		break;
	case USB_GADGET_XPORT_HSIC:
		ghsic_data_set_port_name(port_name, data_name);
		rmnet_port->data_xport_num = no_data_hsic_ports;
		no_data_hsic_ports++;
		break;
	case USB_GADGET_XPORT_HSUART:
		rmnet_port->data_xport_num = no_data_hsuart_ports;
		no_data_hsuart_ports++;
		break;
	case USB_GADGET_XPORT_NONE:
		break;
	default:
		pr_err("%s: Un-supported transport: %u\n", __func__,
				rmnet_port->data_xport);
		ret = -ENODEV;
		goto fail_probe;
	}
	nr_rmnet_ports++;

	return 0;

fail_probe:
	for (i = 0; i < nr_rmnet_ports; i++)
		kfree(rmnet_ports[i].port);

	nr_rmnet_ports = 0;
	no_ctrl_smd_ports = 0;
	no_data_bam_ports = 0;
	no_ctrl_hsic_ports = 0;
	no_data_hsic_ports = 0;
	no_ctrl_hsuart_ports = 0;
	no_data_hsuart_ports = 0;

	return ret;
}