/*
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/usb/android_composite.h>
18#include <linux/spinlock.h>
19
20#include <linux/platform_data/usb_rmnet.h>
21#include "u_rmnet.h"
22#include "gadget_chips.h"
23
24
25#define RMNET_NOTIFY_INTERVAL 5
26#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)
27
/* Per-speed set of endpoint descriptor pointers for one rmnet port. */
struct rmnet_descs {
	struct usb_endpoint_descriptor	*in;	/* bulk IN: device -> host data */
	struct usb_endpoint_descriptor	*out;	/* bulk OUT: host -> device data */
	struct usb_endpoint_descriptor	*notify; /* interrupt IN: CDC notifications */
};
33
34#define ACM_CTRL_DTR (1 << 0)
35
36/* TODO: use separate structures for data and
37 * control paths
38 */
/*
 * One rmnet function instance: couples the generic grmnet port
 * (data/control callbacks shared with the transport drivers) with
 * this function's USB descriptors and notification machinery.
 */
struct f_rmnet {
	struct grmnet		port;	/* embedded port; container_of() anchor */
	int			ifc_id;	/* interface number from usb_interface_id() */
	u8			port_num;	/* index into rmnet_ports[] */
	atomic_t		online;	/* set in set_alt, cleared in disable */
	atomic_t		ctrl_online;	/* control transport link state */
	struct usb_composite_dev *cdev;

	spinlock_t		lock;	/* protects cpkt_resp_q */

	/* usb descriptors */
	struct rmnet_descs	fs;
	struct rmnet_descs	hs;

	/* usb eps*/
	struct usb_ep		*notify;
	struct usb_endpoint_descriptor	*notify_desc;
	struct usb_request	*notify_req;

	/* control info */
	struct list_head	cpkt_resp_q;	/* ctrl responses queued for host */
	atomic_t		notify_count;	/* outstanding RESPONSE_AVAILABLE notifies */
	unsigned long		cpkts_len;
};
63
#define NR_RMNET_PORTS	1
/* number of ports actually allocated by frmnet_init_port() */
static unsigned int nr_rmnet_ports;
/* static table mapping port number -> allocated f_rmnet instance */
static struct rmnet_ports {
	unsigned		port_num;
	struct f_rmnet		*port;
} rmnet_ports[NR_RMNET_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
/* Vendor-specific interface: bulk IN + bulk OUT + interrupt IN. */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};
80
/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* FS interrupt bInterval is in frames (ms): 2^5 = 32 ms */
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

/* NULL-terminated descriptor list handed to usb_copy_descriptors() */
static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};
114
/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* HS interrupt bInterval is an exponent of microframes: 2^(9-1)*125us = 16 ms */
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

/* NULL-terminated descriptor list handed to usb_copy_descriptors() */
static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};
148
/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",	/* interface name; .id assigned in bind_config */
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};
165
166/* ------- misc functions --------------------*/
167
168static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
169{
170 return container_of(f, struct f_rmnet, port.func);
171}
172
173static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
174{
175 return container_of(r, struct f_rmnet, port);
176}
177
178static struct usb_request *
179frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
180{
181 struct usb_request *req;
182
183 req = usb_ep_alloc_request(ep, flags);
184 if (!req)
185 return ERR_PTR(-ENOMEM);
186
187 req->buf = kmalloc(len, flags);
188 if (!req->buf) {
189 usb_ep_free_request(ep, req);
190 return ERR_PTR(-ENOMEM);
191 }
192
193 req->length = len;
194
195 return req;
196}
197
/* Release a request (and its buffer) allocated by frmnet_alloc_req(). */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
203
204static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
205{
206 struct rmnet_ctrl_pkt *pkt;
207
208 pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
209 if (!pkt)
210 return ERR_PTR(-ENOMEM);
211
212 pkt->buf = kmalloc(len, flags);
213 if (!pkt->buf) {
214 kfree(pkt);
215 return ERR_PTR(-ENOMEM);
216 }
217 pkt->len = len;
218
219 return pkt;
220}
221
/* Free a control packet allocated by rmnet_alloc_ctrl_pkt(). */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}
227
228/* -------------------------------------------*/
229
/*
 * Initialize the data (BAM) and control (SMD) transports for
 * @no_rmnet_ports ports. Returns 0 on success or the first failing
 * transport's error code.
 */
static int rmnet_gport_setup(int no_rmnet_ports)
{
	int ret;

	pr_debug("%s: no_rmnet_ports:%d\n", __func__, no_rmnet_ports);

	ret = gbam_setup(no_rmnet_ports);
	if (!ret)
		ret = gsmd_ctrl_setup(no_rmnet_ports);

	return ret;
}
246
Manu Gautam2b0234a2011-09-07 16:47:52 +0530247static int gport_rmnet_connect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700248{
249 int ret;
250
251 pr_debug("%s:dev:%p portno:%d\n",
252 __func__, dev, dev->port_num);
253
254 ret = gsmd_ctrl_connect(&dev->port, dev->port_num);
255 if (ret) {
256 pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
257 __func__, ret);
258 return ret;
259 }
260
261 ret = gbam_connect(&dev->port, dev->port_num);
262 if (ret) {
263 pr_err("%s: gbam_connect failed: err:%d\n",
264 __func__, ret);
265 return ret;
266 }
267
268 return 0;
269}
270
Manu Gautam2b0234a2011-09-07 16:47:52 +0530271static int gport_rmnet_disconnect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272{
273 pr_debug("%s:dev:%p portno:%d\n",
274 __func__, dev, dev->port_num);
275
276 gbam_disconnect(&dev->port, dev->port_num);
277
278 gsmd_ctrl_disconnect(&dev->port, dev->port_num);
279
280 return 0;
281}
282
/*
 * Undo frmnet_bind(): free the copied descriptor arrays, the notify
 * request and the function name allocated in frmnet_bind_config().
 */
static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);

	pr_debug("%s: portno:%d\n", __func__, dev->port_num);

	/* hs_descriptors only exist on dual-speed gadgets */
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	frmnet_free_req(dev->notify, dev->notify_req);

	kfree(f->name);
}
297
298static void frmnet_disable(struct usb_function *f)
299{
300 struct f_rmnet *dev = func_to_rmnet(f);
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700301 unsigned long flags;
302 struct rmnet_ctrl_pkt *cpkt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700303
304 pr_debug("%s: port#%d\n", __func__, dev->port_num);
305
306 usb_ep_disable(dev->notify);
307
308 atomic_set(&dev->online, 0);
309
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700310 spin_lock_irqsave(&dev->lock, flags);
311 while (!list_empty(&dev->cpkt_resp_q)) {
312 cpkt = list_first_entry(&dev->cpkt_resp_q,
313 struct rmnet_ctrl_pkt, list);
314
315 list_del(&cpkt->list);
316 rmnet_free_ctrl_pkt(cpkt);
317 }
318 atomic_set(&dev->notify_count, 0);
319 spin_unlock_irqrestore(&dev->lock, flags);
320
Manu Gautam2b0234a2011-09-07 16:47:52 +0530321 gport_rmnet_disconnect(dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700322}
323
324static int
325frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
326{
327 struct f_rmnet *dev = func_to_rmnet(f);
328 struct usb_composite_dev *cdev = dev->cdev;
329 int ret;
330
331 pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);
332
333 if (dev->notify->driver_data) {
334 pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
335 usb_ep_disable(dev->notify);
336 }
337 dev->notify_desc = ep_choose(cdev->gadget,
338 dev->hs.notify,
339 dev->fs.notify);
340 ret = usb_ep_enable(dev->notify, dev->notify_desc);
341 if (ret) {
342 pr_err("%s: usb ep#%s enable failed, err#%d\n",
343 __func__, dev->notify->name, ret);
344 return ret;
345 }
346 dev->notify->driver_data = dev;
347
348 if (dev->port.in->driver_data) {
349 pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
Manu Gautam2b0234a2011-09-07 16:47:52 +0530350 gport_rmnet_disconnect(dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351 }
352
353 dev->port.in_desc = ep_choose(cdev->gadget,
354 dev->hs.in, dev->fs.in);
355 dev->port.out_desc = ep_choose(cdev->gadget,
356 dev->hs.out, dev->fs.out);
357
Manu Gautam2b0234a2011-09-07 16:47:52 +0530358 ret = gport_rmnet_connect(dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359
360 atomic_set(&dev->online, 1);
361
362 return ret;
363}
364
/*
 * Tell the host a control response is waiting by queueing a CDC
 * RESPONSE_AVAILABLE notification on the interrupt endpoint.
 *
 * dev->notify_count counts outstanding notifications: only the
 * transition 0 -> 1 actually queues the request here;
 * frmnet_notify_complete() re-queues while the count stays non-zero.
 */
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	/* a notification is already in flight; completion will re-queue */
	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		/* queue failed: undo the count so a later call can retry */
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}
400
/* Control transport callback: modem control channel came up. */
static void frmnet_connect(struct grmnet *gr)
{
	struct f_rmnet *dev;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 1);
}
414
/*
 * Control transport callback: modem control channel went down.
 * If the USB cable is still connected, flush the notify endpoint,
 * send a NETWORK_CONNECTION notification to the host and drop any
 * control responses the host can no longer consume.
 */
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet			*dev;
	unsigned long			flags;
	struct usb_cdc_notification	*event;
	int				status;
	struct rmnet_ctrl_pkt		*cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	/* cancel any in-flight RESPONSE_AVAILABLE notification */
	usb_ep_fifo_flush(dev->notify);

	/* NOTE(review): wValue 0 here appears to mean "link down" —
	 * confirm against the host-side rmnet driver's expectations */
	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		/* cable may have gone away while we were queueing */
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	/* discard stale responses and reset the notification counter */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}
467
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700468static int
Hemant Kumarf60c0252011-11-03 12:37:07 -0700469frmnet_send_cpkt_response(void *gr, void *buf, size_t len)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700470{
471 struct f_rmnet *dev;
Hemant Kumarf60c0252011-11-03 12:37:07 -0700472 struct rmnet_ctrl_pkt *cpkt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700473 unsigned long flags;
474
Hemant Kumarf60c0252011-11-03 12:37:07 -0700475 if (!gr || !buf) {
476 pr_err("%s: Invalid grmnet/buf, grmnet:%p buf:%p\n",
477 __func__, gr, buf);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700478 return -ENODEV;
479 }
Hemant Kumarf60c0252011-11-03 12:37:07 -0700480 cpkt = rmnet_alloc_ctrl_pkt(len, GFP_ATOMIC);
481 if (IS_ERR(cpkt)) {
482 pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
483 return -ENOMEM;
484 }
485 memcpy(cpkt->buf, buf, len);
486 cpkt->len = len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700487
488 dev = port_to_rmnet(gr);
489
490 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
491
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700492 if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700493 rmnet_free_ctrl_pkt(cpkt);
494 return 0;
495 }
496
497 spin_lock_irqsave(&dev->lock, flags);
Chiranjeevi Velempati263e6c82011-11-11 23:07:36 +0530498 list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700499 spin_unlock_irqrestore(&dev->lock, flags);
500
501 frmnet_ctrl_response_available(dev);
502
503 return 0;
504}
505
506static void
507frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
508{
509 struct f_rmnet *dev = req->context;
510 struct usb_composite_dev *cdev;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700511
512 if (!dev) {
513 pr_err("%s: rmnet dev is null\n", __func__);
514 return;
515 }
516
517 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
518
519 cdev = dev->cdev;
520
Hemant Kumarf60c0252011-11-03 12:37:07 -0700521 if (dev->port.send_encap_cmd)
522 dev->port.send_encap_cmd(dev->port_num, req->buf, req->actual);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700523}
524
/*
 * Completion handler for the interrupt notify endpoint. While
 * notify_count stays non-zero (and the control channel is up), keep
 * re-queueing the notification so the host keeps fetching responses.
 */
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		if (!atomic_read(&dev->ctrl_online))
			break;

		/* count reached zero: nothing more to announce */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}
556
/*
 * ep0 class-request handler for the rmnet interface: encapsulated
 * command/response transfer plus DTR line-state changes.
 * Returns the data-stage length (>= 0, also queues ep0) or a
 * negative errno.
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	struct usb_request		*req = cdev->req;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	int				ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		/* host -> device command: payload arrives in the ep0 data
		 * stage and is forwarded in frmnet_cmd_complete() */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			/* pop the oldest queued response and copy at most
			 * the number of bytes the host asked for */
			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* DTR change: relay the new line state to the modem */
		if (dev->port.notify_modem)
			dev->port.notify_modem(&dev->port,
					dev->port_num,
					w_value);
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
646
647static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
648{
649 struct f_rmnet *dev = func_to_rmnet(f);
650 struct usb_ep *ep;
651 struct usb_composite_dev *cdev = c->cdev;
652 int ret = -ENODEV;
653
654 dev->ifc_id = usb_interface_id(c, f);
655 if (dev->ifc_id < 0) {
656 pr_err("%s: unable to allocate ifc id, err:%d",
657 __func__, dev->ifc_id);
658 return dev->ifc_id;
659 }
660 rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
661
662 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
663 if (!ep) {
664 pr_err("%s: usb epin autoconfig failed\n", __func__);
665 return -ENODEV;
666 }
667 dev->port.in = ep;
668 ep->driver_data = cdev;
669
670 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
671 if (!ep) {
672 pr_err("%s: usb epout autoconfig failed\n", __func__);
673 ret = -ENODEV;
674 goto ep_auto_out_fail;
675 }
676 dev->port.out = ep;
677 ep->driver_data = cdev;
678
679 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
680 if (!ep) {
681 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
682 ret = -ENODEV;
683 goto ep_auto_notify_fail;
684 }
685 dev->notify = ep;
686 ep->driver_data = cdev;
687
688 dev->notify_req = frmnet_alloc_req(ep,
Hemant Kumarfbf113d2011-09-16 18:24:45 -0700689 sizeof(struct usb_cdc_notification),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700690 GFP_KERNEL);
691 if (IS_ERR(dev->notify_req)) {
692 pr_err("%s: unable to allocate memory for notify req\n",
693 __func__);
694 ret = -ENOMEM;
695 goto ep_notify_alloc_fail;
696 }
697
698 dev->notify_req->complete = frmnet_notify_complete;
699 dev->notify_req->context = dev;
700
701 f->descriptors = usb_copy_descriptors(rmnet_fs_function);
702
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530703 if (!f->descriptors)
704 goto fail;
705
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700706 dev->fs.in = usb_find_endpoint(rmnet_fs_function,
707 f->descriptors,
708 &rmnet_fs_in_desc);
709 dev->fs.out = usb_find_endpoint(rmnet_fs_function,
710 f->descriptors,
711 &rmnet_fs_out_desc);
712 dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
713 f->descriptors,
714 &rmnet_fs_notify_desc);
715
716 if (gadget_is_dualspeed(cdev->gadget)) {
717 rmnet_hs_in_desc.bEndpointAddress =
718 rmnet_fs_in_desc.bEndpointAddress;
719 rmnet_hs_out_desc.bEndpointAddress =
720 rmnet_fs_out_desc.bEndpointAddress;
721 rmnet_hs_notify_desc.bEndpointAddress =
722 rmnet_fs_notify_desc.bEndpointAddress;
723
724 /* copy descriptors, and track endpoint copies */
725 f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
726
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530727 if (!f->hs_descriptors)
728 goto fail;
729
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700730 dev->hs.in = usb_find_endpoint(rmnet_hs_function,
731 f->hs_descriptors, &rmnet_hs_in_desc);
732 dev->hs.out = usb_find_endpoint(rmnet_hs_function,
733 f->hs_descriptors, &rmnet_hs_out_desc);
734 dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
735 f->hs_descriptors, &rmnet_hs_notify_desc);
736 }
737
738 pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
739 __func__, dev->port_num,
740 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
741 dev->port.in->name, dev->port.out->name);
742
743 return 0;
744
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530745fail:
746 if (f->descriptors)
747 usb_free_descriptors(f->descriptors);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700748ep_notify_alloc_fail:
749 dev->notify->driver_data = NULL;
750 dev->notify = NULL;
751ep_auto_notify_fail:
752 dev->port.out->driver_data = NULL;
753 dev->port.out = NULL;
754ep_auto_out_fail:
755 dev->port.in->driver_data = NULL;
756 dev->port.in = NULL;
757
758 return ret;
759}
760
/*
 * Add port @portno (allocated earlier by frmnet_init_port()) as a
 * function to configuration @c: name it, wire up the usb_function and
 * grmnet callbacks, and register it with the composite framework.
 * Returns 0 on success or a negative errno.
 */
static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
{
	int			status;
	struct f_rmnet		*dev;
	struct usb_function	*f;
	unsigned long		flags;

	pr_debug("%s: usb config:%p\n", __func__, c);

	if (portno >= nr_rmnet_ports) {
		pr_err("%s: supporting ports#%u port_id:%u", __func__,
				nr_rmnet_ports, portno);
		return -ENODEV;
	}

	/* allocate the interface string id once; shared by all ports */
	if (rmnet_string_defs[0].id == 0) {
		status = usb_string_id(c->cdev);
		if (status < 0) {
			pr_err("%s: failed to get string id, err:%d\n",
					__func__, status);
			return status;
		}
		rmnet_string_defs[0].id = status;
	}

	dev = rmnet_ports[portno].port;

	/* GFP_ATOMIC because kasprintf runs under the spinlock */
	spin_lock_irqsave(&dev->lock, flags);
	dev->cdev = c->cdev;
	f = &dev->port.func;
	f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!f->name) {
		pr_err("%s: cannot allocate memory for name\n", __func__);
		return -ENOMEM;
	}

	f->strings = rmnet_strings;
	f->bind = frmnet_bind;
	f->unbind = frmnet_unbind;
	f->disable = frmnet_disable;
	f->set_alt = frmnet_set_alt;
	f->setup = frmnet_setup;
	dev->port.send_cpkt_response = frmnet_send_cpkt_response;
	dev->port.disconnect = frmnet_disconnect;
	dev->port.connect = frmnet_connect;

	status = usb_add_function(c, f);
	if (status) {
		pr_err("%s: usb add function failed: %d\n",
				__func__, status);
		kfree(f->name);
		return status;
	}

	pr_debug("%s: complete\n", __func__);

	return status;
}
820
Manu Gautame3e897c2011-09-12 17:18:46 +0530821static void frmnet_cleanup(void)
822{
823 int i;
824
825 for (i = 0; i < nr_rmnet_ports; i++)
826 kfree(rmnet_ports[i].port);
827
828 nr_rmnet_ports = 0;
829}
830
Manu Gautam2b0234a2011-09-07 16:47:52 +0530831static int frmnet_init_port(int instances)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833 int i;
834 struct f_rmnet *dev;
835 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700836
837 pr_debug("%s: instances :%d\n", __func__, instances);
838
Manu Gautame3e897c2011-09-12 17:18:46 +0530839 if (instances > NR_RMNET_PORTS) {
840 pr_err("%s: Max-%d instances supported\n", __func__,
841 NR_RMNET_PORTS);
842 return -EINVAL;
843 }
844
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700845 for (i = 0; i < instances; i++) {
846 dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
847 if (!dev) {
848 pr_err("%s: Unable to allocate rmnet device\n",
849 __func__);
850 ret = -ENOMEM;
851 goto fail_probe;
852 }
853
854 dev->port_num = i;
855 spin_lock_init(&dev->lock);
856 INIT_LIST_HEAD(&dev->cpkt_resp_q);
857
Manu Gautam2b0234a2011-09-07 16:47:52 +0530858 rmnet_ports[i].port = dev;
859 rmnet_ports[i].port_num = i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700860
Manu Gautam2b0234a2011-09-07 16:47:52 +0530861 nr_rmnet_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700862 }
863
Manu Gautam2b0234a2011-09-07 16:47:52 +0530864 rmnet_gport_setup(nr_rmnet_ports);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700865
866 return 0;
867
868fail_probe:
Manu Gautam2b0234a2011-09-07 16:47:52 +0530869 for (i = 0; i < nr_rmnet_ports; i++)
870 kfree(rmnet_ports[i].port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700871
872 return ret;
873}