blob: 7686bf2c1793a9dc1859c18050cc2c29d62d6e8c [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/*
2 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/slab.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/usb/android_composite.h>
18#include <linux/spinlock.h>
19
20#include <linux/platform_data/usb_rmnet.h>
21#include "u_rmnet.h"
22#include "gadget_chips.h"
23
24
/* bInterval exponent for the interrupt notify endpoint; also used
 * (shifted/offset) in the FS and HS endpoint descriptors below.
 */
#define RMNET_NOTIFY_INTERVAL	5
/* Largest notification we ever send: one CDC notification header */
#define RMNET_MAX_NOTIFY_SIZE	sizeof(struct usb_cdc_notification)

/* One set of endpoint-descriptor pointers per speed (fs/hs),
 * filled in by frmnet_bind() via usb_find_endpoint().
 */
struct rmnet_descs {
	struct usb_endpoint_descriptor *in;
	struct usb_endpoint_descriptor *out;
	struct usb_endpoint_descriptor *notify;
};

/* DTR bit in the wValue of a SET_CONTROL_LINE_STATE request */
#define ACM_CTRL_DTR	(1 << 0)
35
/* TODO: use separate structures for data and
 * control paths
 */
struct f_rmnet {
	struct grmnet port;		/* glue to the u_rmnet data/control layer */
	int ifc_id;			/* USB interface number from usb_interface_id() */
	u8 port_num;			/* index into rmnet_ports[] */
	atomic_t online;		/* set in set_alt(), cleared in disable() */
	atomic_t ctrl_online;		/* modem control side up (frmnet_connect/disconnect) */
	struct usb_composite_dev *cdev;

	spinlock_t lock;		/* protects cpkt_resp_q */

	/* usb descriptors */
	struct rmnet_descs fs;
	struct rmnet_descs hs;

	/* usb eps*/
	struct usb_ep *notify;
	struct usb_endpoint_descriptor *notify_desc;
	struct usb_request *notify_req;	/* single request reused for all notifications */

	/* control info */
	struct list_head cpkt_resp_q;	/* responses pending GET_ENCAPSULATED_RESPONSE */
	atomic_t notify_count;		/* outstanding RESPONSE_AVAILABLE notifications */
	unsigned long cpkts_len;
};
63
Manu Gautam2b0234a2011-09-07 16:47:52 +053064#define NR_RMNET_PORTS 1
65static unsigned int nr_rmnet_ports;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070066static struct rmnet_ports {
67 unsigned port_num;
68 struct f_rmnet *port;
Manu Gautam2b0234a2011-09-07 16:47:52 +053069} rmnet_ports[NR_RMNET_PORTS];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070070
/* Vendor-specific interface with three endpoints:
 * interrupt-IN (notify), bulk-IN and bulk-OUT (data).
 */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	.bNumEndpoints =	3,
	.bInterfaceClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass =	USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol =	USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* FS interrupt bInterval is in frames (ms units) */
	.bInterval =		1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(64),
};

/* descriptor set advertised at full speed */
static struct usb_descriptor_header *rmnet_fs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_fs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_fs_in_desc,
	(struct usb_descriptor_header *) &rmnet_fs_out_desc,
	NULL,
};
114
/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	/* HS interrupt bInterval is an exponent in 125us microframes */
	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	__constant_cpu_to_le16(512),
};

/* descriptor set advertised at high speed; endpoint addresses are
 * copied from the FS descriptors in frmnet_bind()
 */
static struct usb_descriptor_header *rmnet_hs_function[] = {
	(struct usb_descriptor_header *) &rmnet_interface_desc,
	(struct usb_descriptor_header *) &rmnet_hs_notify_desc,
	(struct usb_descriptor_header *) &rmnet_hs_in_desc,
	(struct usb_descriptor_header *) &rmnet_hs_out_desc,
	NULL,
};
148
/* String descriptors */

static struct usb_string rmnet_string_defs[] = {
	[0].s = "RmNet",	/* id assigned lazily in frmnet_bind_config() */
	{  } /* end of list */
};

static struct usb_gadget_strings rmnet_string_table = {
	.language =	0x0409,	/* en-us */
	.strings =	rmnet_string_defs,
};

static struct usb_gadget_strings *rmnet_strings[] = {
	&rmnet_string_table,
	NULL,
};
165
/* ------- misc functions --------------------*/

/* map a usb_function back to its enclosing f_rmnet */
static inline struct f_rmnet *func_to_rmnet(struct usb_function *f)
{
	return container_of(f, struct f_rmnet, port.func);
}

/* map a grmnet port back to its enclosing f_rmnet */
static inline struct f_rmnet *port_to_rmnet(struct grmnet *r)
{
	return container_of(r, struct f_rmnet, port);
}
177
178static struct usb_request *
179frmnet_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
180{
181 struct usb_request *req;
182
183 req = usb_ep_alloc_request(ep, flags);
184 if (!req)
185 return ERR_PTR(-ENOMEM);
186
187 req->buf = kmalloc(len, flags);
188 if (!req->buf) {
189 usb_ep_free_request(ep, req);
190 return ERR_PTR(-ENOMEM);
191 }
192
193 req->length = len;
194
195 return req;
196}
197
/* release a request allocated by frmnet_alloc_req() (buffer first) */
void frmnet_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
203
204static struct rmnet_ctrl_pkt *rmnet_alloc_ctrl_pkt(unsigned len, gfp_t flags)
205{
206 struct rmnet_ctrl_pkt *pkt;
207
208 pkt = kzalloc(sizeof(struct rmnet_ctrl_pkt), flags);
209 if (!pkt)
210 return ERR_PTR(-ENOMEM);
211
212 pkt->buf = kmalloc(len, flags);
213 if (!pkt->buf) {
214 kfree(pkt);
215 return ERR_PTR(-ENOMEM);
216 }
217 pkt->len = len;
218
219 return pkt;
220}
221
/* free a packet from rmnet_alloc_ctrl_pkt() (payload, then header) */
static void rmnet_free_ctrl_pkt(struct rmnet_ctrl_pkt *pkt)
{
	kfree(pkt->buf);
	kfree(pkt);
}
227
/* -------------------------------------------*/

/*
 * rmnet_gport_setup() - one-time setup of the BAM data path and SMD
 * control path for @no_rmnet_ports ports.
 *
 * Returns 0 on success or the first transport's error code.
 */
static int rmnet_gport_setup(int no_rmnet_ports)
{
	int ret;

	pr_debug("%s: no_rmnet_ports:%d\n", __func__, no_rmnet_ports);

	ret = gbam_setup(no_rmnet_ports);
	if (ret)
		return ret;

	/* control-path setup result is the overall result */
	return gsmd_ctrl_setup(no_rmnet_ports);
}
246
Manu Gautam2b0234a2011-09-07 16:47:52 +0530247static int gport_rmnet_connect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700248{
249 int ret;
250
251 pr_debug("%s:dev:%p portno:%d\n",
252 __func__, dev, dev->port_num);
253
254 ret = gsmd_ctrl_connect(&dev->port, dev->port_num);
255 if (ret) {
256 pr_err("%s: gsmd_ctrl_connect failed: err:%d\n",
257 __func__, ret);
258 return ret;
259 }
260
261 ret = gbam_connect(&dev->port, dev->port_num);
262 if (ret) {
263 pr_err("%s: gbam_connect failed: err:%d\n",
264 __func__, ret);
265 return ret;
266 }
267
268 return 0;
269}
270
Manu Gautam2b0234a2011-09-07 16:47:52 +0530271static int gport_rmnet_disconnect(struct f_rmnet *dev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272{
273 pr_debug("%s:dev:%p portno:%d\n",
274 __func__, dev, dev->port_num);
275
276 gbam_disconnect(&dev->port, dev->port_num);
277
278 gsmd_ctrl_disconnect(&dev->port, dev->port_num);
279
280 return 0;
281}
282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283static void frmnet_unbind(struct usb_configuration *c, struct usb_function *f)
284{
285 struct f_rmnet *dev = func_to_rmnet(f);
286
287 pr_debug("%s: portno:%d\n", __func__, dev->port_num);
288
289 if (gadget_is_dualspeed(c->cdev->gadget))
290 usb_free_descriptors(f->hs_descriptors);
291 usb_free_descriptors(f->descriptors);
292
293 frmnet_free_req(dev->notify, dev->notify_req);
294
Manu Gautamdd4222b2011-09-09 15:06:05 +0530295 kfree(f->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700296}
297
/*
 * frmnet_disable() - host deconfigured the function (or cable pulled).
 * Disables the notify endpoint, marks the port offline, discards all
 * queued control responses, then tears down the transports.
 */
static void frmnet_disable(struct usb_function *f)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	unsigned long flags;
	struct rmnet_ctrl_pkt *cpkt;

	pr_debug("%s: port#%d\n", __func__, dev->port_num);

	usb_ep_disable(dev->notify);

	atomic_set(&dev->online, 0);

	/* drop every response still queued for the host, under dev->lock */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	/* no responses left, so no notifications outstanding either */
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

	gport_rmnet_disconnect(dev);
}
323
/*
 * frmnet_set_alt() - host selected our (only) altsetting: (re)enable
 * the notify endpoint, pick per-speed descriptors and (re)connect the
 * data/control transports.
 *
 * Returns 0 on success or a negative error from endpoint enable /
 * transport connect.
 */
static int
frmnet_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rmnet *dev = func_to_rmnet(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int ret;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	/* driver_data set means the ep is already enabled: this is a reset */
	if (dev->notify->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		usb_ep_disable(dev->notify);
	}
	dev->notify_desc = ep_choose(cdev->gadget,
				dev->hs.notify,
				dev->fs.notify);
	ret = usb_ep_enable(dev->notify, dev->notify_desc);
	if (ret) {
		pr_err("%s: usb ep#%s enable failed, err#%d\n",
				__func__, dev->notify->name, ret);
		return ret;
	}
	dev->notify->driver_data = dev;

	/* same reset detection for the data side */
	if (dev->port.in->driver_data) {
		pr_debug("%s: reset port:%d\n", __func__, dev->port_num);
		gport_rmnet_disconnect(dev);
	}

	dev->port.in_desc = ep_choose(cdev->gadget,
			dev->hs.in, dev->fs.in);
	dev->port.out_desc = ep_choose(cdev->gadget,
			dev->hs.out, dev->fs.out);

	ret = gport_rmnet_connect(dev);

	/* NOTE(review): online is set even if connect failed — confirm
	 * this is intentional (callers still see ret)
	 */
	atomic_set(&dev->online, 1);

	return ret;
}
364
/*
 * frmnet_ctrl_response_available() - tell the host a control response
 * is waiting by queueing a CDC RESPONSE_AVAILABLE interrupt.
 *
 * notify_count acts as a pending counter: only the 0->1 transition
 * queues the (single, shared) notify_req; frmnet_notify_complete()
 * re-queues it until the count drains back to zero.
 */
static void frmnet_ctrl_response_available(struct f_rmnet *dev)
{
	struct usb_request		*req = dev->notify_req;
	struct usb_cdc_notification	*event;
	unsigned long			flags;
	int				ret;

	pr_debug("%s:dev:%p portno#%d\n", __func__, dev, dev->port_num);

	spin_lock_irqsave(&dev->lock, flags);
	if (!atomic_read(&dev->online) || !req || !req->buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	/* a notification is already in flight; completion will re-queue */
	if (atomic_inc_return(&dev->notify_count) != 1) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}

	event = req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);
	spin_unlock_irqrestore(&dev->lock, flags);

	ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (ret) {
		/* undo our increment so the counter stays balanced */
		atomic_dec(&dev->notify_count);
		pr_debug("ep enqueue error %d\n", ret);
	}
}
400
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700401static void frmnet_connect(struct grmnet *gr)
402{
403 struct f_rmnet *dev;
404
405 if (!gr) {
406 pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
407 return;
408 }
409
410 dev = port_to_rmnet(gr);
411
412 atomic_set(&dev->ctrl_online, 1);
413}
414
/*
 * frmnet_disconnect() - control-path callback: the modem side went
 * down. Marks the control channel offline, notifies the host that the
 * network connection dropped, and flushes pending responses.
 */
static void frmnet_disconnect(struct grmnet *gr)
{
	struct f_rmnet			*dev;
	unsigned long			flags;
	struct usb_cdc_notification	*event;
	int				status;
	struct rmnet_ctrl_pkt		*cpkt;

	if (!gr) {
		pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
		return;
	}

	dev = port_to_rmnet(gr);

	atomic_set(&dev->ctrl_online, 0);

	/* cable already gone: frmnet_disable() does the cleanup */
	if (!atomic_read(&dev->online)) {
		pr_debug("%s: nothing to do\n", __func__);
		return;
	}

	usb_ep_fifo_flush(dev->notify);

	/* reuse the shared notify request for NETWORK_CONNECTION(0) */
	event = dev->notify_req->buf;
	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
	if (status < 0) {
		/* NOTE(review): this early return skips the queue flush
		 * below; frmnet_disable() performs the same flush on
		 * cable disconnect — confirm no leak window remains
		 */
		if (!atomic_read(&dev->online))
			return;
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}

	/* drop any responses the host will never fetch now */
	spin_lock_irqsave(&dev->lock, flags);
	while (!list_empty(&dev->cpkt_resp_q)) {
		cpkt = list_first_entry(&dev->cpkt_resp_q,
				struct rmnet_ctrl_pkt, list);

		list_del(&cpkt->list);
		rmnet_free_ctrl_pkt(cpkt);
	}
	atomic_set(&dev->notify_count, 0);
	spin_unlock_irqrestore(&dev->lock, flags);

}
467
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700468static int
469frmnet_send_cpkt_response(struct grmnet *gr, struct rmnet_ctrl_pkt *cpkt)
470{
471 struct f_rmnet *dev;
472 unsigned long flags;
473
474 if (!gr || !cpkt) {
475 pr_err("%s: Invalid grmnet/cpkt, grmnet:%p cpkt:%p\n",
476 __func__, gr, cpkt);
477 return -ENODEV;
478 }
479
480 dev = port_to_rmnet(gr);
481
482 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
483
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700484 if (!atomic_read(&dev->online) || !atomic_read(&dev->ctrl_online)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485 rmnet_free_ctrl_pkt(cpkt);
486 return 0;
487 }
488
489 spin_lock_irqsave(&dev->lock, flags);
Chiranjeevi Velempati263e6c82011-11-11 23:07:36 +0530490 list_add_tail(&cpkt->list, &dev->cpkt_resp_q);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700491 spin_unlock_irqrestore(&dev->lock, flags);
492
493 frmnet_ctrl_response_available(dev);
494
495 return 0;
496}
497
498static void
499frmnet_cmd_complete(struct usb_ep *ep, struct usb_request *req)
500{
501 struct f_rmnet *dev = req->context;
502 struct usb_composite_dev *cdev;
503 struct rmnet_ctrl_pkt *cpkt;
504
505 if (!dev) {
506 pr_err("%s: rmnet dev is null\n", __func__);
507 return;
508 }
509
510 pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);
511
512 cdev = dev->cdev;
513
514 cpkt = rmnet_alloc_ctrl_pkt(req->actual, GFP_ATOMIC);
515 if (IS_ERR(cpkt)) {
516 pr_err("%s: Unable to allocate ctrl pkt\n", __func__);
517 return;
518 }
519
520 memcpy(cpkt->buf, req->buf, req->actual);
521
522 if (dev->port.send_cpkt_request)
523 dev->port.send_cpkt_request(&dev->port, dev->port_num, cpkt);
524}
525
/*
 * frmnet_notify_complete() - completion of the shared notify request.
 * While notify_count is above zero more RESPONSE_AVAILABLE events are
 * owed to the host, so re-queue the request; on connection loss the
 * counter is simply reset.
 */
static void frmnet_notify_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rmnet *dev = req->context;
	int status = req->status;

	pr_debug("%s: dev:%p port#%d\n", __func__, dev, dev->port_num);

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("rmnet notify ep error %d\n", status);
		/* FALLTHROUGH */
	case 0:
		/* control channel gone: nothing more to announce */
		if (!atomic_read(&dev->ctrl_online))
			break;

		/* that was the last pending notification */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
		if (status) {
			/* balance the count we just failed to service */
			atomic_dec(&dev->notify_count);
			pr_debug("ep enqueue error %d\n", status);
		}
		break;
	}
}
557
/*
 * frmnet_setup() - class-specific ep0 control requests.
 *
 * Handles the CDC-style vendor protocol:
 *  - SEND_ENCAPSULATED_COMMAND: accept the OUT data stage; the payload
 *    is forwarded to the modem in frmnet_cmd_complete().
 *  - GET_ENCAPSULATED_RESPONSE: return the oldest queued response.
 *  - SET_CONTROL_LINE_STATE: pass the control bits (e.g. DTR) on.
 *
 * Returns the data-stage length queued on ep0, 0 for a bare status
 * stage, or a negative error (-EOPNOTSUPP for unknown requests).
 */
static int
frmnet_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rmnet			*dev = func_to_rmnet(f);
	struct usb_composite_dev	*cdev = dev->cdev;
	struct usb_request		*req = cdev->req;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	int				ret = -EOPNOTSUPP;

	pr_debug("%s:dev:%p port#%d\n", __func__, dev, dev->port_num);

	if (!atomic_read(&dev->online)) {
		pr_debug("%s: usb cable is not connected\n", __func__);
		return -ENOTCONN;
	}

	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		/* accept the whole OUT data stage; payload handled in
		 * the completion callback
		 */
		ret = w_length;
		req->complete = frmnet_cmd_complete;
		req->context = dev;
		break;


	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value)
			goto invalid;
		else {
			unsigned len;
			struct rmnet_ctrl_pkt *cpkt;

			/* plain spin_lock: we are already in ep0
			 * interrupt context here
			 */
			spin_lock(&dev->lock);
			if (list_empty(&dev->cpkt_resp_q)) {
				pr_err("ctrl resp queue empty "
					" req%02x.%02x v%04x i%04x l%d\n",
					ctrl->bRequestType, ctrl->bRequest,
					w_value, w_index, w_length);
				spin_unlock(&dev->lock);
				goto invalid;
			}

			cpkt = list_first_entry(&dev->cpkt_resp_q,
					struct rmnet_ctrl_pkt, list);
			list_del(&cpkt->list);
			spin_unlock(&dev->lock);

			/* truncate to what the host asked for */
			len = min_t(unsigned, w_length, cpkt->len);
			memcpy(req->buf, cpkt->buf, len);
			ret = len;

			rmnet_free_ctrl_pkt(cpkt);
		}
		break;
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
		/* w_value carries the line-state bits (see ACM_CTRL_DTR) */
		if (dev->port.send_cbits_tomodem)
			dev->port.send_cbits_tomodem(&dev->port,
							dev->port_num,
							w_value);
		ret = 0;

		break;
	default:

invalid:
		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (ret >= 0) {
		VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (ret < w_length);
		req->length = ret;
		ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (ret < 0)
			ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
	}

	return ret;
}
647
648static int frmnet_bind(struct usb_configuration *c, struct usb_function *f)
649{
650 struct f_rmnet *dev = func_to_rmnet(f);
651 struct usb_ep *ep;
652 struct usb_composite_dev *cdev = c->cdev;
653 int ret = -ENODEV;
654
655 dev->ifc_id = usb_interface_id(c, f);
656 if (dev->ifc_id < 0) {
657 pr_err("%s: unable to allocate ifc id, err:%d",
658 __func__, dev->ifc_id);
659 return dev->ifc_id;
660 }
661 rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
662
663 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_in_desc);
664 if (!ep) {
665 pr_err("%s: usb epin autoconfig failed\n", __func__);
666 return -ENODEV;
667 }
668 dev->port.in = ep;
669 ep->driver_data = cdev;
670
671 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_out_desc);
672 if (!ep) {
673 pr_err("%s: usb epout autoconfig failed\n", __func__);
674 ret = -ENODEV;
675 goto ep_auto_out_fail;
676 }
677 dev->port.out = ep;
678 ep->driver_data = cdev;
679
680 ep = usb_ep_autoconfig(cdev->gadget, &rmnet_fs_notify_desc);
681 if (!ep) {
682 pr_err("%s: usb epnotify autoconfig failed\n", __func__);
683 ret = -ENODEV;
684 goto ep_auto_notify_fail;
685 }
686 dev->notify = ep;
687 ep->driver_data = cdev;
688
689 dev->notify_req = frmnet_alloc_req(ep,
Hemant Kumarfbf113d2011-09-16 18:24:45 -0700690 sizeof(struct usb_cdc_notification),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700691 GFP_KERNEL);
692 if (IS_ERR(dev->notify_req)) {
693 pr_err("%s: unable to allocate memory for notify req\n",
694 __func__);
695 ret = -ENOMEM;
696 goto ep_notify_alloc_fail;
697 }
698
699 dev->notify_req->complete = frmnet_notify_complete;
700 dev->notify_req->context = dev;
701
702 f->descriptors = usb_copy_descriptors(rmnet_fs_function);
703
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530704 if (!f->descriptors)
705 goto fail;
706
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700707 dev->fs.in = usb_find_endpoint(rmnet_fs_function,
708 f->descriptors,
709 &rmnet_fs_in_desc);
710 dev->fs.out = usb_find_endpoint(rmnet_fs_function,
711 f->descriptors,
712 &rmnet_fs_out_desc);
713 dev->fs.notify = usb_find_endpoint(rmnet_fs_function,
714 f->descriptors,
715 &rmnet_fs_notify_desc);
716
717 if (gadget_is_dualspeed(cdev->gadget)) {
718 rmnet_hs_in_desc.bEndpointAddress =
719 rmnet_fs_in_desc.bEndpointAddress;
720 rmnet_hs_out_desc.bEndpointAddress =
721 rmnet_fs_out_desc.bEndpointAddress;
722 rmnet_hs_notify_desc.bEndpointAddress =
723 rmnet_fs_notify_desc.bEndpointAddress;
724
725 /* copy descriptors, and track endpoint copies */
726 f->hs_descriptors = usb_copy_descriptors(rmnet_hs_function);
727
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530728 if (!f->hs_descriptors)
729 goto fail;
730
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700731 dev->hs.in = usb_find_endpoint(rmnet_hs_function,
732 f->hs_descriptors, &rmnet_hs_in_desc);
733 dev->hs.out = usb_find_endpoint(rmnet_hs_function,
734 f->hs_descriptors, &rmnet_hs_out_desc);
735 dev->hs.notify = usb_find_endpoint(rmnet_hs_function,
736 f->hs_descriptors, &rmnet_hs_notify_desc);
737 }
738
739 pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
740 __func__, dev->port_num,
741 gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
742 dev->port.in->name, dev->port.out->name);
743
744 return 0;
745
Rajkumar Raghupathy42ec8da2011-10-21 18:58:53 +0530746fail:
747 if (f->descriptors)
748 usb_free_descriptors(f->descriptors);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700749ep_notify_alloc_fail:
750 dev->notify->driver_data = NULL;
751 dev->notify = NULL;
752ep_auto_notify_fail:
753 dev->port.out->driver_data = NULL;
754 dev->port.out = NULL;
755ep_auto_out_fail:
756 dev->port.in->driver_data = NULL;
757 dev->port.in = NULL;
758
759 return ret;
760}
761
Manu Gautam2b0234a2011-09-07 16:47:52 +0530762static int frmnet_bind_config(struct usb_configuration *c, unsigned portno)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700763{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700764 int status;
765 struct f_rmnet *dev;
766 struct usb_function *f;
767 unsigned long flags;
768
769 pr_debug("%s: usb config:%p\n", __func__, c);
770
Manu Gautam2b0234a2011-09-07 16:47:52 +0530771 if (portno >= nr_rmnet_ports) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700772 pr_err("%s: supporting ports#%u port_id:%u", __func__,
Manu Gautam2b0234a2011-09-07 16:47:52 +0530773 nr_rmnet_ports, portno);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700774 return -ENODEV;
775 }
776
777 if (rmnet_string_defs[0].id == 0) {
778 status = usb_string_id(c->cdev);
779 if (status < 0) {
780 pr_err("%s: failed to get string id, err:%d\n",
781 __func__, status);
782 return status;
783 }
784 rmnet_string_defs[0].id = status;
785 }
786
Manu Gautam2b0234a2011-09-07 16:47:52 +0530787 dev = rmnet_ports[portno].port;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700788
789 spin_lock_irqsave(&dev->lock, flags);
790 dev->cdev = c->cdev;
791 f = &dev->port.func;
Vamsi Krishna188078d2011-10-26 15:09:55 -0700792 f->name = kasprintf(GFP_ATOMIC, "rmnet%d", portno);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700793 spin_unlock_irqrestore(&dev->lock, flags);
Vamsi Krishna188078d2011-10-26 15:09:55 -0700794 if (!f->name) {
795 pr_err("%s: cannot allocate memory for name\n", __func__);
796 return -ENOMEM;
797 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700798
799 f->strings = rmnet_strings;
800 f->bind = frmnet_bind;
801 f->unbind = frmnet_unbind;
802 f->disable = frmnet_disable;
803 f->set_alt = frmnet_set_alt;
804 f->setup = frmnet_setup;
805 dev->port.send_cpkt_response = frmnet_send_cpkt_response;
Vamsi Krishna9e9921a2011-10-04 16:09:31 -0700806 dev->port.disconnect = frmnet_disconnect;
807 dev->port.connect = frmnet_connect;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700808
809 status = usb_add_function(c, f);
810 if (status) {
811 pr_err("%s: usb add function failed: %d\n",
812 __func__, status);
Manu Gautam2b0234a2011-09-07 16:47:52 +0530813 kfree(f->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814 return status;
815 }
816
817 pr_debug("%s: complete\n", __func__);
818
819 return status;
820}
821
Manu Gautame3e897c2011-09-12 17:18:46 +0530822static void frmnet_cleanup(void)
823{
824 int i;
825
826 for (i = 0; i < nr_rmnet_ports; i++)
827 kfree(rmnet_ports[i].port);
828
829 nr_rmnet_ports = 0;
830}
831
Manu Gautam2b0234a2011-09-07 16:47:52 +0530832static int frmnet_init_port(int instances)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700834 int i;
835 struct f_rmnet *dev;
836 int ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700837
838 pr_debug("%s: instances :%d\n", __func__, instances);
839
Manu Gautame3e897c2011-09-12 17:18:46 +0530840 if (instances > NR_RMNET_PORTS) {
841 pr_err("%s: Max-%d instances supported\n", __func__,
842 NR_RMNET_PORTS);
843 return -EINVAL;
844 }
845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 for (i = 0; i < instances; i++) {
847 dev = kzalloc(sizeof(struct f_rmnet), GFP_KERNEL);
848 if (!dev) {
849 pr_err("%s: Unable to allocate rmnet device\n",
850 __func__);
851 ret = -ENOMEM;
852 goto fail_probe;
853 }
854
855 dev->port_num = i;
856 spin_lock_init(&dev->lock);
857 INIT_LIST_HEAD(&dev->cpkt_resp_q);
858
Manu Gautam2b0234a2011-09-07 16:47:52 +0530859 rmnet_ports[i].port = dev;
860 rmnet_ports[i].port_num = i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700861
Manu Gautam2b0234a2011-09-07 16:47:52 +0530862 nr_rmnet_ports++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700863 }
864
Manu Gautam2b0234a2011-09-07 16:47:52 +0530865 rmnet_gport_setup(nr_rmnet_ports);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700866
867 return 0;
868
869fail_probe:
Manu Gautam2b0234a2011-09-07 16:47:52 +0530870 for (i = 0; i < nr_rmnet_ports; i++)
871 kfree(rmnet_ports[i].port);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700872
873 return ret;
874}