/*
2 * rmnet.c -- RmNet function driver
3 *
4 * Copyright (C) 2003-2005,2008 David Brownell
5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
7 * Copyright (C) 2008 Nokia Corporation
8 * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/err.h>
29#include <linux/interrupt.h>
30#include <linux/kfifo.h>
31
32#include <mach/msm_smd.h>
33#include <linux/usb/cdc.h>
34
35#include "usb_function.h"
36
/* Names of the SMD ports carrying QMI control traffic and network data.
 * Read-only module parameters, fixed at module load time.
 */
static char *rmnet_ctl_ch = CONFIG_RMNET_SMD_CTL_CHANNEL;
module_param(rmnet_ctl_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_ctl_ch, "RmNet control SMD channel");

static char *rmnet_data_ch = CONFIG_RMNET_SMD_DATA_CHANNEL;
module_param(rmnet_data_ch, charp, S_IRUGO);
MODULE_PARM_DESC(rmnet_data_ch, "RmNet data SMD channel");
44
/* Interrupt endpoint polling interval exponent and notification size */
#define RMNET_NOTIFY_INTERVAL 5
#define RMNET_MAX_NOTIFY_SIZE sizeof(struct usb_cdc_notification)

/* QMI control message buffer pools (EP0 encapsulated command/response) */
#define QMI_REQ_MAX 4
#define QMI_REQ_SIZE 2048
#define QMI_RESP_MAX 8
#define QMI_RESP_SIZE 2048

/* Bulk data usb_request pools */
#define RX_REQ_MAX 8
#define RX_REQ_SIZE 2048
#define TX_REQ_MAX 8
#define TX_REQ_SIZE 2048

/* Length programmed into each bulk OUT request (see rmnet_bind) */
#define TXN_MAX 2048
59
/* Vendor-specific interface: one interrupt IN endpoint for CDC
 * notifications plus a bulk IN/OUT pair for network data.
 */
static struct usb_interface_descriptor rmnet_interface_desc = {
	.bLength = USB_DT_INTERFACE_SIZE,
	.bDescriptorType = USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints = 3,
	.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
	.bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
	/* .iInterface = DYNAMIC */
};

/* Full speed support */
static struct usb_endpoint_descriptor rmnet_fs_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	/* endpoint number is filled in by rmnet_bind() */
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval = 1 << RMNET_NOTIFY_INTERVAL,
};

static struct usb_endpoint_descriptor rmnet_fs_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),
};

static struct usb_endpoint_descriptor rmnet_fs_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(64),
};

/* High speed support */
static struct usb_endpoint_descriptor rmnet_hs_notify_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize = __constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
	.bInterval = RMNET_NOTIFY_INTERVAL + 4,
};

static struct usb_endpoint_descriptor rmnet_hs_in_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor rmnet_hs_out_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize = __constant_cpu_to_le16(512),
};
122
/* QMI requests & responses buffer*/
struct qmi_buf {
	void *buf;		/* kmalloc'd message storage */
	int len;		/* bytes currently held in buf */
	struct list_head list;	/* links into a pool or pending queue */
};
129
/* Control & data SMD channel private data */
struct rmnet_smd_info {
	struct smd_channel *ch;
	/* tx_tlet drains SMD toward the USB host; rx_tlet pushes
	 * host data that could not be written immediately into SMD
	 */
	struct tasklet_struct tx_tlet;
	struct tasklet_struct rx_tlet;
#define CH_OPENED 0
	unsigned long flags;
	/* pending rx packet length */
	atomic_t rx_pkt;
	/* wait for smd open event*/
	wait_queue_head_t wait;
};
142
/* Per-function device state */
struct rmnet_dev {
	struct usb_endpoint *epout;	/* bulk OUT: host -> device */
	struct usb_endpoint *epin;	/* bulk IN: device -> host */
	struct usb_endpoint *epnotify;	/* interrupt IN: CDC notifications */
	struct usb_request *notify_req;	/* single reusable notify request */

	u8 ifc_id;			/* assigned USB interface number */
	/* QMI lists */
	struct list_head qmi_req_pool;
	struct list_head qmi_resp_pool;
	struct list_head qmi_req_q;
	struct list_head qmi_resp_q;
	/* Tx/Rx lists */
	struct list_head tx_idle;
	struct list_head rx_idle;
	struct list_head rx_queue;

	spinlock_t lock;		/* guards all the lists above */
	atomic_t online;		/* nonzero while configured */
	atomic_t notify_count;		/* pending RESPONSE_AVAILABLE events */

	struct rmnet_smd_info smd_ctl;	/* QMI control channel */
	struct rmnet_smd_info smd_data;	/* network data channel */

	/* smd_open()/smd_close() need process context */
	struct workqueue_struct *wq;
	struct work_struct connect_work;
	struct work_struct disconnect_work;
};

/* forward declaration; defined near the bottom of this file */
static struct usb_function rmnet_function;
173
174struct qmi_buf *
175rmnet_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
176{
177 struct qmi_buf *qmi;
178
179 qmi = kmalloc(sizeof(struct qmi_buf), kmalloc_flags);
180 if (qmi != NULL) {
181 qmi->buf = kmalloc(len, kmalloc_flags);
182 if (qmi->buf == NULL) {
183 kfree(qmi);
184 qmi = NULL;
185 }
186 }
187
188 return qmi ? qmi : ERR_PTR(-ENOMEM);
189}
190
/* Release a qmi_buf allocated by rmnet_alloc_qmi(). */
void rmnet_free_qmi(struct qmi_buf *qmi)
{
	kfree(qmi->buf);
	kfree(qmi);
}
196/*
197 * Allocate a usb_request and its buffer. Returns a pointer to the
198 * usb_request or NULL if there is an error.
199 */
200struct usb_request *
201rmnet_alloc_req(struct usb_endpoint *ep, unsigned len, gfp_t kmalloc_flags)
202{
203 struct usb_request *req;
204
205 req = usb_ept_alloc_req(ep, 0);
206
207 if (req != NULL) {
208 req->length = len;
209 req->buf = kmalloc(len, kmalloc_flags);
210 if (req->buf == NULL) {
211 usb_ept_free_req(ep, req);
212 req = NULL;
213 }
214 }
215
216 return req ? req : ERR_PTR(-ENOMEM);
217}
218
/*
 * Free a usb_request and its buffer.
 */
void rmnet_free_req(struct usb_endpoint *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ept_free_req(ep, req);
}
227
/*
 * Completion handler for the interrupt IN notification request.
 * notify_count holds the number of RESPONSE_AVAILABLE events still owed
 * to the host; the single notify request is requeued here until the
 * count drains to zero.
 */
static void rmnet_notify_complete(struct usb_endpoint *ep,
		struct usb_request *req)
{
	struct rmnet_dev *dev = req->context;
	int status = req->status;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENODEV:
		/* connection gone */
		atomic_set(&dev->notify_count, 0);
		break;
	default:
		pr_err("%s: rmnet notify ep error %d\n", __func__, status);
		/* FALLTHROUGH */
	case 0:
		if (ep != dev->epnotify)
			break;

		/* handle multiple pending QMI_RESPONSE_AVAILABLE
		 * notifications by resending until we're done
		 */
		if (atomic_dec_and_test(&dev->notify_count))
			break;

		status = usb_ept_queue_xfer(dev->epnotify, dev->notify_req);
		if (status) {
			atomic_dec(&dev->notify_count);
			pr_err("%s: rmnet notify ep enqueue error %d\n",
					__func__, status);
		}
		break;
	}
}
263
/*
 * Tell the host a QMI response is waiting by sending a CDC
 * RESPONSE_AVAILABLE notification.  Only the caller that raises
 * notify_count from 0 to 1 queues the request; further events are
 * absorbed into the counter and replayed by rmnet_notify_complete().
 */
static void qmi_response_available(struct rmnet_dev *dev)
{
	struct usb_request *req = dev->notify_req;
	struct usb_cdc_notification *event = req->buf;
	int status;

	/* Response will be sent later */
	if (atomic_inc_return(&dev->notify_count) != 1)
		return;

	event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
			| USB_RECIP_INTERFACE;
	event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
	event->wValue = cpu_to_le16(0);
	event->wIndex = cpu_to_le16(dev->ifc_id);
	event->wLength = cpu_to_le16(0);

	status = usb_ept_queue_xfer(dev->epnotify, dev->notify_req);
	if (status < 0) {
		atomic_dec(&dev->notify_count);
		pr_err("%s: rmnet notify ep enqueue error %d\n",
				__func__, status);
	}
}
288
/* TODO
 * handle modem restart events
 */
/*
 * SMD event callback (shared by the control and data channels).  On
 * DATA events it kicks the appropriate tasklet: rx_tlet when enough
 * write room has opened up for the stalled packet recorded in rx_pkt,
 * tx_tlet when there is modem data to read.
 */
static void rmnet_smd_notify(void *priv, unsigned event)
{
	struct rmnet_smd_info *smd_info = priv;
	int len = atomic_read(&smd_info->rx_pkt);

	switch (event) {
	case SMD_EVENT_DATA: {

		if (len && (smd_write_avail(smd_info->ch) >= len))
			tasklet_schedule(&smd_info->rx_tlet);

		if (smd_read_avail(smd_info->ch))
			tasklet_schedule(&smd_info->tx_tlet);

		break;
	}
	case SMD_EVENT_OPEN:
		/* usb endpoints are not enabled untill smd channels
		 * are opened. wake up worker thread to continue
		 * connection processing
		 */
		set_bit(CH_OPENED, &smd_info->flags);
		wake_up(&smd_info->wait);
		break;
	case SMD_EVENT_CLOSE:
		/* We will never come here.
		 * reset flags after closing smd channel
		 * */
		clear_bit(CH_OPENED, &smd_info->flags);
		break;
	}
}
324
/*
 * Tasklet: read complete QMI responses from the control SMD channel
 * into buffers from qmi_resp_pool, queue them on qmi_resp_q, and notify
 * the host via RESPONSE_AVAILABLE for each one.
 */
static void rmnet_control_tx_tlet(unsigned long arg)
{
	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
	struct qmi_buf *qmi_resp;
	int sz;
	unsigned long flags;

	while (1) {
		/* only consume fully-arrived packets */
		sz = smd_cur_packet_size(dev->smd_ctl.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_ctl.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->qmi_resp_pool)) {
			pr_err("%s: rmnet QMI Tx buffers full\n", __func__);
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
		qmi_resp = list_first_entry(&dev->qmi_resp_pool,
				struct qmi_buf, list);
		list_del(&qmi_resp->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_resp->len = smd_read(dev->smd_ctl.ch, qmi_resp->buf, sz);

		spin_lock_irqsave(&dev->lock, flags);
		list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
		spin_unlock_irqrestore(&dev->lock, flags);

		qmi_response_available(dev);
	}

}
360
361static void rmnet_control_rx_tlet(unsigned long arg)
362{
363 struct rmnet_dev *dev = (struct rmnet_dev *) arg;
364 struct qmi_buf *qmi_req;
365 int ret;
366 unsigned long flags;
367
368 spin_lock_irqsave(&dev->lock, flags);
369 while (1) {
370
371 if (list_empty(&dev->qmi_req_q)) {
372 atomic_set(&dev->smd_ctl.rx_pkt, 0);
373 break;
374 }
375 qmi_req = list_first_entry(&dev->qmi_req_q,
376 struct qmi_buf, list);
377 if (smd_write_avail(dev->smd_ctl.ch) < qmi_req->len) {
378 atomic_set(&dev->smd_ctl.rx_pkt, qmi_req->len);
379 pr_debug("%s: rmnet control smd channel full\n",
380 __func__);
381 break;
382 }
383
384 list_del(&qmi_req->list);
385 spin_unlock_irqrestore(&dev->lock, flags);
386 ret = smd_write(dev->smd_ctl.ch, qmi_req->buf, qmi_req->len);
387 spin_lock_irqsave(&dev->lock, flags);
388 if (ret != qmi_req->len) {
389 pr_err("%s: rmnet control smd write failed\n",
390 __func__);
391 break;
392 }
393
394 list_add_tail(&qmi_req->list, &dev->qmi_req_pool);
395 }
396 spin_unlock_irqrestore(&dev->lock, flags);
397}
398
/*
 * Completion handler for the EP0 OUT data stage of
 * SEND_ENCAPSULATED_COMMAND: write the QMI request straight into the
 * control SMD channel when possible, otherwise park it on qmi_req_q
 * for rmnet_control_rx_tlet, then ACK with a zero-length EP0 IN packet.
 */
static void rmnet_command_complete(struct usb_endpoint *ep,
		struct usb_request *req)
{
	struct rmnet_dev *dev = req->context;
	struct usb_function *func = &rmnet_function;
	struct usb_request *in_req;
	struct qmi_buf *qmi_req;
	int ret;

	if (req->status < 0) {
		pr_err("%s: rmnet command error %d\n", __func__, req->status);
		return;
	}

	spin_lock(&dev->lock);
	/* no pending control rx packet */
	if (!atomic_read(&dev->smd_ctl.rx_pkt)) {
		if (smd_write_avail(dev->smd_ctl.ch) < req->actual) {
			atomic_set(&dev->smd_ctl.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_ctl.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			pr_err("%s: rmnet control smd write failed\n",
					__func__);
		goto ep0_ack;
	}
queue_req:
	if (list_empty(&dev->qmi_req_pool)) {
		spin_unlock(&dev->lock);
		pr_err("%s: rmnet QMI pool is empty\n", __func__);
		return;
	}

	qmi_req = list_first_entry(&dev->qmi_req_pool, struct qmi_buf, list);
	list_del(&qmi_req->list);
	/* drop the lock for the memcpy; the buffer is off every list */
	spin_unlock(&dev->lock);
	memcpy(qmi_req->buf, req->buf, req->actual);
	qmi_req->len = req->actual;
	spin_lock(&dev->lock);
	list_add_tail(&qmi_req->list, &dev->qmi_req_q);
	spin_unlock(&dev->lock);
ep0_ack:
	/* Send ACK on EP0 IN */
	in_req = func->ep0_in_req;
	in_req->length = 0;
	in_req->complete = 0;
	usb_ept_queue_xfer(func->ep0_in, in_req);
}
450
451static int rmnet_setup(struct usb_ctrlrequest *ctrl, void *buf,
452 int len, void *context)
453{
454 struct rmnet_dev *dev = context;
455 struct usb_request *req = rmnet_function.ep0_out_req;
456 int ret = -EOPNOTSUPP;
457 u16 w_index = le16_to_cpu(ctrl->wIndex);
458 u16 w_value = le16_to_cpu(ctrl->wValue);
459 u16 w_length = le16_to_cpu(ctrl->wLength);
460 struct qmi_buf *resp;
461 int schedule = 0;
462
463 if (!atomic_read(&dev->online))
464 return -ENOTCONN;
465
466 switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
467
468 case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
469 | USB_CDC_SEND_ENCAPSULATED_COMMAND:
470 if (w_value || w_index != dev->ifc_id)
471 goto invalid;
472 ret = w_length;
473 req->complete = rmnet_command_complete;
474 req->context = dev;
475 break;
476
477
478 case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
479 | USB_CDC_GET_ENCAPSULATED_RESPONSE:
480 if (w_value || w_index != dev->ifc_id)
481 goto invalid;
482 else {
483 spin_lock(&dev->lock);
484 resp = list_first_entry(&dev->qmi_resp_q,
485 struct qmi_buf, list);
486 list_del(&resp->list);
487 spin_unlock(&dev->lock);
488 memcpy(buf, resp->buf, resp->len);
489 ret = resp->len;
490 spin_lock(&dev->lock);
491
492 if (list_empty(&dev->qmi_resp_pool))
493 schedule = 1;
494 list_add_tail(&resp->list, &dev->qmi_resp_pool);
495
496 if (schedule)
497 tasklet_schedule(&dev->smd_ctl.tx_tlet);
498 spin_unlock(&dev->lock);
499 }
500 break;
501 default:
502
503invalid:
504 pr_debug("%s: invalid control req%02x.%02x v%04x i%04x l%d\n",
505 __func__, ctrl->bRequestType, ctrl->bRequest,
506 w_value, w_index, w_length);
507 }
508
509 return ret;
510}
511
/*
 * Queue every request on rx_idle to the bulk OUT endpoint.  The lock is
 * dropped around usb_ept_queue_xfer() because the completion handler
 * (rmnet_complete_epout) also takes dev->lock.
 */
static void rmnet_start_rx(struct rmnet_dev *dev)
{
	int status;
	struct usb_request *req;
	struct list_head *act, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_for_each_safe(act, tmp, &dev->rx_idle) {
		req = list_entry(act, struct usb_request, list);
		list_del(&req->list);

		spin_unlock_irqrestore(&dev->lock, flags);
		status = usb_ept_queue_xfer(dev->epout, req);
		spin_lock_irqsave(&dev->lock, flags);

		if (status) {
			pr_err("%s: rmnet data rx enqueue err %d\n",
					__func__, status);
			/* put it back and stop; remaining requests would
			 * most likely fail the same way
			 */
			list_add_tail(&req->list, &dev->rx_idle);
			break;
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}
537
/*
 * Tasklet: drain complete packets from the data SMD channel into idle
 * bulk IN requests and queue them toward the host.  Stops when the
 * tx_idle pool runs dry; rmnet_complete_epin() reschedules us.
 */
static void rmnet_data_tx_tlet(unsigned long arg)
{
	struct rmnet_dev *dev = (struct rmnet_dev *) arg;
	struct usb_request *req;
	int status;
	int sz;
	unsigned long flags;

	while (1) {

		/* only consume fully-arrived packets */
		sz = smd_cur_packet_size(dev->smd_data.ch);
		if (sz == 0)
			break;
		if (smd_read_avail(dev->smd_data.ch) < sz)
			break;

		spin_lock_irqsave(&dev->lock, flags);
		if (list_empty(&dev->tx_idle)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			pr_debug("%s: rmnet data Tx buffers full\n", __func__);
			break;
		}
		req = list_first_entry(&dev->tx_idle, struct usb_request, list);
		list_del(&req->list);
		spin_unlock_irqrestore(&dev->lock, flags);

		req->length = smd_read(dev->smd_data.ch, req->buf, sz);
		status = usb_ept_queue_xfer(dev->epin, req);
		if (status) {
			pr_err("%s: rmnet tx data enqueue err %d\n",
					__func__, status);
			spin_lock_irqsave(&dev->lock, flags);
			list_add_tail(&req->list, &dev->tx_idle);
			spin_unlock_irqrestore(&dev->lock, flags);
			break;
		}
	}

}
577
578static void rmnet_data_rx_tlet(unsigned long arg)
579{
580 struct rmnet_dev *dev = (struct rmnet_dev *) arg;
581 struct usb_request *req;
582 int ret;
583 unsigned long flags;
584
585 spin_lock_irqsave(&dev->lock, flags);
586 while (1) {
587 if (list_empty(&dev->rx_queue)) {
588 atomic_set(&dev->smd_data.rx_pkt, 0);
589 break;
590 }
591 req = list_first_entry(&dev->rx_queue,
592 struct usb_request, list);
593 if (smd_write_avail(dev->smd_data.ch) < req->actual) {
594 atomic_set(&dev->smd_data.rx_pkt, req->actual);
595 pr_debug("%s: rmnet SMD data channel full\n", __func__);
596 break;
597 }
598
599 list_del(&req->list);
600 spin_unlock_irqrestore(&dev->lock, flags);
601 ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
602 spin_lock_irqsave(&dev->lock, flags);
603 if (ret != req->actual) {
604 pr_err("%s: rmnet SMD data write failed\n", __func__);
605 break;
606 }
607 list_add_tail(&req->list, &dev->rx_idle);
608 }
609 spin_unlock_irqrestore(&dev->lock, flags);
610
611 /* We have free rx data requests. */
612 rmnet_start_rx(dev);
613}
614
/* If SMD has enough room to accommodate a data rx packet,
 * write into SMD directly. Otherwise enqueue to rx_queue.
 * We will not write into SMD directly untill rx_queue is
 * empty to strictly follow the ordering requests.
 */
static void rmnet_complete_epout(struct usb_endpoint *ep,
		struct usb_request *req)
{
	struct rmnet_dev *dev = req->context;
	int status = req->status;
	int ret;

	switch (status) {
	case 0:
		/* normal completion */
		break;
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENODEV:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	default:
		/* unexpected failure */
		pr_err("%s: response error %d, %d/%d\n",
			__func__, status, req->actual,
			req->length);
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		return;
	}

	spin_lock(&dev->lock);
	/* rx_pkt == 0 means rx_queue is empty: fast path allowed */
	if (!atomic_read(&dev->smd_data.rx_pkt)) {
		if (smd_write_avail(dev->smd_data.ch) < req->actual) {
			atomic_set(&dev->smd_data.rx_pkt, req->actual);
			goto queue_req;
		}
		spin_unlock(&dev->lock);
		ret = smd_write(dev->smd_data.ch, req->buf, req->actual);
		/* This should never happen */
		if (ret != req->actual)
			pr_err("%s: rmnet data smd write failed\n", __func__);
		/* Restart Rx */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->rx_idle);
		spin_unlock(&dev->lock);
		rmnet_start_rx(dev);
		return;
	}
queue_req:
	/* ordered drain handled later by rmnet_data_rx_tlet */
	list_add_tail(&req->list, &dev->rx_queue);
	spin_unlock(&dev->lock);
}
672
/*
 * Bulk IN completion: return the request to tx_idle and, if the pool
 * was empty (the data tx tasklet may have stalled for lack of
 * buffers), kick the tasklet to resume draining SMD.
 */
static void rmnet_complete_epin(struct usb_endpoint *ep,
		struct usb_request *req)
{
	struct rmnet_dev *dev = req->context;
	int status = req->status;
	int schedule = 0;

	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENODEV:
		/* connection gone */
		spin_lock(&dev->lock);
		list_add_tail(&req->list, &dev->tx_idle);
		spin_unlock(&dev->lock);
		break;
	default:
		pr_err("%s: rmnet data tx ep error %d\n", __func__, status);
		/* FALLTHROUGH */
	case 0:
		spin_lock(&dev->lock);
		if (list_empty(&dev->tx_idle))
			schedule = 1;
		list_add_tail(&req->list, &dev->tx_idle);

		if (schedule)
			tasklet_schedule(&dev->smd_data.tx_tlet);
		spin_unlock(&dev->lock);
		break;
	}

}
705
706static void rmnet_disconnect_work(struct work_struct *w)
707{
708 struct qmi_buf *qmi;
709 struct usb_request *req;
710 struct list_head *act, *tmp;
711 struct rmnet_dev *dev = container_of(w, struct rmnet_dev,
712 disconnect_work);
713
714 atomic_set(&dev->notify_count, 0);
715
716 tasklet_kill(&dev->smd_ctl.rx_tlet);
717 tasklet_kill(&dev->smd_ctl.tx_tlet);
718 tasklet_kill(&dev->smd_data.rx_tlet);
719 tasklet_kill(&dev->smd_data.rx_tlet);
720
721 list_for_each_safe(act, tmp, &dev->rx_queue) {
722 req = list_entry(act, struct usb_request, list);
723 list_del(&req->list);
724 list_add_tail(&req->list, &dev->rx_idle);
725 }
726
727 list_for_each_safe(act, tmp, &dev->qmi_req_q) {
728 qmi = list_entry(act, struct qmi_buf, list);
729 list_del(&qmi->list);
730 list_add_tail(&qmi->list, &dev->qmi_req_pool);
731 }
732
733 list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
734 qmi = list_entry(act, struct qmi_buf, list);
735 list_del(&qmi->list);
736 list_add_tail(&qmi->list, &dev->qmi_resp_pool);
737 }
738
739 smd_close(dev->smd_ctl.ch);
740 dev->smd_ctl.flags = 0;
741
742 smd_close(dev->smd_data.ch);
743 dev->smd_data.flags = 0;
744}
745
746static void rmnet_connect_work(struct work_struct *w)
747{
748 struct rmnet_dev *dev = container_of(w, struct rmnet_dev, connect_work);
749 int ret;
750
751 /* Control channel for QMI messages */
752 ret = smd_open(rmnet_ctl_ch, &dev->smd_ctl.ch,
753 &dev->smd_ctl, rmnet_smd_notify);
754 if (ret) {
755 pr_err("%s: Unable to open control smd channel\n", __func__);
756 return;
757 }
758 wait_event(dev->smd_ctl.wait, test_bit(CH_OPENED,
759 &dev->smd_ctl.flags));
760
761 /* Data channel for network packets */
762 ret = smd_open(rmnet_data_ch, &dev->smd_data.ch,
763 &dev->smd_data, rmnet_smd_notify);
764 if (ret) {
765 pr_err("%s: Unable to open data smd channel\n", __func__);
766 smd_close(dev->smd_ctl.ch);
767 }
768 wait_event(dev->smd_data.wait, test_bit(CH_OPENED,
769 &dev->smd_data.flags));
770
771 if (usb_msm_get_speed() == USB_SPEED_HIGH) {
772 usb_configure_endpoint(dev->epin, &rmnet_hs_in_desc);
773 usb_configure_endpoint(dev->epout, &rmnet_hs_out_desc);
774 usb_configure_endpoint(dev->epnotify, &rmnet_hs_notify_desc);
775 } else {
776 usb_configure_endpoint(dev->epin, &rmnet_fs_in_desc);
777 usb_configure_endpoint(dev->epout, &rmnet_fs_out_desc);
778 usb_configure_endpoint(dev->epnotify, &rmnet_fs_notify_desc);
779 }
780
781 usb_ept_enable(dev->epin, 1);
782 usb_ept_enable(dev->epout, 1);
783 usb_ept_enable(dev->epnotify, 1);
784
785 atomic_set(&dev->online, 1);
786 /* Queue Rx data requests */
787 rmnet_start_rx(dev);
788}
789
790static void rmnet_configure(int configured, void *context)
791
792{
793 struct rmnet_dev *dev = context;
794
795 if (configured) {
796 queue_work(dev->wq, &dev->connect_work);
797 } else {
798 /* all pending requests will be canceled */
799 if (!atomic_read(&dev->online))
800 return;
801
802 atomic_set(&dev->online, 0);
803
804 usb_ept_fifo_flush(dev->epnotify);
805 usb_ept_enable(dev->epnotify, 0);
806
807 usb_ept_fifo_flush(dev->epout);
808 usb_ept_enable(dev->epout, 0);
809
810 usb_ept_fifo_flush(dev->epin);
811 usb_ept_enable(dev->epin, 0);
812
813 /* cleanup work */
814 queue_work(dev->wq, &dev->disconnect_work);
815 }
816
817}
818
819static void rmnet_free_buf(struct rmnet_dev *dev)
820{
821 struct qmi_buf *qmi;
822 struct usb_request *req;
823 struct list_head *act, *tmp;
824
825 /* free all usb requests in tx pool */
826 list_for_each_safe(act, tmp, &dev->tx_idle) {
827 req = list_entry(act, struct usb_request, list);
828 list_del(&req->list);
829 rmnet_free_req(dev->epout, req);
830 }
831
832 /* free all usb requests in rx pool */
833 list_for_each_safe(act, tmp, &dev->rx_idle) {
834 req = list_entry(act, struct usb_request, list);
835 list_del(&req->list);
836 rmnet_free_req(dev->epin, req);
837 }
838
839 /* free all buffers in qmi request pool */
840 list_for_each_safe(act, tmp, &dev->qmi_req_pool) {
841 qmi = list_entry(act, struct qmi_buf, list);
842 list_del(&qmi->list);
843 rmnet_free_qmi(qmi);
844 }
845
846 /* free all buffers in qmi request pool */
847 list_for_each_safe(act, tmp, &dev->qmi_resp_pool) {
848 qmi = list_entry(act, struct qmi_buf, list);
849 list_del(&qmi->list);
850 rmnet_free_qmi(qmi);
851 }
852
853 rmnet_free_req(dev->epnotify, dev->notify_req);
854}
855
856static void rmnet_bind(void *context)
857{
858 struct rmnet_dev *dev = context;
859 int i, ret;
860 struct usb_request *req;
861 struct qmi_buf *qmi;
862
863 dev->ifc_id = usb_msm_get_next_ifc_number(&rmnet_function);
864 rmnet_interface_desc.bInterfaceNumber = dev->ifc_id;
865
866 /*Configuring IN Endpoint*/
867 dev->epin = usb_alloc_endpoint(USB_DIR_IN);
868 if (!dev->epin)
869 return;
870
871 rmnet_hs_in_desc.bEndpointAddress = USB_DIR_IN |
872 dev->epin->num;
873 rmnet_fs_in_desc.bEndpointAddress = USB_DIR_IN |
874 dev->epin->num;
875
876 /*Configuring OUT Endpoint*/
877 dev->epout = usb_alloc_endpoint(USB_DIR_OUT);
878 if (!dev->epout)
879 goto free_epin;
880
881 rmnet_hs_out_desc.bEndpointAddress = USB_DIR_OUT |
882 dev->epout->num;
883 rmnet_fs_out_desc.bEndpointAddress = USB_DIR_OUT |
884 dev->epout->num;
885
886 /*Configuring NOTIFY Endpoint*/
887 dev->epnotify = usb_alloc_endpoint(USB_DIR_IN);
888 if (!dev->epnotify)
889 goto free_epout;
890
891 rmnet_hs_notify_desc.bEndpointAddress = USB_DIR_IN |
892 dev->epnotify->num;
893 rmnet_fs_notify_desc.bEndpointAddress = USB_DIR_IN |
894 dev->epnotify->num;
895
896 dev->notify_req = usb_ept_alloc_req(dev->epnotify, 0);
897 if (!dev->notify_req)
898 goto free_epnotify;
899
900 dev->notify_req->buf = kmalloc(RMNET_MAX_NOTIFY_SIZE, GFP_KERNEL);
901 if (!dev->notify_req->buf)
902 goto free_buf;;
903
904 dev->notify_req->complete = rmnet_notify_complete;
905 dev->notify_req->context = dev;
906 dev->notify_req->length = RMNET_MAX_NOTIFY_SIZE;
907
908 /* Allocate the qmi request and response buffers */
909 for (i = 0; i < QMI_REQ_MAX; i++) {
910 qmi = rmnet_alloc_qmi(QMI_REQ_SIZE, GFP_KERNEL);
911 if (IS_ERR(qmi)) {
912 ret = PTR_ERR(qmi);
913 goto free_buf;
914 }
915 list_add_tail(&qmi->list, &dev->qmi_req_pool);
916 }
917
918 for (i = 0; i < QMI_RESP_MAX; i++) {
919 qmi = rmnet_alloc_qmi(QMI_RESP_SIZE, GFP_KERNEL);
920 if (IS_ERR(qmi)) {
921 ret = PTR_ERR(qmi);
922 goto free_buf;
923 }
924 list_add_tail(&qmi->list, &dev->qmi_resp_pool);
925 }
926
927 /* Allocate bulk in/out requests for data transfer */
928 for (i = 0; i < RX_REQ_MAX; i++) {
929 req = rmnet_alloc_req(dev->epout, RX_REQ_SIZE, GFP_KERNEL);
930 if (IS_ERR(req)) {
931 ret = PTR_ERR(req);
932 goto free_buf;
933 }
934 req->length = TXN_MAX;
935 req->context = dev;
936 req->complete = rmnet_complete_epout;
937 list_add_tail(&req->list, &dev->rx_idle);
938 }
939
940 for (i = 0; i < TX_REQ_MAX; i++) {
941 req = rmnet_alloc_req(dev->epout, TX_REQ_SIZE, GFP_KERNEL);
942 if (IS_ERR(req)) {
943 ret = PTR_ERR(req);
944 goto free_buf;
945 }
946 req->context = dev;
947 req->complete = rmnet_complete_epin;
948 list_add_tail(&req->list, &dev->tx_idle);
949 }
950
951
952 pr_info("Rmnet function bind completed\n");
953
954 return;
955
956free_buf:
957 rmnet_free_buf(dev);
958free_epnotify:
959 usb_free_endpoint(dev->epnotify);
960free_epout:
961 usb_free_endpoint(dev->epout);
962free_epin:
963 usb_free_endpoint(dev->epin);
964
965}
966
967static void rmnet_unbind(void *context)
968{
969 struct rmnet_dev *dev = context;
970
971 tasklet_kill(&dev->smd_ctl.rx_tlet);
972 tasklet_kill(&dev->smd_ctl.tx_tlet);
973 tasklet_kill(&dev->smd_data.rx_tlet);
974 tasklet_kill(&dev->smd_data.rx_tlet);
975 flush_workqueue(dev->wq);
976
977 rmnet_free_buf(dev);
978 usb_free_endpoint(dev->epin);
979 usb_free_endpoint(dev->epout);
980 usb_free_endpoint(dev->epnotify);
981
982 kfree(dev);
983
984}
/* Callbacks registered with the MSM usb_function framework */
static struct usb_function rmnet_function = {
	.bind = rmnet_bind,
	.configure = rmnet_configure,
	.unbind = rmnet_unbind,
	.setup = rmnet_setup,
	.name = "rmnet",
};

/* interface + 3 endpoints, NULL terminated; populated in rmnet_init() */
struct usb_descriptor_header *rmnet_hs_descriptors[5];
struct usb_descriptor_header *rmnet_fs_descriptors[5];
/*
 * Module init: allocate device state, the workqueue and tasklets, fill
 * in the descriptor arrays and register with the function framework.
 * Returns 0 on success or a negative errno.
 */
static int __init rmnet_init(void)
{
	struct rmnet_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* single-threaded: connect/disconnect work must serialize */
	dev->wq = create_singlethread_workqueue("k_rmnet_work");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto free_dev;
	}

	spin_lock_init(&dev->lock);
	atomic_set(&dev->notify_count, 0);
	atomic_set(&dev->online, 0);
	atomic_set(&dev->smd_ctl.rx_pkt, 0);
	atomic_set(&dev->smd_data.rx_pkt, 0);

	INIT_WORK(&dev->connect_work, rmnet_connect_work);
	INIT_WORK(&dev->disconnect_work, rmnet_disconnect_work);

	tasklet_init(&dev->smd_ctl.rx_tlet, rmnet_control_rx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_ctl.tx_tlet, rmnet_control_tx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_data.rx_tlet, rmnet_data_rx_tlet,
			(unsigned long) dev);
	tasklet_init(&dev->smd_data.tx_tlet, rmnet_data_tx_tlet,
			(unsigned long) dev);

	init_waitqueue_head(&dev->smd_ctl.wait);
	init_waitqueue_head(&dev->smd_data.wait);

	INIT_LIST_HEAD(&dev->qmi_req_pool);
	INIT_LIST_HEAD(&dev->qmi_req_q);
	INIT_LIST_HEAD(&dev->qmi_resp_pool);
	INIT_LIST_HEAD(&dev->qmi_resp_q);
	INIT_LIST_HEAD(&dev->rx_idle);
	INIT_LIST_HEAD(&dev->rx_queue);
	INIT_LIST_HEAD(&dev->tx_idle);

	/* high-speed descriptor list: interface + 3 endpoints */
	rmnet_hs_descriptors[0] =
		(struct usb_descriptor_header *)&rmnet_interface_desc;
	rmnet_hs_descriptors[1] =
		(struct usb_descriptor_header *)&rmnet_hs_in_desc;
	rmnet_hs_descriptors[2] =
		(struct usb_descriptor_header *)&rmnet_hs_out_desc;
	rmnet_hs_descriptors[3] =
		(struct usb_descriptor_header *)&rmnet_hs_notify_desc;
	rmnet_hs_descriptors[4] = NULL;

	/* full-speed descriptor list: interface + 3 endpoints */
	rmnet_fs_descriptors[0] =
		(struct usb_descriptor_header *)&rmnet_interface_desc;
	rmnet_fs_descriptors[1] =
		(struct usb_descriptor_header *)&rmnet_fs_in_desc;
	rmnet_fs_descriptors[2] =
		(struct usb_descriptor_header *)&rmnet_fs_out_desc;
	rmnet_fs_descriptors[3] =
		(struct usb_descriptor_header *)&rmnet_fs_notify_desc;
	rmnet_fs_descriptors[4] = NULL;

	rmnet_function.hs_descriptors = rmnet_hs_descriptors;
	rmnet_function.fs_descriptors = rmnet_fs_descriptors;
	rmnet_function.context = dev;

	ret = usb_function_register(&rmnet_function);
	if (ret)
		goto free_wq;

	return 0;

free_wq:
	destroy_workqueue(dev->wq);
free_dev:
	kfree(dev);

	return ret;
}
1076
/* Module teardown: unregister; unbind() releases the device state. */
static void __exit rmnet_exit(void)
{
	usb_function_unregister(&rmnet_function);
}

module_init(rmnet_init);
module_exit(rmnet_exit);
MODULE_DESCRIPTION("RmNet usb function driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");