/*
 * Gadget Function Driver for MTP
 *
 * Copyright (C) 2010 Google, Inc.
 * Author: Mike Lockwood <lockwood@android.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* #define DEBUG */
/* #define VERBOSE_DEBUG */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/interrupt.h>

#include <linux/types.h>
#include <linux/file.h>
#include <linux/device.h>
#include <linux/miscdevice.h>

#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/usb/ch9.h>
#include <linux/usb/android_composite.h>
#include <linux/usb/f_mtp.h>

#define BULK_BUFFER_SIZE	16384
#define INTR_BUFFER_SIZE	28

/* String IDs */
#define INTERFACE_STRING_INDEX	0

/* values for mtp_dev.state */
#define STATE_OFFLINE	0	/* initial state, disconnected */
#define STATE_READY	1	/* ready for userspace calls */
#define STATE_BUSY	2	/* processing userspace calls */
#define STATE_CANCELED	3	/* transaction canceled by host */
#define STATE_ERROR	4	/* error from completion routine */
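
/*
 * State transitions, as implemented below (a summary, not a formal spec):
 * mtp_function_set_alt() moves OFFLINE -> READY; read/write and the file
 * transfer ioctls move READY -> BUSY and back; a host MTP_REQ_CANCEL
 * moves BUSY -> CANCELED, which is reported to userspace as -ECANCELED;
 * disable and unbind force any state back to OFFLINE.
 */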

/* number of tx and rx requests to allocate */
#define TX_REQ_MAX 4
#define RX_REQ_MAX 2

/* ID for Microsoft MTP OS String */
#define MTP_OS_STRING_ID   0xEE

/* MTP class requests */
#define MTP_REQ_CANCEL              0x64
#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
#define MTP_REQ_RESET               0x66
#define MTP_REQ_GET_DEVICE_STATUS   0x67

/* constants for device status */
#define MTP_RESPONSE_OK             0x2001
#define MTP_RESPONSE_DEVICE_BUSY    0x2019

static const char shortname[] = "mtp_usb";

struct mtp_dev {
	struct usb_function function;
	struct usb_composite_dev *cdev;
	spinlock_t lock;

	/* appear as MTP or PTP when enumerating */
	int interface_mode;

	struct usb_ep *ep_in;
	struct usb_ep *ep_out;
	struct usb_ep *ep_intr;

	int state;

	/* synchronize access to our device file */
	atomic_t open_excl;
	/* to enforce only one ioctl at a time */
	atomic_t ioctl_excl;

	struct list_head tx_idle;

	wait_queue_head_t read_wq;
	wait_queue_head_t write_wq;
	struct usb_request *rx_req[RX_REQ_MAX];
	struct usb_request *intr_req;
	int rx_done;
	/* true if interrupt endpoint is busy */
	int intr_busy;

	/* for processing MTP_SEND_FILE and MTP_RECEIVE_FILE
	 * ioctls on a work queue
	 */
	struct workqueue_struct *wq;
	struct work_struct send_file_work;
	struct work_struct receive_file_work;
	struct file *xfer_file;
	loff_t xfer_file_offset;
	int64_t xfer_file_length;
	int xfer_result;
};

static struct usb_interface_descriptor mtp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
	.bInterfaceProtocol     = 0,
};

static struct usb_interface_descriptor ptp_interface_desc = {
	.bLength                = USB_DT_INTERFACE_SIZE,
	.bDescriptorType        = USB_DT_INTERFACE,
	.bInterfaceNumber       = 0,
	.bNumEndpoints          = 3,
	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
	.bInterfaceSubClass     = 1,
	.bInterfaceProtocol     = 1,
};

static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize         = __constant_cpu_to_le16(512),
};

static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_OUT,
	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor mtp_intr_desc = {
	.bLength                = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType        = USB_DT_ENDPOINT,
	.bEndpointAddress       = USB_DIR_IN,
	.bmAttributes           = USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
	.bInterval              = 6,
};

static struct usb_descriptor_header *fs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_mtp_descs[] = {
	(struct usb_descriptor_header *) &mtp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *fs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_descriptor_header *hs_ptp_descs[] = {
	(struct usb_descriptor_header *) &ptp_interface_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
	(struct usb_descriptor_header *) &mtp_intr_desc,
	NULL,
};

static struct usb_string mtp_string_defs[] = {
	/* Naming interface "MTP" so libmtp will recognize us */
	[INTERFACE_STRING_INDEX].s = "MTP",
	{  },	/* end of list */
};

static struct usb_gadget_strings mtp_string_table = {
	.language	= 0x0409,	/* en-US */
	.strings	= mtp_string_defs,
};

static struct usb_gadget_strings *mtp_strings[] = {
	&mtp_string_table,
	NULL,
};

/* Microsoft MTP OS String */
static u8 mtp_os_string[] = {
	18, /* sizeof(mtp_os_string) */
	USB_DT_STRING,
	/* Signature field: "MSFT100" */
	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
	/* vendor code */
	1,
	/* padding */
	0
};
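
/*
 * Background: a Windows host reads string descriptor index 0xEE during
 * enumeration; the "MSFT100" signature above announces Microsoft OS
 * descriptor support, and the vendor code (1) is the bRequest value the
 * host will then use to fetch the extended configuration descriptor below.
 */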

/* Microsoft Extended Configuration Descriptor Header Section */
struct mtp_ext_config_desc_header {
	__le32	dwLength;
	__u16	bcdVersion;
	__le16	wIndex;
	__u8	bCount;
	__u8	reserved[7];
};

/* Microsoft Extended Configuration Descriptor Function Section */
struct mtp_ext_config_desc_function {
	__u8	bFirstInterfaceNumber;
	__u8	bInterfaceCount;
	__u8	compatibleID[8];
	__u8	subCompatibleID[8];
	__u8	reserved[6];
};

/* MTP Extended Configuration Descriptor */
struct {
	struct mtp_ext_config_desc_header	header;
	struct mtp_ext_config_desc_function	function;
} mtp_ext_config_desc = {
	.header = {
		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
		.bcdVersion = __constant_cpu_to_le16(0x0100),
		.wIndex = __constant_cpu_to_le16(4),
		/* bCount is a single byte, so no byte-order conversion */
		.bCount = 1,
	},
	.function = {
		.bFirstInterfaceNumber = 0,
		.bInterfaceCount = 1,
		.compatibleID = { 'M', 'T', 'P' },
	},
};
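
/*
 * The "MTP" compatible ID above lets Windows bind its in-box MTP class
 * driver to this vendor-specific interface without needing an INF file.
 */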

struct mtp_device_status {
	__le16	wLength;
	__le16	wCode;
};

/* temporary variable used between mtp_open() and mtp_gadget_bind() */
static struct mtp_dev *_mtp_dev;

static inline struct mtp_dev *func_to_dev(struct usb_function *f)
{
	return container_of(f, struct mtp_dev, function);
}

static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!req)
		return NULL;

	/* now allocate buffers for the requests */
	req->buf = kmalloc(buffer_size, GFP_KERNEL);
	if (!req->buf) {
		usb_ep_free_request(ep, req);
		return NULL;
	}

	return req;
}

static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
{
	if (req) {
		kfree(req->buf);
		usb_ep_free_request(ep, req);
	}
}

static inline int _lock(atomic_t *excl)
{
	if (atomic_inc_return(excl) == 1) {
		return 0;
	} else {
		atomic_dec(excl);
		return -1;
	}
}

static inline void _unlock(atomic_t *excl)
{
	atomic_dec(excl);
}

/* add a request to the tail of a list */
static void req_put(struct mtp_dev *dev, struct list_head *head,
		struct usb_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_add_tail(&req->list, head);
	spin_unlock_irqrestore(&dev->lock, flags);
}

/* remove a request from the head of a list */
static struct usb_request *req_get(struct mtp_dev *dev, struct list_head *head)
{
	unsigned long flags;
	struct usb_request *req;

	spin_lock_irqsave(&dev->lock, flags);
	if (list_empty(head)) {
		req = NULL;
	} else {
		req = list_first_entry(head, struct usb_request, list);
		list_del(&req->list);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	return req;
}

static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	if (req->status != 0)
		dev->state = STATE_ERROR;

	req_put(dev, &dev->tx_idle, req);

	wake_up(&dev->write_wq);
}

static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	dev->rx_done = 1;
	if (req->status != 0)
		dev->state = STATE_ERROR;

	wake_up(&dev->read_wq);
}

static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
{
	struct mtp_dev *dev = _mtp_dev;

	DBG(dev->cdev, "mtp_complete_intr status: %d actual: %d\n",
			req->status, req->actual);
	dev->intr_busy = 0;
	if (req->status != 0)
		dev->state = STATE_ERROR;
}

static int __init create_bulk_endpoints(struct mtp_dev *dev,
				struct usb_endpoint_descriptor *in_desc,
				struct usb_endpoint_descriptor *out_desc,
				struct usb_endpoint_descriptor *intr_desc)
{
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	struct usb_ep *ep;
	int i;

	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);

	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_in = ep;

	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_out = ep;

	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
	if (!ep) {
		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
		return -ENODEV;
	}
	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
	ep->driver_data = dev;		/* claim the endpoint */
	dev->ep_intr = ep;

	/* now allocate requests for our endpoints */
	for (i = 0; i < TX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_in, BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_in;
		req_put(dev, &dev->tx_idle, req);
	}
	for (i = 0; i < RX_REQ_MAX; i++) {
		req = mtp_request_new(dev->ep_out, BULK_BUFFER_SIZE);
		if (!req)
			goto fail;
		req->complete = mtp_complete_out;
		dev->rx_req[i] = req;
	}
	req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
	if (!req)
		goto fail;
	req->complete = mtp_complete_intr;
	dev->intr_req = req;

	return 0;

fail:
	printk(KERN_ERR "mtp_bind() could not allocate requests\n");
	return -1;
}
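
/*
 * Sizing note (from the constants above): the driver pre-allocates
 * TX_REQ_MAX (4) IN and RX_REQ_MAX (2) OUT requests of BULK_BUFFER_SIZE
 * bytes each, plus one INTR_BUFFER_SIZE interrupt request, so the I/O
 * paths below never allocate requests at runtime.
 */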

static ssize_t mtp_read(struct file *fp, char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req;
	int r = count, xfer;
	int ret = 0;

	DBG(cdev, "mtp_read(%zu)\n", count);

	if (count > BULK_BUFFER_SIZE)
		return -EINVAL;

	/* we will block until we're online */
	DBG(cdev, "mtp_read: waiting for online state\n");
	ret = wait_event_interruptible(dev->read_wq,
		dev->state != STATE_OFFLINE);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

requeue_req:
	/* queue a request */
	req = dev->rx_req[0];
	req->length = count;
	dev->rx_done = 0;
	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
	if (ret < 0) {
		r = -EIO;
		goto done;
	} else {
		DBG(cdev, "rx %p queue\n", req);
	}

	/* wait for a request to complete */
	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
	if (ret < 0) {
		r = ret;
		goto done;
	}
	if (dev->state == STATE_BUSY) {
		/* If we got a 0-len packet, throw it back and try again. */
		if (req->actual == 0)
			goto requeue_req;

		DBG(cdev, "rx %p %d\n", req, req->actual);
		xfer = (req->actual < count) ? req->actual : count;
		r = xfer;
		if (copy_to_user(buf, req->buf, xfer))
			r = -EFAULT;
	} else
		r = -EIO;

done:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_read returning %d\n", r);
	return r;
}

static ssize_t mtp_write(struct file *fp, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct mtp_dev *dev = fp->private_data;
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = NULL;
	int r = count, xfer;
	int sendZLP = 0;
	int ret;

	DBG(cdev, "mtp_write(%zu)\n", count);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED) {
		/* report cancelation to userspace */
		dev->state = STATE_READY;
		spin_unlock_irq(&dev->lock);
		return -ECANCELED;
	}
	if (dev->state == STATE_OFFLINE) {
		spin_unlock_irq(&dev->lock);
		return -ENODEV;
	}
	dev->state = STATE_BUSY;
	spin_unlock_irq(&dev->lock);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		if (dev->state != STATE_BUSY) {
			DBG(cdev, "mtp_write dev->error\n");
			r = -EIO;
			break;
		}

		/* get an idle tx request to use */
		req = NULL;
		ret = wait_event_interruptible(dev->write_wq,
			((req = req_get(dev, &dev->tx_idle))
				|| dev->state != STATE_BUSY));
		if (!req) {
			r = ret;
			break;
		}

		if (count > BULK_BUFFER_SIZE)
			xfer = BULK_BUFFER_SIZE;
		else
			xfer = count;
		if (xfer && copy_from_user(req->buf, buf, xfer)) {
			r = -EFAULT;
			break;
		}

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "mtp_write: xfer error %d\n", ret);
			r = -EIO;
			break;
		}

		buf += xfer;
		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = NULL;
	}

	if (req)
		req_put(dev, &dev->tx_idle, req);

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		r = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);

	DBG(cdev, "mtp_write returning %d\n", r);
	return r;
}
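
/*
 * Worked example for the ZLP logic above: with a 512-byte bulk packet
 * size, a 1024-byte transfer ends exactly on a packet boundary, so the
 * host cannot tell the transfer is finished until a zero-length packet
 * follows; a 1000-byte transfer already terminates with a 488-byte
 * short packet and needs no ZLP.
 */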

/* read from a local file and write to USB */
static void send_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						send_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int xfer, ret;
	int r = 0;
	int sendZLP = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);

	/* we need to send a zero length packet to signal the end of transfer
	 * if the transfer size is aligned to a packet boundary.
	 */
	if ((dev->xfer_file_length & (dev->ep_in->maxpacket - 1)) == 0)
		sendZLP = 1;

	while (count > 0 || sendZLP) {
		/* so we exit after sending ZLP */
		if (count == 0)
			sendZLP = 0;

		/* get an idle tx request to use */
		req = NULL;
		ret = wait_event_interruptible(dev->write_wq,
			(req = req_get(dev, &dev->tx_idle))
			|| dev->state != STATE_BUSY);
		if (dev->state == STATE_CANCELED) {
			r = -ECANCELED;
			break;
		}
		if (!req) {
			r = ret;
			break;
		}

		if (count > BULK_BUFFER_SIZE)
			xfer = BULK_BUFFER_SIZE;
		else
			xfer = count;
		ret = vfs_read(filp, req->buf, xfer, &offset);
		if (ret < 0) {
			r = ret;
			break;
		}
		xfer = ret;

		req->length = xfer;
		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
		if (ret < 0) {
			DBG(cdev, "send_file_work: xfer error %d\n", ret);
			dev->state = STATE_ERROR;
			r = -EIO;
			break;
		}

		count -= xfer;

		/* zero this so we don't try to free it on error exit */
		req = NULL;
	}

	if (req)
		req_put(dev, &dev->tx_idle, req);

	DBG(cdev, "send_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}

/* read from USB and write to a local file */
static void receive_file_work(struct work_struct *data)
{
	struct mtp_dev *dev = container_of(data, struct mtp_dev,
						receive_file_work);
	struct usb_composite_dev *cdev = dev->cdev;
	struct usb_request *read_req = NULL, *write_req = NULL;
	struct file *filp;
	loff_t offset;
	int64_t count;
	int ret, cur_buf = 0;
	int r = 0;

	/* read our parameters */
	smp_rmb();
	filp = dev->xfer_file;
	offset = dev->xfer_file_offset;
	count = dev->xfer_file_length;

	DBG(cdev, "receive_file_work(%lld)\n", count);

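	/* Pipeline note: while one rx_req is in flight on the bulk-out
	 * endpoint, the previously completed buffer is written out with
	 * vfs_write(), so USB transfers and file I/O overlap.
	 */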
	while (count > 0 || write_req) {
		if (count > 0) {
			/* queue a request */
			read_req = dev->rx_req[cur_buf];
			cur_buf = (cur_buf + 1) % RX_REQ_MAX;

			read_req->length = (count > BULK_BUFFER_SIZE
					? BULK_BUFFER_SIZE : count);
			dev->rx_done = 0;
			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
			if (ret < 0) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
		}

		if (write_req) {
			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
			ret = vfs_write(filp, write_req->buf,
				write_req->actual, &offset);
			DBG(cdev, "vfs_write %d\n", ret);
			if (ret != write_req->actual) {
				r = -EIO;
				dev->state = STATE_ERROR;
				break;
			}
			write_req = NULL;
		}

		if (read_req) {
			/* wait for our last read to complete */
			ret = wait_event_interruptible(dev->read_wq,
				dev->rx_done || dev->state != STATE_BUSY);
			if (dev->state == STATE_CANCELED) {
				r = -ECANCELED;
				if (!dev->rx_done)
					usb_ep_dequeue(dev->ep_out, read_req);
				break;
			}
			/* if xfer_file_length is 0xFFFFFFFF, then we read
			 * until we get a zero length packet
			 */
			if (count != 0xFFFFFFFF)
				count -= read_req->actual;
			if (read_req->actual < read_req->length) {
				/* short packet is used to signal EOF
				 * for sizes > 4 gig
				 */
				DBG(cdev, "got short packet\n");
				count = 0;
			}

			write_req = read_req;
			read_req = NULL;
		}
	}

	DBG(cdev, "receive_file_work returning %d\n", r);
	/* write the result */
	dev->xfer_result = r;
	smp_wmb();
}

static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
	struct usb_request *req;
	int ret;
	int length = event->length;

	DBG(dev->cdev, "mtp_send_event(%d)\n", event->length);

	if (length < 0 || length > INTR_BUFFER_SIZE)
		return -EINVAL;
	if (dev->state == STATE_OFFLINE)
		return -ENODEV;
	/* unfortunately an interrupt request might hang indefinitely if
	 * the host is not listening on the interrupt endpoint, so instead
	 * of waiting, we just fail if the endpoint is busy.
	 */
	if (dev->intr_busy)
		return -EBUSY;

	req = dev->intr_req;
	if (copy_from_user(req->buf, (void __user *)event->data, length))
		return -EFAULT;
	req->length = length;
	dev->intr_busy = 1;
	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
	if (ret)
		dev->intr_busy = 0;

	return ret;
}

static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
{
	struct mtp_dev *dev = fp->private_data;
	struct file *filp = NULL;
	int ret = -EINVAL;

	if (_lock(&dev->ioctl_excl))
		return -EBUSY;

	switch (code) {
	case MTP_SEND_FILE:
	case MTP_RECEIVE_FILE:
	{
		struct mtp_file_range	mfr;
		struct work_struct *work;

		spin_lock_irq(&dev->lock);
		if (dev->state == STATE_CANCELED) {
			/* report cancelation to userspace */
			dev->state = STATE_READY;
			spin_unlock_irq(&dev->lock);
			ret = -ECANCELED;
			goto out;
		}
		if (dev->state == STATE_OFFLINE) {
			spin_unlock_irq(&dev->lock);
			ret = -ENODEV;
			goto out;
		}
		dev->state = STATE_BUSY;
		spin_unlock_irq(&dev->lock);

		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
			ret = -EFAULT;
			goto fail;
		}
		/* hold a reference to the file while we are working with it */
		filp = fget(mfr.fd);
		if (!filp) {
			ret = -EBADF;
			goto fail;
		}

		/* write the parameters */
		dev->xfer_file = filp;
		dev->xfer_file_offset = mfr.offset;
		dev->xfer_file_length = mfr.length;
		smp_wmb();

		if (code == MTP_SEND_FILE)
			work = &dev->send_file_work;
		else
			work = &dev->receive_file_work;

		/* We do the file transfer on a work queue so it will run
		 * in kernel context, which is necessary for vfs_read and
		 * vfs_write to use our buffers in the kernel address space.
		 */
		queue_work(dev->wq, work);
		/* wait for operation to complete */
		flush_workqueue(dev->wq);
		fput(filp);

		/* read the result */
		smp_rmb();
		ret = dev->xfer_result;
		break;
	}
	case MTP_SET_INTERFACE_MODE:
		if (value == MTP_INTERFACE_MODE_MTP ||
			value == MTP_INTERFACE_MODE_PTP) {
			dev->interface_mode = value;
			if (value == MTP_INTERFACE_MODE_PTP) {
				dev->function.descriptors = fs_ptp_descs;
				dev->function.hs_descriptors = hs_ptp_descs;
			} else {
				dev->function.descriptors = fs_mtp_descs;
				dev->function.hs_descriptors = hs_mtp_descs;
			}
			ret = 0;
		}
		break;
	case MTP_SEND_EVENT:
	{
		struct mtp_event	event;
		/* return here so we don't change dev->state below,
		 * which would interfere with bulk transfer state.
		 */
		if (copy_from_user(&event, (void __user *)value,
					sizeof(event)))
			ret = -EFAULT;
		else
			ret = mtp_send_event(dev, &event);
		goto out;
	}
	}

fail:
	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_CANCELED)
		ret = -ECANCELED;
	else if (dev->state != STATE_OFFLINE)
		dev->state = STATE_READY;
	spin_unlock_irq(&dev->lock);
out:
	_unlock(&dev->ioctl_excl);
	DBG(dev->cdev, "ioctl returning %d\n", ret);
	return ret;
}

static int mtp_open(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_open\n");
	if (_lock(&_mtp_dev->open_excl))
		return -EBUSY;

	/* clear any error condition */
	if (_mtp_dev->state != STATE_OFFLINE)
		_mtp_dev->state = STATE_READY;

	fp->private_data = _mtp_dev;
	return 0;
}

static int mtp_release(struct inode *ip, struct file *fp)
{
	printk(KERN_INFO "mtp_release\n");

	_unlock(&_mtp_dev->open_excl);
	return 0;
}

/* file operations for /dev/mtp_usb */
static const struct file_operations mtp_fops = {
	.owner = THIS_MODULE,
	.read = mtp_read,
	.write = mtp_write,
	.unlocked_ioctl = mtp_ioctl,
	.open = mtp_open,
	.release = mtp_release,
};

static struct miscdevice mtp_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = shortname,
	.fops = &mtp_fops,
};
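
/*
 * Hypothetical userspace usage (a sketch, not part of this driver): an
 * MTP daemon reads command containers from /dev/mtp_usb and hands bulk
 * file data transfers to the kernel with MTP_SEND_FILE / MTP_RECEIVE_FILE.
 * The ioctl names and struct mtp_file_range fields come from
 * <linux/usb/f_mtp.h>; object_fd and file_size are placeholders and
 * error handling is omitted.
 *
 *	int fd = open("/dev/mtp_usb", O_RDWR);
 *	char buffer[16384];
 *	int len = read(fd, buffer, sizeof(buffer));	// MTP command block
 *
 *	struct mtp_file_range mfr = {
 *		.fd	= object_fd,	// regular file with the object data
 *		.offset	= 0,
 *		.length	= file_size,
 *	};
 *	ioctl(fd, MTP_SEND_FILE, &mfr);	// 0 on success, -1/errno on error
 */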

static int
mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct mtp_dev *dev = func_to_dev(f);
	int id;
	int ret;

	dev->cdev = cdev;
	DBG(cdev, "mtp_function_bind dev: %p\n", dev);

	/* allocate interface ID(s) */
	id = usb_interface_id(c, f);
	if (id < 0)
		return id;
	/* both the MTP and PTP descriptors describe this interface */
	mtp_interface_desc.bInterfaceNumber = id;
	ptp_interface_desc.bInterfaceNumber = id;

	/* allocate endpoints */
	ret = create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
			&mtp_fullspeed_out_desc, &mtp_intr_desc);
	if (ret)
		return ret;

	/* support high speed hardware */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		mtp_highspeed_in_desc.bEndpointAddress =
			mtp_fullspeed_in_desc.bEndpointAddress;
		mtp_highspeed_out_desc.bEndpointAddress =
			mtp_fullspeed_out_desc.bEndpointAddress;
	}

	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			f->name, dev->ep_in->name, dev->ep_out->name);
	return 0;
}

static void
mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct mtp_dev *dev = func_to_dev(f);
	struct usb_request *req;
	int i;

	spin_lock_irq(&dev->lock);
	while ((req = req_get(dev, &dev->tx_idle)))
		mtp_request_free(req, dev->ep_in);
	for (i = 0; i < RX_REQ_MAX; i++)
		mtp_request_free(dev->rx_req[i], dev->ep_out);
	mtp_request_free(dev->intr_req, dev->ep_intr);
	dev->state = STATE_OFFLINE;
	spin_unlock_irq(&dev->lock);

	misc_deregister(&mtp_device);
	kfree(_mtp_dev);
	_mtp_dev = NULL;
}

static int mtp_function_setup(struct usb_function *f,
				const struct usb_ctrlrequest *ctrl)
{
	struct mtp_dev *dev = func_to_dev(f);
	struct usb_composite_dev *cdev = dev->cdev;
	int value = -EOPNOTSUPP;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	unsigned long flags;

	/* do nothing if we are disabled */
	if (dev->function.disabled)
		return value;

	VDBG(cdev, "mtp_function_setup "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

	/* Handle MTP OS string */
	if (dev->interface_mode == MTP_INTERFACE_MODE_MTP
			&& ctrl->bRequestType ==
			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
			&& (w_value >> 8) == USB_DT_STRING
			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
		value = (w_length < sizeof(mtp_os_string)
				? w_length : sizeof(mtp_os_string));
		memcpy(cdev->req->buf, mtp_os_string, value);
		/* return here since composite.c will send for us */
		return value;
	}
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
		/* Handle MTP OS descriptor */
		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (dev->interface_mode == MTP_INTERFACE_MODE_MTP
				&& ctrl->bRequest == 1
				&& (ctrl->bRequestType & USB_DIR_IN)
				&& (w_index == 4 || w_index == 5)) {
			value = (w_length < sizeof(mtp_ext_config_desc) ?
					w_length : sizeof(mtp_ext_config_desc));
			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
		}
	}
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
			ctrl->bRequest, w_index, w_value, w_length);

		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
				&& w_value == 0) {
			DBG(cdev, "MTP_REQ_CANCEL\n");

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->state == STATE_BUSY) {
				dev->state = STATE_CANCELED;
				wake_up(&dev->read_wq);
				wake_up(&dev->write_wq);
			}
			spin_unlock_irqrestore(&dev->lock, flags);

			/* We need to queue a request to read the remaining
			 * bytes, but we don't actually need to look at
			 * the contents.
			 */
			value = w_length;
		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
				&& w_index == 0 && w_value == 0) {
			struct mtp_device_status *status = cdev->req->buf;
			status->wLength =
				__constant_cpu_to_le16(sizeof(*status));

			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
			spin_lock_irqsave(&dev->lock, flags);
			/* device status is "busy" until we report
			 * the cancelation to userspace
			 */
			if (dev->state == STATE_CANCELED)
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
			else
				status->wCode =
					__cpu_to_le16(MTP_RESPONSE_OK);
			spin_unlock_irqrestore(&dev->lock, flags);
			value = sizeof(*status);
		}
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		int rc;
		cdev->req->zero = value < w_length;
		cdev->req->length = value;
		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
		if (rc < 0)
			ERROR(cdev, "%s setup response queue error\n",
				__func__);
	}

	if (value == -EOPNOTSUPP)
		VDBG(cdev,
			"unknown class-specific control req "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	return value;
}
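
/*
 * Note on the vendor request handled above: bRequest == 1 is the vendor
 * code advertised in mtp_os_string, and wIndex 4 (extended compat ID) or
 * 5 (extended properties) selects which Microsoft OS feature descriptor
 * the host wants; both are answered here with mtp_ext_config_desc.
 */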

static int mtp_function_set_alt(struct usb_function *f,
		unsigned intf, unsigned alt)
{
	struct mtp_dev *dev = func_to_dev(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	int ret;

	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
	ret = usb_ep_enable(dev->ep_in,
			ep_choose(cdev->gadget,
				&mtp_highspeed_in_desc,
				&mtp_fullspeed_in_desc));
	if (ret)
		return ret;
	ret = usb_ep_enable(dev->ep_out,
			ep_choose(cdev->gadget,
				&mtp_highspeed_out_desc,
				&mtp_fullspeed_out_desc));
	if (ret) {
		usb_ep_disable(dev->ep_in);
		return ret;
	}
	ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc);
	if (ret) {
		usb_ep_disable(dev->ep_out);
		usb_ep_disable(dev->ep_in);
		return ret;
	}
	dev->state = STATE_READY;

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);
	return 0;
}

static void mtp_function_disable(struct usb_function *f)
{
	struct mtp_dev *dev = func_to_dev(f);
	struct usb_composite_dev *cdev = dev->cdev;

	DBG(cdev, "mtp_function_disable\n");
	dev->state = STATE_OFFLINE;
	usb_ep_disable(dev->ep_in);
	usb_ep_disable(dev->ep_out);
	usb_ep_disable(dev->ep_intr);

	/* readers may be blocked waiting for us to go online */
	wake_up(&dev->read_wq);

	VDBG(cdev, "%s disabled\n", dev->function.name);
}

static int mtp_bind_config(struct usb_configuration *c)
{
	struct mtp_dev *dev;
	int ret = 0;

	printk(KERN_INFO "mtp_bind_config\n");

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* allocate a string ID for our interface */
	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
		ret = usb_string_id(c->cdev);
		if (ret < 0)
			goto err1;	/* don't leak dev on failure */
		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
		mtp_interface_desc.iInterface = ret;
	}

	spin_lock_init(&dev->lock);
	init_waitqueue_head(&dev->read_wq);
	init_waitqueue_head(&dev->write_wq);
	atomic_set(&dev->open_excl, 0);
	atomic_set(&dev->ioctl_excl, 0);
	INIT_LIST_HEAD(&dev->tx_idle);

	dev->wq = create_singlethread_workqueue("f_mtp");
	if (!dev->wq) {
		ret = -ENOMEM;
		goto err1;
	}
	INIT_WORK(&dev->send_file_work, send_file_work);
	INIT_WORK(&dev->receive_file_work, receive_file_work);

	dev->cdev = c->cdev;
	dev->function.name = "mtp";
	dev->function.strings = mtp_strings;
	dev->function.descriptors = fs_mtp_descs;
	dev->function.hs_descriptors = hs_mtp_descs;
	dev->function.bind = mtp_function_bind;
	dev->function.unbind = mtp_function_unbind;
	dev->function.setup = mtp_function_setup;
	dev->function.set_alt = mtp_function_set_alt;
	dev->function.disable = mtp_function_disable;

	/* MTP mode by default */
	dev->interface_mode = MTP_INTERFACE_MODE_MTP;

	/* _mtp_dev must be set before calling usb_gadget_register_driver */
	_mtp_dev = dev;

	ret = misc_register(&mtp_device);
	if (ret)
		goto err1;

	ret = usb_add_function(c, &dev->function);
	if (ret)
		goto err2;

	return 0;

err2:
	misc_deregister(&mtp_device);
err1:
	if (dev->wq)
		destroy_workqueue(dev->wq);
	kfree(dev);
	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
	return ret;
}

static struct android_usb_function mtp_function = {
	.name = "mtp",
	.bind_config = mtp_bind_config,
};

static int __init init(void)
{
	printk(KERN_INFO "f_mtp init\n");
	android_register_function(&mtp_function);
	return 0;
}
module_init(init);