blob: cb9827fe5c31b7b6c8924b58e17ea73c77342f40 [file] [log] [blame]
Jerry Zhang487be612016-10-24 12:10:41 -07001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <android-base/logging.h>
18#include <android-base/properties.h>
Jerry Zhangdf69dd32017-05-03 17:17:49 -070019#include <asyncio/AsyncIO.h>
Jerry Zhang487be612016-10-24 12:10:41 -070020#include <dirent.h>
21#include <errno.h>
22#include <fcntl.h>
Jerry Zhangdf69dd32017-05-03 17:17:49 -070023#include <memory>
Jerry Zhang487be612016-10-24 12:10:41 -070024#include <stdio.h>
25#include <stdlib.h>
26#include <string.h>
Jerry Zhangdf69dd32017-05-03 17:17:49 -070027#include <sys/eventfd.h>
Jerry Zhang487be612016-10-24 12:10:41 -070028#include <sys/ioctl.h>
Jerry Zhange9d94422017-01-18 12:03:56 -080029#include <sys/mman.h>
Jerry Zhangdf69dd32017-05-03 17:17:49 -070030#include <sys/poll.h>
Jerry Zhang487be612016-10-24 12:10:41 -070031#include <sys/stat.h>
32#include <sys/types.h>
33#include <unistd.h>
Jerry Zhang487be612016-10-24 12:10:41 -070034
Jerry Zhangdf69dd32017-05-03 17:17:49 -070035#include "PosixAsyncIO.h"
Jerry Zhang69b74502017-10-02 16:26:37 -070036#include "MtpDescriptors.h"
Jerry Zhang487be612016-10-24 12:10:41 -070037#include "MtpFfsHandle.h"
Jerry Zhangcc9d0fd2017-01-27 10:29:59 -080038#include "mtp.h"
Jerry Zhang487be612016-10-24 12:10:41 -070039
namespace {

// FunctionFS endpoint device nodes for the MTP function.
constexpr char FFS_MTP_EP_IN[] = "/dev/usb-ffs/mtp/ep1";
constexpr char FFS_MTP_EP_OUT[] = "/dev/usb-ffs/mtp/ep2";
constexpr char FFS_MTP_EP_INTR[] = "/dev/usb-ffs/mtp/ep3";

// Maximum number of AIO requests queued per io_buffer, and the size of the
// buffer slice backing each request.
constexpr unsigned AIO_BUFS_MAX = 128;
constexpr unsigned AIO_BUF_LEN = 16384;

// Number of usb_functionfs_event records read from ep0 at a time.
constexpr unsigned FFS_NUM_EVENTS = 5;

// Largest chunk moved per batch of AIO requests (one full io_buffer).
constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;

// MTP carries file sizes in 32 bits; >= 4 GiB files are sent as 0xFFFFFFFF.
constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;

// Zero timeout so io_getevents() never blocks (see waitEvents()).
struct timespec ZERO_TIMEOUT = { 0, 0 };

// Wire layout of the MTP_REQ_GET_DEVICE_STATUS response header.
struct mtp_device_status {
    uint16_t wLength;
    uint16_t wCode;
};

} // anonymous namespace
63
64namespace android {
65
Jerry Zhangdf69dd32017-05-03 17:17:49 -070066int MtpFfsHandle::getPacketSize(int ffs_fd) {
67 struct usb_endpoint_descriptor desc;
68 if (ioctl(ffs_fd, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&desc))) {
69 PLOG(ERROR) << "Could not get FFS bulk-in descriptor";
70 return MAX_PACKET_SIZE_HS;
71 } else {
72 return desc.wMaxPacketSize;
73 }
74}
75
76MtpFfsHandle::MtpFfsHandle() {}
Jerry Zhang487be612016-10-24 12:10:41 -070077
78MtpFfsHandle::~MtpFfsHandle() {}
79
80void MtpFfsHandle::closeEndpoints() {
81 mIntr.reset();
82 mBulkIn.reset();
83 mBulkOut.reset();
84}
85
Jerry Zhangdf69dd32017-05-03 17:17:49 -070086bool MtpFfsHandle::openEndpoints() {
87 if (mBulkIn < 0) {
88 mBulkIn.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_IN, O_RDWR)));
89 if (mBulkIn < 0) {
90 PLOG(ERROR) << FFS_MTP_EP_IN << ": cannot open bulk in ep";
91 return false;
92 }
93 }
94
95 if (mBulkOut < 0) {
96 mBulkOut.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_OUT, O_RDWR)));
97 if (mBulkOut < 0) {
98 PLOG(ERROR) << FFS_MTP_EP_OUT << ": cannot open bulk out ep";
99 return false;
100 }
101 }
102
103 if (mIntr < 0) {
104 mIntr.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP_INTR, O_RDWR)));
105 if (mIntr < 0) {
106 PLOG(ERROR) << FFS_MTP_EP_INTR << ": cannot open intr ep";
107 return false;
108 }
109 }
110 return true;
111}
112
113void MtpFfsHandle::advise(int fd) {
114 for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
115 if (posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
116 POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED) < 0)
117 PLOG(ERROR) << "Failed to madvise";
118 }
119 if (posix_fadvise(fd, 0, 0,
120 POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED) < 0)
121 PLOG(ERROR) << "Failed to fadvise";
122}
123
Jerry Zhang487be612016-10-24 12:10:41 -0700124bool MtpFfsHandle::initFunctionfs() {
Jerry Zhang487be612016-10-24 12:10:41 -0700125 if (mControl < 0) { // might have already done this before
126 mControl.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP0, O_RDWR)));
127 if (mControl < 0) {
128 PLOG(ERROR) << FFS_MTP_EP0 << ": cannot open control endpoint";
Jerry Zhang69b74502017-10-02 16:26:37 -0700129 return false;
Jerry Zhang487be612016-10-24 12:10:41 -0700130 }
Jerry Zhang69b74502017-10-02 16:26:37 -0700131 if (!writeDescriptors()) {
132 closeConfig();
133 return false;
Jerry Zhang487be612016-10-24 12:10:41 -0700134 }
135 }
Jerry Zhang487be612016-10-24 12:10:41 -0700136 return true;
Jerry Zhang69b74502017-10-02 16:26:37 -0700137}
Jerry Zhang487be612016-10-24 12:10:41 -0700138
Jerry Zhang69b74502017-10-02 16:26:37 -0700139bool MtpFfsHandle::writeDescriptors() {
140 ssize_t ret = TEMP_FAILURE_RETRY(::write(mControl,
141 &(mPtp ? ptp_desc_v2 : mtp_desc_v2), sizeof(desc_v2)));
142 if (ret < 0) {
143 PLOG(ERROR) << FFS_MTP_EP0 << "Switching to V1 descriptor format";
144 ret = TEMP_FAILURE_RETRY(::write(mControl,
145 &(mPtp ? ptp_desc_v1 : mtp_desc_v1), sizeof(desc_v1)));
146 if (ret < 0) {
147 PLOG(ERROR) << FFS_MTP_EP0 << "Writing descriptors failed";
148 return false;
149 }
150 }
151 ret = TEMP_FAILURE_RETRY(::write(mControl, &mtp_strings, sizeof(mtp_strings)));
152 if (ret < 0) {
153 PLOG(ERROR) << FFS_MTP_EP0 << "Writing strings failed";
154 return false;
155 }
156 return true;
Jerry Zhang487be612016-10-24 12:10:41 -0700157}
158
// Close the FunctionFS control endpoint (ep0).
void MtpFfsHandle::closeConfig() {
    mControl.reset();
}
162
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700163int MtpFfsHandle::doAsync(void* data, size_t len, bool read) {
164 struct io_event ioevs[1];
165 if (len > AIO_BUF_LEN) {
166 LOG(ERROR) << "Mtp read/write too large " << len;
167 errno = EINVAL;
168 return -1;
Jerry Zhang487be612016-10-24 12:10:41 -0700169 }
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700170 mIobuf[0].buf[0] = reinterpret_cast<unsigned char*>(data);
171 if (iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, len, read) == -1)
172 return -1;
173 int ret = waitEvents(&mIobuf[0], 1, ioevs, nullptr);
174 mIobuf[0].buf[0] = mIobuf[0].bufs.data();
Jerry Zhang487be612016-10-24 12:10:41 -0700175 return ret;
176}
177
// Synchronously read up to AIO_BUF_LEN bytes from the bulk-out endpoint.
int MtpFfsHandle::read(void* data, size_t len) {
    return doAsync(data, len, true);
}
181
// Synchronously write up to AIO_BUF_LEN bytes to the bulk-in endpoint.
// The const_cast is safe: a write request never modifies the buffer.
int MtpFfsHandle::write(const void* data, size_t len) {
    return doAsync(const_cast<void*>(data), len, false);
}
185
186int MtpFfsHandle::handleEvent() {
187
188 std::vector<usb_functionfs_event> events(FFS_NUM_EVENTS);
189 usb_functionfs_event *event = events.data();
190 int nbytes = TEMP_FAILURE_RETRY(::read(mControl, event,
191 events.size() * sizeof(usb_functionfs_event)));
192 if (nbytes == -1) {
193 return -1;
194 }
Jerry Zhang487be612016-10-24 12:10:41 -0700195 int ret = 0;
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700196 for (size_t n = nbytes / sizeof *event; n; --n, ++event) {
197 switch (event->type) {
198 case FUNCTIONFS_BIND:
199 case FUNCTIONFS_ENABLE:
200 case FUNCTIONFS_RESUME:
201 ret = 0;
202 errno = 0;
Jerry Zhang487be612016-10-24 12:10:41 -0700203 break;
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700204 case FUNCTIONFS_SUSPEND:
205 case FUNCTIONFS_UNBIND:
206 case FUNCTIONFS_DISABLE:
207 errno = ESHUTDOWN;
208 ret = -1;
Jerry Zhang487be612016-10-24 12:10:41 -0700209 break;
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700210 case FUNCTIONFS_SETUP:
211 if (handleControlRequest(&event->u.setup) == -1)
212 ret = -1;
213 break;
214 default:
215 LOG(ERROR) << "Mtp Event " << event->type << " (unknown)";
216 }
Jerry Zhang487be612016-10-24 12:10:41 -0700217 }
218 return ret;
219}
220
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700221int MtpFfsHandle::handleControlRequest(const struct usb_ctrlrequest *setup) {
222 uint8_t type = setup->bRequestType;
223 uint8_t code = setup->bRequest;
224 uint16_t length = setup->wLength;
225 uint16_t index = setup->wIndex;
226 uint16_t value = setup->wValue;
227 std::vector<char> buf;
228 buf.resize(length);
229 int ret = 0;
Jerry Zhang487be612016-10-24 12:10:41 -0700230
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700231 if (!(type & USB_DIR_IN)) {
232 if (::read(mControl, buf.data(), length) != length) {
233 PLOG(ERROR) << "Mtp error ctrlreq read data";
234 }
235 }
236
237 if ((type & USB_TYPE_MASK) == USB_TYPE_CLASS && index == 0 && value == 0) {
238 switch(code) {
239 case MTP_REQ_RESET:
240 case MTP_REQ_CANCEL:
241 errno = ECANCELED;
242 ret = -1;
243 break;
244 case MTP_REQ_GET_DEVICE_STATUS:
245 {
246 if (length < sizeof(struct mtp_device_status) + 4) {
247 errno = EINVAL;
248 return -1;
249 }
250 struct mtp_device_status *st = reinterpret_cast<struct mtp_device_status*>(buf.data());
251 st->wLength = htole16(sizeof(st));
252 if (mCanceled) {
253 st->wLength += 4;
254 st->wCode = MTP_RESPONSE_TRANSACTION_CANCELLED;
255 uint16_t *endpoints = reinterpret_cast<uint16_t*>(st + 1);
256 endpoints[0] = ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_REVMAP);
257 endpoints[1] = ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_REVMAP);
258 mCanceled = false;
259 } else {
260 st->wCode = MTP_RESPONSE_OK;
261 }
262 length = st->wLength;
263 break;
264 }
265 default:
266 LOG(ERROR) << "Unrecognized Mtp class request! " << code;
267 }
268 } else {
269 LOG(ERROR) << "Unrecognized request type " << type;
270 }
271
272 if (type & USB_DIR_IN) {
273 if (::write(mControl, buf.data(), length) != length) {
274 PLOG(ERROR) << "Mtp error ctrlreq write data";
275 }
276 }
277 return 0;
Jerry Zhang487be612016-10-24 12:10:41 -0700278}
279
// Begin a session: take the handle lock (released in close()), open the
// data endpoints, size the AIO buffer tables, and set up the AIO context,
// completion eventfd, and poll set. Returns 0 on success, -1 on failure.
// NOTE(review): on the failure paths the lock taken here is NOT released;
// presumably the caller invokes close() regardless of the result — confirm.
int MtpFfsHandle::start() {
    mLock.lock();

    if (!openEndpoints())
        return -1;

    // Carve each chunk buffer into AIO_BUFS_MAX slices of AIO_BUF_LEN bytes
    // and point each iocb slot at its preallocated iocb.
    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
        mIobuf[i].iocb.resize(AIO_BUFS_MAX);
        mIobuf[i].iocbs.resize(AIO_BUFS_MAX);
        mIobuf[i].buf.resize(AIO_BUFS_MAX);
        for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
            mIobuf[i].buf[j] = mIobuf[i].bufs.data() + j * AIO_BUF_LEN;
            mIobuf[i].iocb[j] = &mIobuf[i].iocbs[j];
        }
    }

    memset(&mCtx, 0, sizeof(mCtx));
    if (io_setup(AIO_BUFS_MAX, &mCtx) < 0) {
        PLOG(ERROR) << "unable to setup aio";
        return -1;
    }
    // The eventfd is signalled on AIO completion (IOCB_FLAG_RESFD in
    // iobufSubmit). NOTE(review): the eventfd() result is not checked.
    mEventFd.reset(eventfd(0, EFD_NONBLOCK));
    // Poll set used by waitEvents(): [0] = ep0 control events,
    // [1] = AIO completion notifications.
    mPollFds[0].fd = mControl;
    mPollFds[0].events = POLLIN;
    mPollFds[1].fd = mEventFd;
    mPollFds[1].events = POLLIN;

    mCanceled = false;
    return 0;
}
311
312int MtpFfsHandle::configure(bool usePtp) {
313 // Wait till previous server invocation has closed
Jerry Zhang0475d912017-04-03 11:24:48 -0700314 if (!mLock.try_lock_for(std::chrono::milliseconds(1000))) {
315 LOG(ERROR) << "MtpServer was unable to get configure lock";
316 return -1;
317 }
318 int ret = 0;
Jerry Zhang487be612016-10-24 12:10:41 -0700319
320 // If ptp is changed, the configuration must be rewritten
321 if (mPtp != usePtp) {
322 closeEndpoints();
323 closeConfig();
324 }
325 mPtp = usePtp;
326
327 if (!initFunctionfs()) {
Jerry Zhang0475d912017-04-03 11:24:48 -0700328 ret = -1;
Jerry Zhang487be612016-10-24 12:10:41 -0700329 }
Jerry Zhangdf69dd32017-05-03 17:17:49 -0700330
Jerry Zhang0475d912017-04-03 11:24:48 -0700331 mLock.unlock();
332 return ret;
Jerry Zhang487be612016-10-24 12:10:41 -0700333}
334
// End the session started by start(): destroy the AIO context, close the
// data endpoints, and release the handle lock.
void MtpFfsHandle::close() {
    io_destroy(mCtx);
    closeEndpoints();
    mLock.unlock();
}
340
// Reap AIO completions for buf until at least min_events have arrived,
// servicing ep0 control events (e.g. host cancel) that occur meanwhile.
// Completion records are written into `events`; if `counter` is non-null
// it is incremented by the number of events reaped. Returns the total
// bytes transferred by the reaped events, or -1 with errno set to the
// first error seen.
// NOTE(review): poll() is called with a 0 timeout, so this loop busy-waits
// until something is ready — confirm that is intended.
int MtpFfsHandle::waitEvents(struct io_buffer *buf, int min_events, struct io_event *events,
        int *counter) {
    int num_events = 0;
    int ret = 0;
    int error = 0;

    while (num_events < min_events) {
        if (poll(mPollFds, 2, 0) == -1) {
            PLOG(ERROR) << "Mtp error during poll()";
            return -1;
        }
        // Control endpoint activity (setup requests, enable/disable, ...).
        if (mPollFds[0].revents & POLLIN) {
            mPollFds[0].revents = 0;
            if (handleEvent() == -1) {
                error = errno;
            }
        }
        // AIO completion(s) signalled through the eventfd.
        if (mPollFds[1].revents & POLLIN) {
            mPollFds[1].revents = 0;
            uint64_t ev_cnt = 0;

            if (::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1) {
                PLOG(ERROR) << "Mtp unable to read eventfd";
                error = errno;
                continue;
            }

            // It's possible that io_getevents will return more events than the eventFd reported,
            // since events may appear in the time between the calls. In this case, the eventFd will
            // show up as readable next iteration, but there will be fewer or no events to actually
            // wait for. Thus we never want io_getevents to block.
            int this_events = TEMP_FAILURE_RETRY(io_getevents(mCtx, 0, AIO_BUFS_MAX, events, &ZERO_TIMEOUT));
            if (this_events == -1) {
                PLOG(ERROR) << "Mtp error getting events";
                error = errno;
            }
            // Add up the total amount of data and find errors on the way.
            for (unsigned j = 0; j < static_cast<unsigned>(this_events); j++) {
                if (events[j].res < 0) {
                    // res carries a negated errno on failure.
                    errno = -events[j].res;
                    PLOG(ERROR) << "Mtp got error event at " << j << " and " << buf->actual << " total";
                    error = errno;
                }
                ret += events[j].res;
            }
            num_events += this_events;
            if (counter)
                *counter += this_events;
        }
        // Stop at the first error; report it via errno and return -1.
        if (error) {
            errno = error;
            ret = -1;
            break;
        }
    }
    return ret;
}
398
// Cancel the in-progress transfer from the device side and leave
// errno = ECANCELED for the caller.
void MtpFfsHandle::cancelTransaction() {
    // Device cancels by stalling both bulk endpoints.
    // A successful stall shows up as a -1/EBADMSG result from the
    // zero-length read/write; anything else means the stall failed.
    if (::read(mBulkIn, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk in";
    if (::write(mBulkOut, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk out";
    mCanceled = true;
    errno = ECANCELED;
}
408
// Cancel the outstanding AIO requests iocb[start..end) and reap their
// completion events into `events`. On full success errno is restored to
// its value at entry; returns -1 (errno = EIO) if any request could not
// be cancelled or reaped.
int MtpFfsHandle::cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start,
        unsigned end) {
    // Some manpages for io_cancel are out of date and incorrect.
    // io_cancel will return -EINPROGRESS on success and does
    // not place the event in the given memory. We have to use
    // io_getevents to wait for all the events we cancelled.
    int ret = 0;
    unsigned num_events = 0;
    int save_errno = errno;
    errno = 0;

    for (unsigned j = start; j < end; j++) {
        if (io_cancel(mCtx, iocb[j], nullptr) != -1 || errno != EINPROGRESS) {
            PLOG(ERROR) << "Mtp couldn't cancel request " << j;
        } else {
            num_events++;
        }
    }
    if (num_events != end - start) {
        ret = -1;
        errno = EIO;
    }
    // Block until every successfully-cancelled request has been reaped.
    int evs = TEMP_FAILURE_RETRY(io_getevents(mCtx, num_events, AIO_BUFS_MAX, events, nullptr));
    if (static_cast<unsigned>(evs) != num_events) {
        PLOG(ERROR) << "Mtp couldn't cancel all requests, got " << evs;
        ret = -1;
    }

    // Drain the eventfd notification produced by the reaped events.
    uint64_t ev_cnt = 0;
    if (num_events && ::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1)
        PLOG(ERROR) << "Mtp Unable to read event fd";

    if (ret == 0) {
        // Restore errno since it probably got overriden with EINPROGRESS.
        errno = save_errno;
    }
    return ret;
}
447
448int MtpFfsHandle::iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read) {
449 int ret = 0;
450 buf->actual = AIO_BUFS_MAX;
451 for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
452 unsigned rq_length = std::min(AIO_BUF_LEN, length - AIO_BUF_LEN * j);
453 io_prep(buf->iocb[j], fd, buf->buf[j], rq_length, 0, read);
454 buf->iocb[j]->aio_flags |= IOCB_FLAG_RESFD;
455 buf->iocb[j]->aio_resfd = mEventFd;
456
457 // Not enough data, so table is truncated.
458 if (rq_length < AIO_BUF_LEN || length == AIO_BUF_LEN * (j + 1)) {
459 buf->actual = j + 1;
460 break;
461 }
462 }
463
464 ret = io_submit(mCtx, buf->actual, buf->iocb.data());
465 if (ret != static_cast<int>(buf->actual)) {
466 PLOG(ERROR) << "Mtp io_submit got " << ret << " expected " << buf->actual;
467 if (ret != -1) {
468 errno = EIO;
469 }
470 ret = -1;
471 }
472 return ret;
473}
474
// Receive a file from the host: pipelined copy from USB (Linux AIO on the
// bulk-out ep) to disk (POSIX AIO write on mfr.fd). Two io_buffers are
// ping-ponged so a USB read and a disk write are always in flight
// together. Returns 0 on success, -1 with errno set on error.
int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
    // When receiving files, the incoming length is given in 32 bits.
    // A >=4G file is given as 0xFFFFFFFF
    uint32_t file_length = mfr.length;
    uint64_t offset = mfr.offset;

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    aio.aio_buf = nullptr;
    struct aiocb *aiol[] = {&aio};

    int ret = -1;
    unsigned i = 0;              // index of the io_buffer in use this round
    size_t length;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool has_write = false;      // a disk write is outstanding
    bool error = false;          // USB submit failed
    bool write_error = false;    // disk write (or cancel) failed
    int packet_size = getPacketSize(mBulkOut);
    bool short_packet = false;
    advise(mfr.fd);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        // Queue an asynchronous read from USB.
        if (file_length > 0) {
            length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
            if (iobufSubmit(&mIobuf[i], mBulkOut, length, true) == -1)
                error = true;
        }

        // Get the return status of the last write request.
        if (has_write) {
            aio_suspend(aiol, 1, nullptr);
            int written = aio_return(&aio);
            if (static_cast<size_t>(written) < aio.aio_nbytes) {
                errno = written == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error writing to disk";
                write_error = true;
            }
            has_write = false;
        }

        if (error) {
            return -1;
        }

        // Get the result of the read request, and queue a write to disk.
        if (file_length > 0) {
            unsigned num_events = 0;
            ret = 0;
            // short_i marks the request just past the short read, if any;
            // only events before it need to be waited for.
            unsigned short_i = mIobuf[i].actual;
            while (num_events < short_i) {
                // Get all events up to the short read, if there is one.
                // We must wait for each event since data transfer could end at any time.
                int this_events = 0;
                int event_ret = waitEvents(&mIobuf[i], 1, ioevs, &this_events);
                num_events += this_events;

                if (event_ret == -1) {
                    cancelEvents(mIobuf[i].iocb.data(), ioevs, num_events, mIobuf[i].actual);
                    return -1;
                }
                ret += event_ret;
                for (int j = 0; j < this_events; j++) {
                    // struct io_event contains a pointer to the associated struct iocb as a __u64.
                    if (static_cast<__u64>(ioevs[j].res) <
                            reinterpret_cast<struct iocb*>(ioevs[j].obj)->aio_nbytes) {
                        // We've found a short event. Store the index since
                        // events won't necessarily arrive in the order they are queued.
                        short_i = (ioevs[j].obj - reinterpret_cast<uint64_t>(mIobuf[i].iocbs.data()))
                            / sizeof(struct iocb) + 1;
                        short_packet = true;
                    }
                }
            }
            if (short_packet) {
                // Requests queued after the short read will never complete
                // normally; cancel them.
                if (cancelEvents(mIobuf[i].iocb.data(), ioevs, short_i, mIobuf[i].actual)) {
                    write_error = true;
                }
            }
            if (file_length == MAX_MTP_FILE_SIZE) {
                // For larger files, receive until a short packet is received.
                if (static_cast<size_t>(ret) < length) {
                    file_length = 0;
                }
            } else if (ret < static_cast<int>(length)) {
                // If file is less than 4G and we get a short packet, it's an error.
                errno = EIO;
                LOG(ERROR) << "Mtp got unexpected short packet";
                return -1;
            } else {
                file_length -= ret;
            }

            if (write_error) {
                cancelTransaction();
                return -1;
            }

            // Enqueue a new write request
            aio_prepare(&aio, mIobuf[i].bufs.data(), ret, offset);
            aio_write(&aio);

            offset += ret;
            i = (i + 1) % NUM_IO_BUFS;
            has_write = true;
        }
    }
    if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
        // Receive an empty packet if size is a multiple of the endpoint size
        // and we didn't already get an empty packet from the header or large file.
        if (read(mIobuf[0].bufs.data(), packet_size) != 0) {
            return -1;
        }
    }
    return 0;
}
593
// Send a file to the host: writes the MTP data header (with the first
// packet's worth of file data folded in), then pipelines disk reads
// (POSIX AIO) against USB bulk-in writes (Linux AIO) using ping-ponged
// io_buffers. Returns 0 on success, -1 with errno set on error.
int MtpFfsHandle::sendFile(mtp_file_range mfr) {
    uint64_t file_length = mfr.length;
    // Length advertised in the header; >= 4 GiB is clamped to 0xFFFFFFFF.
    uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
            file_length + sizeof(mtp_data_header));
    uint64_t offset = mfr.offset;
    int packet_size = getPacketSize(mBulkIn);

    // If file_length is larger than a size_t, truncating would produce the wrong comparison.
    // Instead, promote the left side to 64 bits, then truncate the small result.
    int init_read_len = std::min(
            static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);

    advise(mfr.fd);

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    struct aiocb *aiol[] = {&aio};
    int ret = 0;                 // bytes sent by the most recent USB write
    int length, num_read;
    unsigned i = 0;              // index of the io_buffer in use this round
    struct io_event ioevs[AIO_BUFS_MAX];
    bool error = false;
    bool has_write = false;      // a USB write is outstanding

    // Send the header data
    mtp_data_header *header = reinterpret_cast<mtp_data_header*>(mIobuf[0].bufs.data());
    header->length = htole32(given_length);
    header->type = htole16(2); // data packet
    header->command = htole16(mfr.command);
    header->transaction_id = htole32(mfr.transaction_id);

    // Some hosts don't support header/data separation even though MTP allows it
    // Handle by filling first packet with initial file data
    if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
                    sizeof(mtp_data_header), init_read_len, offset))
            != init_read_len) return -1;
    if (write(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len) == -1)
        return -1;
    file_length -= init_read_len;
    offset += init_read_len;
    ret = init_read_len + sizeof(mtp_data_header);

    // Break down the file into pieces that fit in buffers
    while(file_length > 0 || has_write) {
        if (file_length > 0) {
            // Queue up a read from disk.
            length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
            aio_prepare(&aio, mIobuf[i].bufs.data(), length, offset);
            aio_read(&aio);
        }

        if (has_write) {
            // Wait for usb write. Cancel unwritten portion if there's an error.
            int num_events = 0;
            if (waitEvents(&mIobuf[(i-1)%NUM_IO_BUFS], mIobuf[(i-1)%NUM_IO_BUFS].actual, ioevs,
                        &num_events) != ret) {
                error = true;
                cancelEvents(mIobuf[(i-1)%NUM_IO_BUFS].iocb.data(), ioevs, num_events,
                        mIobuf[(i-1)%NUM_IO_BUFS].actual);
            }
            has_write = false;
        }

        if (file_length > 0) {
            // Wait for the previous read to finish
            aio_suspend(aiol, 1, nullptr);
            num_read = aio_return(&aio);
            if (static_cast<size_t>(num_read) < aio.aio_nbytes) {
                errno = num_read == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error reading from disk";
                cancelTransaction();
                return -1;
            }

            file_length -= num_read;
            offset += num_read;

            if (error) {
                return -1;
            }

            // Queue up a write to usb.
            if (iobufSubmit(&mIobuf[i], mBulkIn, num_read, false) == -1) {
                return -1;
            }
            has_write = true;
            ret = num_read;
        }

        i = (i + 1) % NUM_IO_BUFS;
    }

    if (ret % packet_size == 0) {
        // If the last packet wasn't short, send a final empty packet
        if (write(mIobuf[0].bufs.data(), 0) != 0) {
            return -1;
        }
    }
    return 0;
}
694
695int MtpFfsHandle::sendEvent(mtp_event me) {
Jerry Zhang94ef0ea2017-07-26 11:37:23 -0700696 // Mimic the behavior of f_mtp by sending the event async.
697 // Events aren't critical to the connection, so we don't need to check the return value.
698 char *temp = new char[me.length];
699 memcpy(temp, me.data, me.length);
700 me.data = temp;
Jerry Zhang008f4df2017-08-09 17:53:50 -0700701 std::thread t([this, me]() { return this->doSendEvent(me); });
Jerry Zhang94ef0ea2017-07-26 11:37:23 -0700702 t.detach();
703 return 0;
704}
705
706void MtpFfsHandle::doSendEvent(mtp_event me) {
Jerry Zhang487be612016-10-24 12:10:41 -0700707 unsigned length = me.length;
Jerry Zhang94ef0ea2017-07-26 11:37:23 -0700708 int ret = ::write(mIntr, me.data, length);
Jerry Zhang94ef0ea2017-07-26 11:37:23 -0700709 if (static_cast<unsigned>(ret) != length)
710 PLOG(ERROR) << "Mtp error sending event thread!";
Jerry Zhang008f4df2017-08-09 17:53:50 -0700711 delete[] reinterpret_cast<char*>(me.data);
Jerry Zhang487be612016-10-24 12:10:41 -0700712}
713
714} // namespace android
715