blob: 9d9e179a03321c22d8473147036800395ad0d67c [file] [log] [blame]
Wei Jia53692fa2017-12-11 10:33:46 -08001/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayer2Renderer"
19#include <utils/Log.h>
20
21#include "JWakeLock.h"
22#include "NuPlayer2Renderer.h"
23#include <algorithm>
24#include <cutils/properties.h>
25#include <media/stagefright/foundation/ADebug.h>
26#include <media/stagefright/foundation/AMessage.h>
27#include <media/stagefright/foundation/AUtils.h>
28#include <media/stagefright/MediaClock.h>
29#include <media/stagefright/MediaErrors.h>
Wei Jia53692fa2017-12-11 10:33:46 -080030#include <media/stagefright/Utils.h>
31#include <media/stagefright/VideoFrameScheduler.h>
32#include <media/MediaCodecBuffer.h>
33
34#include <inttypes.h>
35
36namespace android {
37
38/*
39 * Example of common configuration settings in shell script form
40
41 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
42 adb shell setprop audio.offload.disable 1
43
44 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
45 adb shell setprop audio.offload.video 1
46
47 #Use audio callbacks for PCM data
48 adb shell setprop media.stagefright.audio.cbk 1
49
50 #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
51 adb shell setprop media.stagefright.audio.deep 1
52
53 #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
54 adb shell setprop media.stagefright.audio.sink 1000
55
56 * These configurations take effect for the next track played (not the current track).
57 */
58
59static inline bool getUseAudioCallbackSetting() {
60 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
61}
62
63static inline int32_t getAudioSinkPcmMsSetting() {
64 return property_get_int32(
65 "media.stagefright.audio.sink", 500 /* default_value */);
66}
67
// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000LL;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;

// Minimum period between audio-driven media clock updates: 20 ms.
static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
76
// static
// "Empty" PCM configuration: no channel mask, no output flags, invalid format,
// zero channels / sample rate. Used as the reset value for mCurrentPcmInfo.
const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};
85
// static
// Minimum delay between position updates: 100 ms.
const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;
Wei Jia53692fa2017-12-11 10:33:46 -080088
// Constructs a renderer bound to the given audio sink, shared media clock and
// notification target. The renderer starts in the paused state (mPaused=true);
// playback begins only after resume(). "flags" are the FLAG_* renderer flags.
NuPlayer2::Renderer::Renderer(
        const sp<MediaPlayer2Interface::AudioSink> &sink,
        const sp<MediaClock> &mediaClock,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mMediaClock(mediaClock),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      // -1 sentinels mean "no anchor established yet".
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0LL),
      mNextVideoTimeMediaUs(-1),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(true),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new JWakeLock()) {
    CHECK(mediaClock != NULL);
    // Seed the shared clock with the (default) playback speed.
    mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
}
138
// Tears down the renderer: shuts the offloaded sink down first (if any) so the
// DSP path is released, then drops queued buffers and strong references.
NuPlayer2::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }

    // Try to avoid racing condition in case callback is still on.
    Mutex::Autolock autoLock(mLock);
    if (mUseAudioCallback) {
        flushQueue(&mAudioQueue);
        flushQueue(&mVideoQueue);
    }
    mWakeLock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}
157
158void NuPlayer2::Renderer::queueBuffer(
159 bool audio,
160 const sp<MediaCodecBuffer> &buffer,
161 const sp<AMessage> &notifyConsumed) {
162 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
163 msg->setInt32("queueGeneration", getQueueGeneration(audio));
164 msg->setInt32("audio", static_cast<int32_t>(audio));
165 msg->setObject("buffer", buffer);
166 msg->setMessage("notifyConsumed", notifyConsumed);
167 msg->post();
168}
169
170void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
171 CHECK_NE(finalResult, (status_t)OK);
172
173 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
174 msg->setInt32("queueGeneration", getQueueGeneration(audio));
175 msg->setInt32("audio", static_cast<int32_t>(audio));
176 msg->setInt32("finalResult", finalResult);
177 msg->post();
178}
179
180status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
181 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
182 writeToAMessage(msg, rate);
183 sp<AMessage> response;
184 status_t err = msg->postAndAwaitResponse(&response);
185 if (err == OK && response != NULL) {
186 CHECK(response->findInt32("err", &err));
187 }
188 return err;
189}
190
191status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
Wei Jia700a7c22018-09-14 18:04:35 -0700192 if (rate.mSpeed <= 0.f) {
193 ALOGW("playback rate cannot be %f", rate.mSpeed);
194 return BAD_VALUE;
Wei Jia53692fa2017-12-11 10:33:46 -0800195 }
196
197 if (mAudioSink != NULL && mAudioSink->ready()) {
198 status_t err = mAudioSink->setPlaybackRate(rate);
199 if (err != OK) {
Wei Jia700a7c22018-09-14 18:04:35 -0700200 ALOGW("failed to get playback rate from audio sink, err(%d)", err);
Wei Jia53692fa2017-12-11 10:33:46 -0800201 return err;
202 }
203 }
204 mPlaybackSettings = rate;
Wei Jia700a7c22018-09-14 18:04:35 -0700205 mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
Wei Jia53692fa2017-12-11 10:33:46 -0800206 return OK;
207}
208
209status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
210 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
211 sp<AMessage> response;
212 status_t err = msg->postAndAwaitResponse(&response);
213 if (err == OK && response != NULL) {
214 CHECK(response->findInt32("err", &err));
215 if (err == OK) {
216 readFromAMessage(response, rate);
217 }
218 }
219 return err;
220}
221
222status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
223 if (mAudioSink != NULL && mAudioSink->ready()) {
224 status_t err = mAudioSink->getPlaybackRate(rate);
225 if (err == OK) {
226 if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
Wei Jia48c16232018-11-17 17:22:59 -0800227 ALOGW("correcting mismatch in internal/external playback rate, %f vs %f",
228 rate->mSpeed, mPlaybackSettings.mSpeed);
Wei Jia53692fa2017-12-11 10:33:46 -0800229 }
230 // get playback settings used by audiosink, as it may be
231 // slightly off due to audiosink not taking small changes.
232 mPlaybackSettings = *rate;
Wei Jia53692fa2017-12-11 10:33:46 -0800233 }
234 return err;
235 }
236 *rate = mPlaybackSettings;
237 return OK;
238}
239
240status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
241 sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
242 writeToAMessage(msg, sync, videoFpsHint);
243 sp<AMessage> response;
244 status_t err = msg->postAndAwaitResponse(&response);
245 if (err == OK && response != NULL) {
246 CHECK(response->findInt32("err", &err));
247 }
248 return err;
249}
250
251status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
252 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
253 return BAD_VALUE;
254 }
255 // TODO: support sync sources
256 return INVALID_OPERATION;
257}
258
259status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
260 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
261 sp<AMessage> response;
262 status_t err = msg->postAndAwaitResponse(&response);
263 if (err == OK && response != NULL) {
264 CHECK(response->findInt32("err", &err));
265 if (err == OK) {
266 readFromAMessage(response, sync, videoFps);
267 }
268 }
269 return err;
270}
271
// Looper-side handler for getSyncSettings(): reports the cached settings;
// the video frame rate is unknown here, so -1 is returned for it.
status_t NuPlayer2::Renderer::onGetSyncSettings(
        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
    *sync = mSyncSettings;
    *videoFps = -1.f;
    return OK;
}
278
// Flushes the pending audio or video stream. Bumping the queue/drain
// generations under mLock invalidates all in-flight queue/drain messages for
// that stream; the queue itself is emptied on the looper via kWhatFlush.
void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        // Drop A/V sync state; it is re-established by the first samples
        // queued after the flush.
        mMediaClock->clearAnchor();
        mVideoLateByUs = 0;
        mNextVideoTimeMediaUs = -1;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}
303
// Intentionally a no-op in this renderer; presumably kept for interface
// compatibility with callers — confirm against NuPlayer2's usage.
void NuPlayer2::Renderer::signalTimeDiscontinuity() {
}
306
307void NuPlayer2::Renderer::signalDisableOffloadAudio() {
308 (new AMessage(kWhatDisableOffloadAudio, this))->post();
309}
310
311void NuPlayer2::Renderer::signalEnableOffloadAudio() {
312 (new AMessage(kWhatEnableOffloadAudio, this))->post();
313}
314
315void NuPlayer2::Renderer::pause() {
316 (new AMessage(kWhatPause, this))->post();
317}
318
319void NuPlayer2::Renderer::resume() {
320 (new AMessage(kWhatResume, this))->post();
321}
322
323void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
324 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
325 msg->setFloat("frame-rate", fps);
326 msg->post();
327}
328
// Called on any threads without mLock acquired.
// Returns the current media position from MediaClock. If the clock has not
// started yet, attempts to seed its anchor from the audio sink's played-out
// duration (only possible once a first audio anchor time is known and the
// sink reports a timestamp), then queries the clock again.
status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            // No rendered frames reported yet; surface the original failure.
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}
358
// Resets the first audio anchor so the next audio sample re-seeds the media
// clock's starting time. Caller must hold mLock (per the _l suffix).
void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}
363
364void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
365 if (mAudioFirstAnchorTimeMediaUs == -1) {
366 mAudioFirstAnchorTimeMediaUs = mediaUs;
367 mMediaClock->setStartingTimeMedia(mediaUs);
368 }
369}
370
// Called on renderer looper.
// Drops the media clock anchor along with the renderer's cached anchor state.
void NuPlayer2::Renderer::clearAnchorTime() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}
377
// Records how late video rendering is running, in microseconds (mLock-guarded).
void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}
382
// Returns the last recorded video lateness in microseconds (mLock-guarded).
int64_t NuPlayer2::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}
387
// Synchronously (re)opens the audio sink on the renderer looper.
// On success, *isOffloaded (if provided) reports whether the sink ended up in
// offload mode. Returns INVALID_OPERATION when the looper round-trip itself
// fails or the response is malformed.
status_t NuPlayer2::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded,
        bool isStreaming) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);
    msg->setInt32("isStreaming", isStreaming);

    sp<AMessage> response;
    status_t postStatus = msg->postAndAwaitResponse(&response);

    int32_t err;
    if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}
415
416void NuPlayer2::Renderer::closeAudioSink() {
417 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
418
419 sp<AMessage> response;
420 msg->postAndAwaitResponse(&response);
421}
422
// Posts an asynchronous audio format change. The sink parameters are bundled
// into a "meta" message; the looper either applies it immediately (stale
// generation or empty audio queue) or enqueues it as a marker entry behind
// the pending audio buffers. "notify" is posted when the change is applied.
void NuPlayer2::Renderer::changeAudioFormat(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool isStreaming,
        const sp<AMessage> &notify) {
    sp<AMessage> meta = new AMessage;
    meta->setMessage("format", format);
    meta->setInt32("offload-only", offloadOnly);
    meta->setInt32("has-video", hasVideo);
    meta->setInt32("flags", flags);
    meta->setInt32("isStreaming", isStreaming);

    sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
    msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
    msg->setMessage("notify", notify);
    msg->setMessage("meta", meta);
    msg->post();
}
443
// Central looper dispatch: every public entry point of the renderer funnels
// into one of these cases, so all state mutation happens on this thread.
// Generation stamps ("queueGeneration"/"drainGeneration") guard each stream
// against messages that were in flight when a flush occurred.
void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        // Synchronous open of the audio sink; replies with "err" and whether
        // the sink is now offloaded.
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            uint32_t isStreaming;
            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        // Synchronous close of the audio sink; reply carries no payload.
        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        // Applies a format change now, or queues it as a marker entry if
        // audio buffers are still pending in the current generation.
        case kWhatChangeAudioFormat:
        {
            int32_t queueGeneration;
            CHECK(msg->findInt32("queueGeneration", &queueGeneration));

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));

            if (offloadingAudio()) {
                ALOGW("changeAudioFormat should NOT be called in offload mode");
                notify->setInt32("err", INVALID_OPERATION);
                notify->post();
                break;
            }

            sp<AMessage> meta;
            CHECK(msg->findMessage("meta", &meta));

            if (queueGeneration != getQueueGeneration(true /* audio */)
                    || mAudioQueue.empty()) {
                onChangeAudioFormat(meta, notify);
                break;
            }

            // Enqueue a buffer-less marker entry behind the pending audio.
            QueueEntry entry;
            entry.mNotifyConsumed = notify;
            entry.mMeta = meta;

            Mutex::Autolock autoLock(mLock);
            mAudioQueue.push_back(entry);
            postDrainAudioQueue_l();

            break;
        }

        // Push-mode audio drain: write queued PCM to the sink, then schedule
        // the next drain based on how much data the sink has buffered.
        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;  // stale: stream was flushed since this was posted
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                // Handle AudioTrack race when start is immediately called after flush.
                uint32_t numFramesPendingPlayout =
                    (mNumFramesWritten > numFramesPlayed ?
                        mNumFramesWritten - numFramesPlayed : 0);

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackSettings.mSpeed > 1.0f) {
                    delayUs /= mPlaybackSettings.mSpeed;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        // Renders (or drops) the video frame at the head of the queue and
        // schedules the next one.
        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            postDrainVideoQueue();
            break;
        }

        // Re-arms video drain scheduling (used when the previous attempt was
        // deferred).
        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        // Deferred audio EOS notification (generation-guarded).
        case kWhatEOS:
        {
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        // Offload pause timeout: tear the audio sink down so the DSP can
        // power off, then drop the wake lock taken when the timer was armed.
        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}
751
// Schedules a push-mode audio drain after delayUs, unless one is already
// pending, queues are being sync'ed, or the sink pulls data via callback.
// Caller must hold mLock (per the _l suffix).
void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        // Delay at least until draining is allowed again after the pause.
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}
774
// Arms the "media rendering started" notification by snapshotting the current
// drain generations; notifyIfMediaRenderingStarted_l() fires once both
// streams deliver data in these generations. Caller must hold mLock.
void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}
780
// Posts kWhatMediaRenderingStart once data has been delivered for both
// streams in the armed generations. While paused, the notification is
// deferred (generations stay armed) and fires after resume.
// Caller must hold mLock.
void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        // -1 disarms both generations so the notification fires only once.
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}
796
// static
// Trampoline invoked by the AudioSink on its callback thread; "cookie" is the
// owning Renderer. For fill requests the return value is the number of bytes
// provided; all other events return 0.
size_t NuPlayer2::Renderer::AudioSinkCallback(
        MediaPlayer2Interface::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayer2Interface::AudioSink::cb_event_t event) {
    NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;

    switch (event) {
        case MediaPlayer2Interface::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOSCallback();
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}
830
// Handles CB_EVENT_STREAM_END from the AudioSink callback thread: notifies
// audio EOS unless callback mode has been switched off in the meantime.
void NuPlayer2::Renderer::notifyEOSCallback() {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        // Stale event from a sink we no longer feed via callbacks.
        return;
    }

    notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
}
840
841size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
842 Mutex::Autolock autoLock(mLock);
843
844 if (!mUseAudioCallback) {
845 return 0;
846 }
847
848 bool hasEOS = false;
849
850 size_t sizeCopied = 0;
851 bool firstEntry = true;
852 QueueEntry *entry; // will be valid after while loop if hasEOS is set.
853 while (sizeCopied < size && !mAudioQueue.empty()) {
854 entry = &*mAudioQueue.begin();
855
856 if (entry->mBuffer == NULL) { // EOS
857 hasEOS = true;
858 mAudioQueue.erase(mAudioQueue.begin());
859 break;
860 }
861
862 if (firstEntry && entry->mOffset == 0) {
863 firstEntry = false;
864 int64_t mediaTimeUs;
865 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
866 ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
867 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
868 }
869
870 size_t copy = entry->mBuffer->size() - entry->mOffset;
871 size_t sizeRemaining = size - sizeCopied;
872 if (copy > sizeRemaining) {
873 copy = sizeRemaining;
874 }
875
876 memcpy((char *)buffer + sizeCopied,
877 entry->mBuffer->data() + entry->mOffset,
878 copy);
879
880 entry->mOffset += copy;
881 if (entry->mOffset == entry->mBuffer->size()) {
882 entry->mNotifyConsumed->post();
883 mAudioQueue.erase(mAudioQueue.begin());
884 entry = NULL;
885 }
886 sizeCopied += copy;
887
888 notifyIfMediaRenderingStarted_l();
889 }
890
891 if (mAudioFirstAnchorTimeMediaUs >= 0) {
892 int64_t nowUs = ALooper::GetNowUs();
893 int64_t nowMediaUs =
894 mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
895 // we don't know how much data we are queueing for offloaded tracks.
896 mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
897 }
898
899 // for non-offloaded audio, we need to compute the frames written because
900 // there is no EVENT_STREAM_END notification. The frames written gives
901 // an estimate on the pending played out duration.
902 if (!offloadingAudio()) {
903 mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
904 }
905
906 if (hasEOS) {
907 (new AMessage(kWhatStopAudioSink, this))->post();
908 // As there is currently no EVENT_STREAM_END callback notification for
909 // non-offloaded audio tracks, we need to post the EOS ourselves.
910 if (!offloadingAudio()) {
911 int64_t postEOSDelayUs = 0;
912 if (mAudioSink->needsTrailingPadding()) {
913 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
914 }
915 ALOGV("fillAudioBuffer: notifyEOS_l "
916 "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld",
917 mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
918 notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
919 }
920 }
921 return sizeCopied;
922}
923
// Finds the LAST EOS (or "eos"-marked discontinuity) entry in the audio queue
// and releases everything up to and including it: data buffers get their
// consumed replies, buffer-less entries either notify EOS or apply a pending
// format change. Used when the audio sink cannot report a position, so
// waiting decoders are not left blocked.
void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        // An entry with neither buffer nor reply is a plain EOS marker; a
        // reply message tagged "eos" marks a discontinuity.
        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;  // one past the last EOS seen so far
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == nullptr) {
                if (it->mNotifyConsumed == nullptr) {
                    // delay doesn't matter as we don't even have an AudioTrack
                    notifyEOS(true /* audio */, it->mFinalResult);
                } else {
                    // TAG for re-opening audio sink.
                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
                }
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}
955
956bool NuPlayer2::Renderer::onDrainAudioQueue() {
957 // do not drain audio during teardown as queued buffers may be invalid.
958 if (mAudioTornDown) {
959 return false;
960 }
961 // TODO: This call to getPosition checks if AudioTrack has been created
962 // in AudioSink before draining audio. If AudioTrack doesn't exist, then
963 // CHECKs on getPosition will fail.
964 // We still need to figure out why AudioTrack is not created when
965 // this function is called. One possible reason could be leftover
966 // audio. Another possible place is to check whether decoder
967 // has received INFO_FORMAT_CHANGED as the first buffer since
968 // AudioSink is opened there, and possible interactions with flush
969 // immediately after start. Investigate error message
970 // "vorbis_dsp_synthesis returned -135", along with RTSP.
971 uint32_t numFramesPlayed;
972 if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
973 // When getPosition fails, renderer will not reschedule the draining
974 // unless new samples are queued.
975 // If we have pending EOS (or "eos" marker for discontinuities), we need
976 // to post these now as NuPlayer2Decoder might be waiting for it.
977 drainAudioQueueUntilLastEOS();
978
979 ALOGW("onDrainAudioQueue(): audio sink is not ready");
980 return false;
981 }
982
983#if 0
984 ssize_t numFramesAvailableToWrite =
985 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
986
987 if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
988 ALOGI("audio sink underrun");
989 } else {
990 ALOGV("audio queue has %d frames left to play",
991 mAudioSink->frameCount() - numFramesAvailableToWrite);
992 }
993#endif
994
995 uint32_t prevFramesWritten = mNumFramesWritten;
996 while (!mAudioQueue.empty()) {
997 QueueEntry *entry = &*mAudioQueue.begin();
998
999 if (entry->mBuffer == NULL) {
1000 if (entry->mNotifyConsumed != nullptr) {
1001 // TAG for re-open audio sink.
1002 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1003 mAudioQueue.erase(mAudioQueue.begin());
1004 continue;
1005 }
1006
1007 // EOS
1008 if (mPaused) {
1009 // Do not notify EOS when paused.
1010 // This is needed to avoid switch to next clip while in pause.
1011 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1012 return false;
1013 }
1014
1015 int64_t postEOSDelayUs = 0;
1016 if (mAudioSink->needsTrailingPadding()) {
1017 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1018 }
1019 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1020 mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1021
1022 mAudioQueue.erase(mAudioQueue.begin());
1023 entry = NULL;
1024 if (mAudioSink->needsTrailingPadding()) {
1025 // If we're not in gapless playback (i.e. through setNextPlayer), we
1026 // need to stop the track here, because that will play out the last
1027 // little bit at the end of the file. Otherwise short files won't play.
1028 mAudioSink->stop();
1029 mNumFramesWritten = 0;
1030 }
1031 return false;
1032 }
1033
1034 mLastAudioBufferDrained = entry->mBufferOrdinal;
1035
1036 // ignore 0-sized buffer which could be EOS marker with no data
1037 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1038 int64_t mediaTimeUs;
1039 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1040 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1041 mediaTimeUs / 1E6);
1042 onNewAudioMediaTime(mediaTimeUs);
1043 }
1044
1045 size_t copy = entry->mBuffer->size() - entry->mOffset;
1046
1047 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1048 copy, false /* blocking */);
1049 if (written < 0) {
1050 // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1051 if (written == WOULD_BLOCK) {
1052 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1053 } else {
1054 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1055 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1056 // true, in which case the NuPlayer2 will handle the reconnect.
1057 notifyAudioTearDown(kDueToError);
1058 }
1059 break;
1060 }
1061
1062 entry->mOffset += written;
1063 size_t remainder = entry->mBuffer->size() - entry->mOffset;
1064 if ((ssize_t)remainder < mAudioSink->frameSize()) {
1065 if (remainder > 0) {
1066 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1067 remainder);
1068 entry->mOffset += remainder;
1069 copy -= remainder;
1070 }
1071
1072 entry->mNotifyConsumed->post();
1073 mAudioQueue.erase(mAudioQueue.begin());
1074
1075 entry = NULL;
1076 }
1077
1078 size_t copiedFrames = written / mAudioSink->frameSize();
1079 mNumFramesWritten += copiedFrames;
1080
1081 {
1082 Mutex::Autolock autoLock(mLock);
1083 int64_t maxTimeMedia;
1084 maxTimeMedia =
1085 mAnchorTimeMediaUs +
1086 (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1087 * 1000LL * mAudioSink->msecsPerFrame());
1088 mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1089
1090 notifyIfMediaRenderingStarted_l();
1091 }
1092
1093 if (written != (ssize_t)copy) {
1094 // A short count was received from AudioSink::write()
1095 //
1096 // AudioSink write is called in non-blocking mode.
1097 // It may return with a short count when:
1098 //
1099 // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1100 // discarded.
1101 // 2) The data to be copied exceeds the available buffer in AudioSink.
1102 // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1103 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1104
1105 // (Case 1)
1106 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it
1107 // needs to fail, as we should not carry over fractional frames between calls.
1108 CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1109
1110 // (Case 2, 3, 4)
1111 // Return early to the caller.
1112 // Beware of calling immediately again as this may busy-loop if you are not careful.
1113 ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1114 break;
1115 }
1116 }
1117
1118 // calculate whether we need to reschedule another write.
1119 bool reschedule = !mAudioQueue.empty()
1120 && (!mPaused
1121 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1122 //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u",
1123 // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1124 return reschedule;
1125}
1126
1127int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1128 int32_t sampleRate = offloadingAudio() ?
1129 mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1130 if (sampleRate == 0) {
1131 ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1132 return 0;
1133 }
1134 // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1135 return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1136}
1137
1138// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1139int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1140 int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1141 if (mUseVirtualAudioSink) {
1142 int64_t nowUs = ALooper::GetNowUs();
1143 int64_t mediaUs;
1144 if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
Chih-Hung Hsiehd42529d2018-12-11 13:53:10 -08001145 return 0LL;
Wei Jia53692fa2017-12-11 10:33:46 -08001146 } else {
1147 return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1148 }
1149 }
1150
1151 const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1152 int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
1153 if (pendingUs < 0) {
1154 // This shouldn't happen unless the timestamp is stale.
1155 ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1156 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1157 __func__, (long long)pendingUs,
1158 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1159 pendingUs = 0;
1160 }
1161 return pendingUs;
1162}
1163
1164int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1165 int64_t realUs;
1166 if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1167 // If failed to get current position, e.g. due to audio clock is
1168 // not ready, then just play out video immediately without delay.
1169 return nowUs;
1170 }
1171 return realUs;
1172}
1173
// Called when an audio buffer with a new media timestamp is about to be
// written to the sink. Periodically re-anchors the media clock to the audio
// position; if the sink never starts rendering, falls back to a system-clock
// paced "virtual" audio sink so playback can still make progress.
void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        // The sink reports a positive position once it has actually rendered
        // samples; only then do we start using it to drive the media clock.
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            // Anchor the media clock: "now" in media time is the queued
            // timestamp minus whatever is still pending in the sink.
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latenty of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}
1218
// Called without mLock acquired.
// Schedules delivery of the next queued video frame: posts kWhatDrainVideoQueue
// either immediately (EOS, first sample, or frame preceding the audio anchor),
// at a computed real-time delay (FLAG_REAL_TIME), or via a media-clock timer
// two vsyncs ahead of the frame's presentation time.
void NuPlayer2::Renderer::postDrainVideoQueue() {
    // Skip if a drain is already scheduled, the initial A/V queue sync is
    // still in progress, or we're paused after the first sample was shown.
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    if (mFlags & FLAG_REAL_TIME) {
        int64_t realTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));

        // Snap the target time to the display's vsync grid.
        realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        int64_t delayUs = realTimeUs - nowUs;

        ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
        // post 2 display refreshes before rendering is due
        msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

        mDrainVideoQueuePending = true;
        return;
    }

    int64_t mediaTimeUs;
    CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

    {
        Mutex::Autolock autoLock(mLock);
        // First anchor for video-driven playback (e.g. no audio yet).
        if (mAnchorTimeMediaUs < 0) {
            mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
            mAnchorTimeMediaUs = mediaTimeUs;
        }
    }
    mNextVideoTimeMediaUs = mediaTimeUs + 100000;
    if (!mHasAudio) {
        // smooth out videos >= 10fps
        mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
    }

    if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
        // First frame, or a frame dated before the audio anchor: show it now.
        msg->post();
    } else {
        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        // post 2 display refreshes before rendering is due
        mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
    }

    mDrainVideoQueuePending = true;
}
1289
// Delivers the head of the video queue to the output: computes the frame's
// render time, decides whether it is too late to display, and posts the
// consumed notification (with "render"/"timestampNs") back to the decoder.
// Also handles the video EOS marker.
void NuPlayer2::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    int64_t mediaTimeUs = -1;
    if (mFlags & FLAG_REAL_TIME) {
        // In real-time mode the buffer timestamp already is a system time.
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }
    // Align the render time to the display's vsync grid.
    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

    bool tooLate = false;

    if (!mPaused) {
        setVideoLateByUs(nowUs - realTimeUs);
        // Frames more than 40ms late are dropped (not rendered).
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);

            if (!(mFlags & FLAG_REAL_TIME)
                    && mLastAudioMediaTimeUs != -1
                    && mediaTimeUs > mLastAudioMediaTimeUs) {
                // If audio ends before video, video continues to drive media clock.
                // Also smooth out videos >= 10fps.
                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
            }
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            clearAnchorTime();
        }
    }

    // Always render the first video frame while keeping stats on A/V sync.
    if (!mVideoSampleReceived) {
        realTimeUs = nowUs;
        tooLate = false;
    }

    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}
1377
1378void NuPlayer2::Renderer::notifyVideoRenderingStart() {
1379 sp<AMessage> notify = mNotify->dup();
1380 notify->setInt32("what", kWhatVideoRenderingStart);
1381 notify->post();
1382}
1383
// Thread-safe wrapper: acquires mLock and forwards to notifyEOS_l().
void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
    Mutex::Autolock autoLock(mLock);
    notifyEOS_l(audio, finalResult, delayUs);
}
1388
// Delivers (or defers) an end-of-stream notification. Caller must hold mLock.
// For audio with a positive delay, reposts kWhatEOS to ourselves so trailing
// samples can finish playing before the EOS is surfaced; the generation tag
// lets a flush invalidate the deferred message.
void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
    if (audio && delayUs > 0) {
        sp<AMessage> msg = new AMessage(kWhatEOS, this);
        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
        msg->setInt32("finalResult", finalResult);
        msg->post(delayUs);
        return;
    }
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatEOS);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->setInt32("finalResult", finalResult);
    notify->post(delayUs);

    if (audio) {
        // Video might outlive audio. Clear anchor to enable video only case.
        mAnchorTimeMediaUs = -1;
        mHasAudio = false;
        if (mNextVideoTimeMediaUs >= 0) {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(ALooper::GetNowUs(), &mediaUs);
            if (mNextVideoTimeMediaUs > mediaUs) {
                mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
            }
        }
    }
}
1416
1417void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1418 sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1419 msg->setInt32("reason", reason);
1420 msg->post();
1421}
1422
// Accepts a decoded buffer from the decoder, enqueues it on the audio or
// video queue, and kicks the corresponding drain. While the initial A/V
// queue sync is active, also drops leading audio that starts more than
// 0.1s before the first video frame.
void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // Buffers queued before the last flush are returned unrendered.
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        // Lazily create the vsync-based frame scheduler on first video buffer.
        if (mVideoScheduler == NULL) {
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<RefBase> obj;
    CHECK(msg->findObject("buffer", &obj));
    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // postDrainVideoQueue() must be called without mLock held.
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    Mutex::Autolock autoLock(mLock);
    // The remainder handles the initial audio/video alignment phase only.
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000LL) {
        // Audio data starts more than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    syncQueuesDone_l();
}
1503
// Ends the initial A/V queue-alignment phase and kicks both drains.
// Caller must hold mLock. The lock is dropped temporarily around
// postDrainVideoQueue(), which must be invoked without mLock held.
void NuPlayer2::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}
1521
// Queues an end-of-stream marker carrying |finalResult| on the audio or
// video queue and schedules a drain so it gets delivered. If the target
// queue is empty while queue-sync is active, the sync phase is ended first
// so the marker cannot be starved.
void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    // An entry with a NULL mBuffer acts as the EOS marker.
    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // postDrainVideoQueue() must run without mLock; only the sync check
        // below takes the lock.
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}
1553
// Flushes one stream: returns all queued buffers to the decoder, bumps the
// drain/EOS generations so in-flight messages become no-ops, resets the
// audio sink (or video scheduler), and optionally notifies completion.
void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
            mLastAudioMediaTimeUs = -1;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
            mVideoRenderingStarted = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
    }
    clearAnchorTime();

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            // Invalidate pending drain and deferred-EOS messages.
            ++mAudioDrainGeneration;
            ++mAudioEOSGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
        mNextAudioClockUpdateTimeUs = -1;
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}
1644
1645void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) {
1646 while (!queue->empty()) {
1647 QueueEntry *entry = &*queue->begin();
1648
1649 if (entry->mBuffer != NULL) {
1650 entry->mNotifyConsumed->post();
1651 } else if (entry->mNotifyConsumed != nullptr) {
1652 // Is it needed to open audio sink now?
1653 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1654 }
1655
1656 queue->erase(queue->begin());
1657 entry = NULL;
1658 }
1659}
1660
1661void NuPlayer2::Renderer::notifyFlushComplete(bool audio) {
1662 sp<AMessage> notify = mNotify->dup();
1663 notify->setInt32("what", kWhatFlushComplete);
1664 notify->setInt32("audio", static_cast<int32_t>(audio));
1665 notify->post();
1666}
1667
1668bool NuPlayer2::Renderer::dropBufferIfStale(
1669 bool audio, const sp<AMessage> &msg) {
1670 int32_t queueGeneration;
1671 CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1672
1673 if (queueGeneration == getQueueGeneration(audio)) {
1674 return false;
1675 }
1676
1677 sp<AMessage> notifyConsumed;
1678 if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1679 notifyConsumed->post();
1680 }
1681
1682 return true;
1683}
1684
1685void NuPlayer2::Renderer::onAudioSinkChanged() {
1686 if (offloadingAudio()) {
1687 return;
1688 }
1689 CHECK(!mDrainAudioQueuePending);
1690 mNumFramesWritten = 0;
1691 mAnchorNumFramesWritten = -1;
1692 uint32_t written;
1693 if (mAudioSink->getFramesWritten(&written) == OK) {
1694 mNumFramesWritten = written;
1695 }
1696}
1697
1698void NuPlayer2::Renderer::onDisableOffloadAudio() {
1699 Mutex::Autolock autoLock(mLock);
1700 mFlags &= ~FLAG_OFFLOAD_AUDIO;
1701 ++mAudioDrainGeneration;
1702 if (mAudioRenderingStartGeneration != -1) {
1703 prepareForMediaRenderingStart_l();
1704 }
1705}
1706
1707void NuPlayer2::Renderer::onEnableOffloadAudio() {
1708 Mutex::Autolock autoLock(mLock);
1709 mFlags |= FLAG_OFFLOAD_AUDIO;
1710 ++mAudioDrainGeneration;
1711 if (mAudioRenderingStartGeneration != -1) {
1712 prepareForMediaRenderingStart_l();
1713 }
1714}
1715
// Pauses rendering: freezes the media clock, cancels pending drains, and
// pauses the audio sink. Only the video drain generation is bumped — the
// audio generation is deliberately left alone so the sink buffer can keep
// filling while paused (see onDrainAudioQueue's reschedule logic).
void NuPlayer2::Renderer::onPause() {
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // we do not increment audio drain generation so that we fill audio buffer during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    mAudioSink->pause();
    // In offload mode, hold a wakelock and arm the pause-timeout teardown.
    startAudioOffloadPauseTimeout();

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}
1740
// Resumes playback after a pause: restarts the audio sink, restores the
// playback rate on the sink and the media clock, and kicks the drain loops
// for any data queued while paused.
void NuPlayer2::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    cancelAudioOffloadPauseTimeout();
    if (mAudioSink->ready()) {
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            notifyAudioTearDown(kDueToError);
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // rendering started message may have been delayed if we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l();
        }
        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    // Must run without mLock held.
    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}
1779
1780void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
1781 if (mVideoScheduler == NULL) {
1782 mVideoScheduler = new VideoFrameScheduler();
1783 }
1784 mVideoScheduler->init(fps);
1785}
1786
1787int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) {
1788 Mutex::Autolock autoLock(mLock);
1789 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1790}
1791
1792int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) {
1793 Mutex::Autolock autoLock(mLock);
1794 return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1795}
1796
// Thread-safe accessor: whether the renderer is still aligning the initial
// audio and video buffers (see syncQueuesDone_l()).
bool NuPlayer2::Renderer::getSyncQueues() {
    Mutex::Autolock autoLock(mLock);
    return mSyncQueues;
}
1801
1802void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1803 if (mAudioTornDown) {
1804 return;
1805 }
1806 mAudioTornDown = true;
1807
1808 int64_t currentPositionUs;
1809 sp<AMessage> notify = mNotify->dup();
1810 if (getCurrentPosition(&currentPositionUs) == OK) {
1811 notify->setInt64("positionUs", currentPositionUs);
1812 }
1813
1814 mAudioSink->stop();
1815 mAudioSink->flush();
1816
1817 notify->setInt32("what", kWhatAudioTearDown);
1818 notify->setInt32("reason", reason);
1819 notify->post();
1820}
1821
1822void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() {
1823 if (offloadingAudio()) {
1824 mWakeLock->acquire();
1825 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1826 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1827 msg->post(kOffloadPauseMaxUs);
1828 }
1829}
1830
// Disarms the offload pause timeout and drops the wakelock taken by
// startAudioOffloadPauseTimeout().
void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() {
    // We may have called startAudioOffloadPauseTimeout() without
    // the AudioSink open and with offloadingAudio enabled.
    //
    // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
    // we always release the wakelock and increment the pause timeout generation.
    //
    // Note: The acquired wakelock prevents the device from suspending
    // immediately after offload pause (in case a resume happens shortly thereafter).
    mWakeLock->release(true);
    ++mAudioOffloadPauseTimeoutGeneration;
}
1843
// Opens (or re-opens) the audio sink for the given format.
// Tries offload first when offloading is enabled; on failure falls back to a
// non-offloaded PCM open (unless |offloadOnly|, in which case a teardown is
// requested). If the requested configuration matches the cached
// offload/PCM configuration, the existing sink is kept and OK is returned.
// Returns the sink open status; on success the sink is started unless paused.
status_t NuPlayer2::Renderer::onOpenAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool isStreaming) {
    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
            offloadOnly, offloadingAudio());

    bool audioSinkChanged = false;

    int32_t numChannels;
    CHECK(format->findInt32("channel-count", &numChannels));

    int32_t channelMask;
    if (!format->findInt32("channel-mask", &channelMask)) {
        // signal to the AudioSink to derive the mask from count.
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }

    int32_t sampleRate;
    CHECK(format->findInt32("sample-rate", &sampleRate));

    if (offloadingAudio()) {
        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
        AString mime;
        CHECK(format->findString("mime", &mime));
        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());

        if (err != OK) {
            // Unmappable mime: abandon offload; the PCM fallback below runs.
            ALOGE("Couldn't map mime \"%s\" to a valid "
                    "audio_format", mime.c_str());
            onDisableOffloadAudio();
        } else {
            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                    mime.c_str(), audioFormat);

            int avgBitRate = -1;
            format->findInt32("bitrate", &avgBitRate);

            int32_t aacProfile = -1;
            if (audioFormat == AUDIO_FORMAT_AAC
                    && format->findInt32("aac-profile", &aacProfile)) {
                // Redefine AAC format as per aac profile
                mapAACProfileToAudioFormat(
                        audioFormat,
                        aacProfile);
            }

            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.duration_us = -1;
            format->findInt64(
                    "durationUs", &offloadInfo.duration_us);
            offloadInfo.sample_rate = sampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = hasVideo;
            offloadInfo.is_streaming = isStreaming;

            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
                ALOGV("openAudioSink: no change in offload mode");
                // no change from previous configuration, everything ok.
                return OK;
            }
            // Invalidate the cached PCM configuration: we are (re)opening in
            // offload mode.
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;

            ALOGV("openAudioSink: try to open AudioSink in offload mode");
            uint32_t offloadFlags = flags;
            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
            audioSinkChanged = true;
            mAudioSink->close();

            err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    audioFormat,
                    &NuPlayer2::Renderer::AudioSinkCallback,
                    this,
                    (audio_output_flags_t)offloadFlags,
                    &offloadInfo);

            if (err == OK) {
                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
            }

            if (err == OK) {
                // If the playback is offloaded to h/w, we pass
                // the HAL some metadata information.
                // We don't want to do this for PCM because it
                // will be going through the AudioFlinger mixer
                // before reaching the hardware.
                // TODO
                mCurrentOffloadInfo = offloadInfo;
                if (!mPaused) { // for preview mode, don't start if paused
                    err = mAudioSink->start();
                }
                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
            }
            if (err != OK) {
                // Clean up, fall back to non offload mode.
                mAudioSink->close();
                onDisableOffloadAudio();
                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                ALOGV("openAudioSink: offload failed");
                if (offloadOnly) {
                    notifyAudioTearDown(kForceNonOffload);
                }
            } else {
                mUseAudioCallback = true;  // offload mode transfers data through callback
                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
            }
        }
    }
    // PCM path: entered directly in non-offload mode, or as fallback after a
    // failed offload open (which called onDisableOffloadAudio() above).
    if (!offloadOnly && !offloadingAudio()) {
        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
        uint32_t pcmFlags = flags;
        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

        const PcmInfo info = {
                (audio_channel_mask_t)channelMask,
                (audio_output_flags_t)pcmFlags,
                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
                numChannels,
                sampleRate
        };
        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
            ALOGV("openAudioSink: no change in pcm mode");
            // no change from previous configuration, everything ok.
            return OK;
        }

        audioSinkChanged = true;
        mAudioSink->close();
        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
        // Note: It is possible to set up the callback, but not use it to send audio data.
        // This requires a fix in AudioSink to explicitly specify the transfer mode.
        mUseAudioCallback = getUseAudioCallbackSetting();
        if (mUseAudioCallback) {
            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
        }

        // Compute the desired buffer size.
        // For callback mode, the amount of time before wakeup is about half the buffer size.
        const uint32_t frameCount =
                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;

        // We should always be able to set our playback settings if the sink is closed.
        LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
                "onOpenAudioSink: can't set playback rate on closed sink");
        status_t err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    AUDIO_FORMAT_PCM_16_BIT,
                    mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL,
                    mUseAudioCallback ? this : NULL,
                    (audio_output_flags_t)pcmFlags,
                    NULL,
                    frameCount);
        if (err != OK) {
            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
            mAudioSink->close();
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
            return err;
        }
        mCurrentPcmInfo = info;
        if (!mPaused) { // for preview mode, don't start if paused
            mAudioSink->start();
        }
    }
    if (audioSinkChanged) {
        // Reset frame accounting to match the fresh sink.
        onAudioSinkChanged();
    }
    // A successful (re)open clears any pending teardown latch.
    mAudioTornDown = false;
    return OK;
}
2023}
2024
2025void NuPlayer2::Renderer::onCloseAudioSink() {
2026 mAudioSink->close();
2027 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2028 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2029}
2030
// Applies a mid-stream audio format change queued by the decoder: unpacks the
// sink parameters from |meta|, re-opens the audio sink via onOpenAudioSink(),
// and posts |notify| back to the requester, attaching "err" on failure.
void NuPlayer2::Renderer::onChangeAudioFormat(
        const sp<AMessage> &meta, const sp<AMessage> &notify) {
    sp<AMessage> format;
    CHECK(meta->findMessage("format", &format));

    int32_t offloadOnly;
    CHECK(meta->findInt32("offload-only", &offloadOnly));

    int32_t hasVideo;
    CHECK(meta->findInt32("has-video", &hasVideo));

    uint32_t flags;
    CHECK(meta->findInt32("flags", (int32_t *)&flags));

    uint32_t isStreaming;
    CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));

    status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

    if (err != OK) {
        notify->setInt32("err", err);
    }
    notify->post();
}
2055
2056} // namespace android
2057