blob: 3d69188723b8919f9f060e48409d75e1c0e54d95 [file] [log] [blame]
Wei Jia53692fa2017-12-11 10:33:46 -08001/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayer2Renderer"
19#include <utils/Log.h>
20
21#include "JWakeLock.h"
22#include "NuPlayer2Renderer.h"
23#include <algorithm>
24#include <cutils/properties.h>
25#include <media/stagefright/foundation/ADebug.h>
26#include <media/stagefright/foundation/AMessage.h>
27#include <media/stagefright/foundation/AUtils.h>
28#include <media/stagefright/MediaClock.h>
Wei Jia4a16e2a2019-01-04 15:26:48 -080029#include <media/stagefright/MediaCodecConstants.h>
30#include <media/stagefright/MediaDefs.h>
Wei Jia53692fa2017-12-11 10:33:46 -080031#include <media/stagefright/MediaErrors.h>
Wei Jia53692fa2017-12-11 10:33:46 -080032#include <media/stagefright/Utils.h>
33#include <media/stagefright/VideoFrameScheduler.h>
34#include <media/MediaCodecBuffer.h>
35
36#include <inttypes.h>
37
38namespace android {
39
40/*
41 * Example of common configuration settings in shell script form
42
43 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
44 adb shell setprop audio.offload.disable 1
45
46 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
47 adb shell setprop audio.offload.video 1
48
49 #Use audio callbacks for PCM data
50 adb shell setprop media.stagefright.audio.cbk 1
51
52 #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
53 adb shell setprop media.stagefright.audio.deep 1
54
55 #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
56 adb shell setprop media.stagefright.audio.sink 1000
57
58 * These configurations take effect for the next track played (not the current track).
59 */
60
61static inline bool getUseAudioCallbackSetting() {
62 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
63}
64
65static inline int32_t getAudioSinkPcmMsSetting() {
66 return property_get_int32(
67 "media.stagefright.audio.sink", 500 /* default_value */);
68}
69
// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000LL;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;

// Minimum interval between audio-driven media clock anchor updates (20 ms).
static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;

// static
// Default PCM sink description: no channels, no flags, invalid format —
// i.e. "sink not configured".
const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
// Minimum delay between successive position updates (100 ms).
const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;
Wei Jia53692fa2017-12-11 10:33:46 -080090
Wei Jia4a16e2a2019-01-04 15:26:48 -080091static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
92 switch (pcmEncoding) {
93 case kAudioEncodingPcmFloat:
94 return AUDIO_FORMAT_PCM_FLOAT;
95 case kAudioEncodingPcm16bit:
96 return AUDIO_FORMAT_PCM_16_BIT;
97 case kAudioEncodingPcm8bit:
98 return AUDIO_FORMAT_PCM_8_BIT; // TODO: do we want to support this?
99 default:
100 ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
101 return AUDIO_FORMAT_INVALID;
102 }
103}
104
// Constructs a renderer bound to |sink| (audio output), |mediaClock| (shared
// A/V clock) and |notify| (event target). |flags| is a bitmask of renderer
// FLAG_* options. All timing state starts unanchored (-1 sentinels).
NuPlayer2::Renderer::Renderer(
        const sp<MediaPlayer2Interface::AudioSink> &sink,
        const sp<MediaClock> &mediaClock,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mMediaClock(mediaClock),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0LL),
      mNextVideoTimeMediaUs(-1),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new JWakeLock()) {
    CHECK(mediaClock != NULL);
    // Seed the shared media clock with the initial playback speed.
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
}
155
// Tears the renderer down. Offloaded audio is stopped/flushed/closed
// explicitly so the audio DSP can release its resources.
NuPlayer2::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }

    // Try to avoid racing condition in case callback is still on.
    Mutex::Autolock autoLock(mLock);
    if (mUseAudioCallback) {
        flushQueue(&mAudioQueue);
        flushQueue(&mVideoQueue);
    }
    // Drop strong references explicitly while still holding mLock.
    mWakeLock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}
174
175void NuPlayer2::Renderer::queueBuffer(
176 bool audio,
177 const sp<MediaCodecBuffer> &buffer,
178 const sp<AMessage> &notifyConsumed) {
179 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
180 msg->setInt32("queueGeneration", getQueueGeneration(audio));
181 msg->setInt32("audio", static_cast<int32_t>(audio));
182 msg->setObject("buffer", buffer);
183 msg->setMessage("notifyConsumed", notifyConsumed);
184 msg->post();
185}
186
187void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
188 CHECK_NE(finalResult, (status_t)OK);
189
190 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
191 msg->setInt32("queueGeneration", getQueueGeneration(audio));
192 msg->setInt32("audio", static_cast<int32_t>(audio));
193 msg->setInt32("finalResult", finalResult);
194 msg->post();
195}
196
197status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
198 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
199 writeToAMessage(msg, rate);
200 sp<AMessage> response;
201 status_t err = msg->postAndAwaitResponse(&response);
202 if (err == OK && response != NULL) {
203 CHECK(response->findInt32("err", &err));
204 }
205 return err;
206}
207
// Applies new playback settings on the looper thread. |rate| has already been
// sanitized by the caller. Speed 0 is treated as a pause request.
status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
        // have to correspond to the any non-0 speed (e.g old speed). Keep
        // settings nonetheless, using the old speed, in case audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }

    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    // Keep the shared media clock in lock-step with the new rate.
    mMediaClock->setPlaybackRate(mPlaybackRate);
    return OK;
}
231
232status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
233 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
234 sp<AMessage> response;
235 status_t err = msg->postAndAwaitResponse(&response);
236 if (err == OK && response != NULL) {
237 CHECK(response->findInt32("err", &err));
238 if (err == OK) {
239 readFromAMessage(response, rate);
240 }
241 }
242 return err;
243}
244
// Reports the effective playback settings, preferring what the audio sink
// actually applied over our cached copy. Reports speed 0 while paused.
status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate");
            }
            // get playback settings used by audiosink, as it may be
            // slightly off due to audiosink not taking small changes.
            mPlaybackSettings = *rate;
            if (mPaused) {
                rate->mSpeed = 0.f;
            }
        }
        return err;
    }
    // No ready sink: fall back to the cached settings.
    *rate = mPlaybackSettings;
    return OK;
}
264
265status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
266 sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
267 writeToAMessage(msg, sync, videoFpsHint);
268 sp<AMessage> response;
269 status_t err = msg->postAndAwaitResponse(&response);
270 if (err == OK && response != NULL) {
271 CHECK(response->findInt32("err", &err));
272 }
273 return err;
274}
275
276status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
277 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
278 return BAD_VALUE;
279 }
280 // TODO: support sync sources
281 return INVALID_OPERATION;
282}
283
284status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
285 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
286 sp<AMessage> response;
287 status_t err = msg->postAndAwaitResponse(&response);
288 if (err == OK && response != NULL) {
289 CHECK(response->findInt32("err", &err));
290 if (err == OK) {
291 readFromAMessage(response, sync, videoFps);
292 }
293 }
294 return err;
295}
296
297status_t NuPlayer2::Renderer::onGetSyncSettings(
298 AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
299 *sync = mSyncSettings;
300 *videoFps = -1.f;
301 return OK;
302}
303
// Flushes the audio or video queue. Bumping the queue/drain generations under
// mLock makes in-flight messages for the old data no-ops; the actual queue
// teardown happens on the looper via kWhatFlush.
void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        // Timing derived from the flushed stream is no longer valid.
        mMediaClock->clearAnchor();
        mVideoLateByUs = 0;
        mNextVideoTimeMediaUs = -1;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}
328
// Intentionally a no-op: time discontinuities require no action in this
// renderer implementation.
void NuPlayer2::Renderer::signalTimeDiscontinuity() {
}
331
332void NuPlayer2::Renderer::signalDisableOffloadAudio() {
333 (new AMessage(kWhatDisableOffloadAudio, this))->post();
334}
335
336void NuPlayer2::Renderer::signalEnableOffloadAudio() {
337 (new AMessage(kWhatEnableOffloadAudio, this))->post();
338}
339
340void NuPlayer2::Renderer::pause() {
341 (new AMessage(kWhatPause, this))->post();
342}
343
344void NuPlayer2::Renderer::resume() {
345 (new AMessage(kWhatResume, this))->post();
346}
347
348void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
349 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
350 msg->setFloat("frame-rate", fps);
351 msg->post();
352}
353
// Called on any threads without mLock acquired.
// Returns the current media position. If the MediaClock has no anchor yet,
// tries to derive one from the audio sink's played-out duration first.
status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        // -1 max time: no upper bound on how far the clock may run.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}
383
384void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
385 mAudioFirstAnchorTimeMediaUs = -1;
386 mMediaClock->setStartingTimeMedia(-1);
387}
388
389void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
390 if (mAudioFirstAnchorTimeMediaUs == -1) {
391 mAudioFirstAnchorTimeMediaUs = mediaUs;
392 mMediaClock->setStartingTimeMedia(mediaUs);
393 }
394}
395
396// Called on renderer looper.
397void NuPlayer2::Renderer::clearAnchorTime() {
398 mMediaClock->clearAnchor();
399 mAnchorTimeMediaUs = -1;
400 mAnchorNumFramesWritten = -1;
401}
402
403void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
404 Mutex::Autolock autoLock(mLock);
405 mVideoLateByUs = lateUs;
406}
407
408int64_t NuPlayer2::Renderer::getVideoLateByUs() {
409 Mutex::Autolock autoLock(mLock);
410 return mVideoLateByUs;
411}
412
// Opens the audio sink synchronously on the looper thread. On success and
// when |isOffloaded| is non-null, reports whether offloaded playback was
// selected. A failed post or malformed response maps to INVALID_OPERATION.
status_t NuPlayer2::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded,
        bool isStreaming) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);
    msg->setInt32("isStreaming", isStreaming);

    sp<AMessage> response;
    status_t postStatus = msg->postAndAwaitResponse(&response);

    int32_t err;
    if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}
440
441void NuPlayer2::Renderer::closeAudioSink() {
442 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
443
444 sp<AMessage> response;
445 msg->postAndAwaitResponse(&response);
446}
447
448void NuPlayer2::Renderer::changeAudioFormat(
449 const sp<AMessage> &format,
450 bool offloadOnly,
451 bool hasVideo,
452 uint32_t flags,
453 bool isStreaming,
454 const sp<AMessage> &notify) {
455 sp<AMessage> meta = new AMessage;
456 meta->setMessage("format", format);
457 meta->setInt32("offload-only", offloadOnly);
458 meta->setInt32("has-video", hasVideo);
459 meta->setInt32("flags", flags);
460 meta->setInt32("isStreaming", isStreaming);
461
462 sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
463 msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
464 msg->setMessage("notify", notify);
465 msg->setMessage("meta", meta);
466 msg->post();
467}
468
// Central dispatcher: every renderer command runs here on the looper thread.
// Most cases are gated on a generation counter so messages queued before a
// flush/reconfiguration become no-ops.
void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            uint32_t isStreaming;
            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

            // Reply with the result and whether offload mode was chosen.
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            // Empty reply: close has no result payload.
            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatChangeAudioFormat:
        {
            int32_t queueGeneration;
            CHECK(msg->findInt32("queueGeneration", &queueGeneration));

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));

            if (offloadingAudio()) {
                ALOGW("changeAudioFormat should NOT be called in offload mode");
                notify->setInt32("err", INVALID_OPERATION);
                notify->post();
                break;
            }

            sp<AMessage> meta;
            CHECK(msg->findMessage("meta", &meta));

            // Stale generation or empty queue: reconfigure immediately.
            if (queueGeneration != getQueueGeneration(true /* audio */)
                    || mAudioQueue.empty()) {
                onChangeAudioFormat(meta, notify);
                break;
            }

            // Otherwise append a marker entry so the format change happens
            // only after the queued audio has drained.
            QueueEntry entry;
            entry.mNotifyConsumed = notify;
            entry.mMeta = meta;

            Mutex::Autolock autoLock(mLock);
            mAudioQueue.push_back(entry);
            postDrainAudioQueue_l();

            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                // Handle AudioTrack race when start is immediately called after flush.
                uint32_t numFramesPendingPlayout =
                    (mNumFramesWritten > numFramesPlayed ?
                        mNumFramesWritten - numFramesPlayed : 0);

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackRate > 1.0f) {
                    delayUs /= mPlaybackRate;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            // Immediately schedule the next video drain.
            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            // The wake lock was held across the paused-offload window.
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}
776
// Schedules a kWhatDrainAudioQueue message after |delayUs|. No-ops when a
// drain is already pending, the queues are being sync-started, or the audio
// callback path is in use. Caller must hold mLock.
void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}
799
800void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
801 mAudioRenderingStartGeneration = mAudioDrainGeneration;
802 mVideoRenderingStartGeneration = mVideoDrainGeneration;
803 mRenderingDataDelivered = false;
804}
805
// Posts kWhatMediaRenderingStart once both streams have delivered data in
// the current drain generations. Suppressed (but remembered via
// mRenderingDataDelivered) while paused. Caller must hold mLock.
void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        // Invalidate the snapshots so this notification fires only once.
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}
821
// static
// AudioSink event trampoline, invoked on the audio callback thread. Routes
// fill / stream-end / tear-down events to the Renderer stored in |cookie|.
// Returns the number of bytes filled (non-zero only for CB_EVENT_FILL_BUFFER).
size_t NuPlayer2::Renderer::AudioSinkCallback(
        MediaPlayer2Interface::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayer2Interface::AudioSink::cb_event_t event) {
    NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;

    switch (event) {
        case MediaPlayer2Interface::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOSCallback();
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}
855
856void NuPlayer2::Renderer::notifyEOSCallback() {
857 Mutex::Autolock autoLock(mLock);
858
859 if (!mUseAudioCallback) {
860 return;
861 }
862
863 notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
864}
865
// Callback-path fill: copies queued PCM into |buffer| (up to |size| bytes),
// anchors/updates the media clock, maintains the written-frame count, and
// handles EOS entries. Runs on the audio callback thread; takes mLock.
// Returns the number of bytes copied.
size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return 0;
    }

    bool hasEOS = false;

    size_t sizeCopied = 0;
    bool firstEntry = true;
    QueueEntry *entry; // will be valid after while loop if hasEOS is set.
    while (sizeCopied < size && !mAudioQueue.empty()) {
        entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) { // EOS
            hasEOS = true;
            mAudioQueue.erase(mAudioQueue.begin());
            break;
        }

        if (firstEntry && entry->mOffset == 0) {
            firstEntry = false;
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
        }

        // Copy as much of this entry as still fits in the sink buffer.
        size_t copy = entry->mBuffer->size() - entry->mOffset;
        size_t sizeRemaining = size - sizeCopied;
        if (copy > sizeRemaining) {
            copy = sizeRemaining;
        }

        memcpy((char *)buffer + sizeCopied,
               entry->mBuffer->data() + entry->mOffset,
               copy);

        entry->mOffset += copy;
        if (entry->mOffset == entry->mBuffer->size()) {
            // Entry fully consumed; release it back to the decoder.
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
        }
        sizeCopied += copy;

        notifyIfMediaRenderingStarted_l();
    }

    if (mAudioFirstAnchorTimeMediaUs >= 0) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs =
            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
        // we don't know how much data we are queueing for offloaded tracks.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    }

    // for non-offloaded audio, we need to compute the frames written because
    // there is no EVENT_STREAM_END notification. The frames written gives
    // an estimate on the pending played out duration.
    if (!offloadingAudio()) {
        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    }

    if (hasEOS) {
        (new AMessage(kWhatStopAudioSink, this))->post();
        // As there is currently no EVENT_STREAM_END callback notification for
        // non-offloaded audio tracks, we need to post the EOS ourselves.
        if (!offloadingAudio()) {
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            ALOGV("fillAudioBuffer: notifyEOS_l "
                    "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld",
                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
            notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
        }
    }
    return sizeCopied;
}
948
// Finds the last EOS (or format-change marker carrying "eos") in the audio
// queue, answers/releases every entry up to it, and drops those entries.
// Used when the sink cannot play, so pending EOS notifications aren't lost.
void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        // A null buffer with no consumer notification is a plain EOS entry;
        // otherwise an "eos"-flagged notification marks a discontinuity.
        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == nullptr) {
                if (it->mNotifyConsumed == nullptr) {
                    // delay doesn't matter as we don't even have an AudioTrack
                    notifyEOS(true /* audio */, it->mFinalResult);
                } else {
                    // TAG for re-opening audio sink.
                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
                }
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}
980
981bool NuPlayer2::Renderer::onDrainAudioQueue() {
982 // do not drain audio during teardown as queued buffers may be invalid.
983 if (mAudioTornDown) {
984 return false;
985 }
986 // TODO: This call to getPosition checks if AudioTrack has been created
987 // in AudioSink before draining audio. If AudioTrack doesn't exist, then
988 // CHECKs on getPosition will fail.
989 // We still need to figure out why AudioTrack is not created when
990 // this function is called. One possible reason could be leftover
991 // audio. Another possible place is to check whether decoder
992 // has received INFO_FORMAT_CHANGED as the first buffer since
993 // AudioSink is opened there, and possible interactions with flush
994 // immediately after start. Investigate error message
995 // "vorbis_dsp_synthesis returned -135", along with RTSP.
996 uint32_t numFramesPlayed;
997 if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
998 // When getPosition fails, renderer will not reschedule the draining
999 // unless new samples are queued.
1000 // If we have pending EOS (or "eos" marker for discontinuities), we need
1001 // to post these now as NuPlayer2Decoder might be waiting for it.
1002 drainAudioQueueUntilLastEOS();
1003
1004 ALOGW("onDrainAudioQueue(): audio sink is not ready");
1005 return false;
1006 }
1007
1008#if 0
1009 ssize_t numFramesAvailableToWrite =
1010 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
1011
1012 if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
1013 ALOGI("audio sink underrun");
1014 } else {
1015 ALOGV("audio queue has %d frames left to play",
1016 mAudioSink->frameCount() - numFramesAvailableToWrite);
1017 }
1018#endif
1019
1020 uint32_t prevFramesWritten = mNumFramesWritten;
1021 while (!mAudioQueue.empty()) {
1022 QueueEntry *entry = &*mAudioQueue.begin();
1023
1024 if (entry->mBuffer == NULL) {
1025 if (entry->mNotifyConsumed != nullptr) {
1026 // TAG for re-open audio sink.
1027 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1028 mAudioQueue.erase(mAudioQueue.begin());
1029 continue;
1030 }
1031
1032 // EOS
1033 if (mPaused) {
1034 // Do not notify EOS when paused.
1035 // This is needed to avoid switch to next clip while in pause.
1036 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1037 return false;
1038 }
1039
1040 int64_t postEOSDelayUs = 0;
1041 if (mAudioSink->needsTrailingPadding()) {
1042 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1043 }
1044 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1045 mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1046
1047 mAudioQueue.erase(mAudioQueue.begin());
1048 entry = NULL;
1049 if (mAudioSink->needsTrailingPadding()) {
1050 // If we're not in gapless playback (i.e. through setNextPlayer), we
1051 // need to stop the track here, because that will play out the last
1052 // little bit at the end of the file. Otherwise short files won't play.
1053 mAudioSink->stop();
1054 mNumFramesWritten = 0;
1055 }
1056 return false;
1057 }
1058
1059 mLastAudioBufferDrained = entry->mBufferOrdinal;
1060
1061 // ignore 0-sized buffer which could be EOS marker with no data
1062 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1063 int64_t mediaTimeUs;
1064 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1065 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1066 mediaTimeUs / 1E6);
1067 onNewAudioMediaTime(mediaTimeUs);
1068 }
1069
1070 size_t copy = entry->mBuffer->size() - entry->mOffset;
1071
1072 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1073 copy, false /* blocking */);
1074 if (written < 0) {
1075 // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1076 if (written == WOULD_BLOCK) {
1077 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1078 } else {
1079 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1080 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1081 // true, in which case the NuPlayer2 will handle the reconnect.
1082 notifyAudioTearDown(kDueToError);
1083 }
1084 break;
1085 }
1086
1087 entry->mOffset += written;
1088 size_t remainder = entry->mBuffer->size() - entry->mOffset;
1089 if ((ssize_t)remainder < mAudioSink->frameSize()) {
1090 if (remainder > 0) {
1091 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1092 remainder);
1093 entry->mOffset += remainder;
1094 copy -= remainder;
1095 }
1096
1097 entry->mNotifyConsumed->post();
1098 mAudioQueue.erase(mAudioQueue.begin());
1099
1100 entry = NULL;
1101 }
1102
1103 size_t copiedFrames = written / mAudioSink->frameSize();
1104 mNumFramesWritten += copiedFrames;
1105
1106 {
1107 Mutex::Autolock autoLock(mLock);
1108 int64_t maxTimeMedia;
1109 maxTimeMedia =
1110 mAnchorTimeMediaUs +
1111 (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1112 * 1000LL * mAudioSink->msecsPerFrame());
1113 mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1114
1115 notifyIfMediaRenderingStarted_l();
1116 }
1117
1118 if (written != (ssize_t)copy) {
1119 // A short count was received from AudioSink::write()
1120 //
1121 // AudioSink write is called in non-blocking mode.
1122 // It may return with a short count when:
1123 //
1124 // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1125 // discarded.
1126 // 2) The data to be copied exceeds the available buffer in AudioSink.
1127 // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1128 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1129
1130 // (Case 1)
1131 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it
1132 // needs to fail, as we should not carry over fractional frames between calls.
1133 CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1134
1135 // (Case 2, 3, 4)
1136 // Return early to the caller.
1137 // Beware of calling immediately again as this may busy-loop if you are not careful.
1138 ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1139 break;
1140 }
1141 }
1142
1143 // calculate whether we need to reschedule another write.
1144 bool reschedule = !mAudioQueue.empty()
1145 && (!mPaused
1146 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1147 //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u",
1148 // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1149 return reschedule;
1150}
1151
1152int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1153 int32_t sampleRate = offloadingAudio() ?
1154 mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1155 if (sampleRate == 0) {
1156 ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1157 return 0;
1158 }
Wei Jia82b657f2019-02-12 18:11:36 -08001159 return (int64_t)(numFrames * 1000000LL / sampleRate);
Wei Jia53692fa2017-12-11 10:33:46 -08001160}
1161
1162// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1163int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1164 int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1165 if (mUseVirtualAudioSink) {
1166 int64_t nowUs = ALooper::GetNowUs();
1167 int64_t mediaUs;
1168 if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
Chih-Hung Hsiehd42529d2018-12-11 13:53:10 -08001169 return 0LL;
Wei Jia53692fa2017-12-11 10:33:46 -08001170 } else {
1171 return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1172 }
1173 }
1174
1175 const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1176 int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
1177 if (pendingUs < 0) {
1178 // This shouldn't happen unless the timestamp is stale.
1179 ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1180 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1181 __func__, (long long)pendingUs,
1182 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1183 pendingUs = 0;
1184 }
1185 return pendingUs;
1186}
1187
1188int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1189 int64_t realUs;
1190 if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1191 // If failed to get current position, e.g. due to audio clock is
1192 // not ready, then just play out video immediately without delay.
1193 return nowUs;
1194 }
1195 return realUs;
1196}
1197
// Records a new audio media timestamp and periodically re-anchors the media
// clock to the audio sink's playout position. Also detects a stuck AudioSink
// and switches to a system-clock-paced "virtual" sink in that case.
// Takes mLock itself; callers must not hold it.
void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        // A positive sink position proves the sink has actually rendered data.
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            // Anchor the media clock at "now", offsetting the new timestamp
            // by the audio still pending in the sink.
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            // Rate-limit anchor updates to kMinimumAudioClockUpdatePeriodUs.
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    // Remember the anchor point so the next drain can compute deltas from it.
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}
1242
// Called without mLock acquired.
// Schedules a kWhatDrainVideoQueue message for the buffer at the head of the
// video queue. Three paths: EOS posts immediately; FLAG_REAL_TIME schedules
// against the wall clock; otherwise the media clock's timer is used. No-op if
// a drain is already pending, the queues are still being synced, or we are
// paused after the first sample.
void NuPlayer2::Renderer::postDrainVideoQueue() {
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    // Stamp the message so stale drains after a flush can be discarded.
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    if (mFlags & FLAG_REAL_TIME) {
        int64_t realTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));

        // Snap the presentation time onto the vsync grid (scheduler works in ns).
        realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        int64_t delayUs = realTimeUs - nowUs;

        ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
        // post 2 display refreshes before rendering is due
        msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

        mDrainVideoQueuePending = true;
        return;
    }

    int64_t mediaTimeUs;
    CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

    {
        Mutex::Autolock autoLock(mLock);
        // If nothing has anchored the clock yet (e.g. video-only stream),
        // anchor it on this frame's timestamp.
        if (mAnchorTimeMediaUs < 0) {
            mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
            mAnchorTimeMediaUs = mediaTimeUs;
        }
    }
    mNextVideoTimeMediaUs = mediaTimeUs;
    if (!mHasAudio) {
        // smooth out videos >= 10fps
        mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
    }

    if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
        // First frame, or frame predating the first audio anchor: drain now.
        msg->post();
    } else {
        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        // post 2 display refreshes before rendering is due
        mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
    }

    mDrainVideoQueuePending = true;
}
1313
// Renders or drops the buffer at the head of the video queue. Computes how
// late the frame is relative to its target real time, tells the decoder
// whether to render it via the "render"/"timestampNs" fields on the
// notifyConsumed message, and handles the EOS marker.
void NuPlayer2::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    int64_t mediaTimeUs = -1;
    if (mFlags & FLAG_REAL_TIME) {
        // Buffer timestamps are already in the real-time domain.
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }
    // Align the render time to the vsync grid (scheduler works in ns).
    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

    bool tooLate = false;

    if (!mPaused) {
        setVideoLateByUs(nowUs - realTimeUs);
        // Drop frames that are more than 40ms behind their target time.
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);

            if (!(mFlags & FLAG_REAL_TIME)
                    && mLastAudioMediaTimeUs != -1
                    && mediaTimeUs > mLastAudioMediaTimeUs) {
                // If audio ends before video, video continues to drive media clock.
                // Also smooth out videos >= 10fps.
                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
            }
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            clearAnchorTime();
        }
    }

    // Always render the first video frame while keeping stats on A/V sync.
    if (!mVideoSampleReceived) {
        realTimeUs = nowUs;
        tooLate = false;
    }

    // Hand the buffer back to the decoder with the render decision.
    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}
1401
1402void NuPlayer2::Renderer::notifyVideoRenderingStart() {
1403 sp<AMessage> notify = mNotify->dup();
1404 notify->setInt32("what", kWhatVideoRenderingStart);
1405 notify->post();
1406}
1407
1408void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1409 Mutex::Autolock autoLock(mLock);
1410 notifyEOS_l(audio, finalResult, delayUs);
1411}
1412
// Delivers an end-of-stream notification to the player. Must be called with
// mLock held (see notifyEOS()). A delayed audio EOS is re-posted to the looper
// as a kWhatEOS message carrying the current EOS generation so it can be
// dropped if a flush intervenes. On audio EOS, ownership of the media clock is
// handed over to video.
void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
    if (audio && delayUs > 0) {
        sp<AMessage> msg = new AMessage(kWhatEOS, this);
        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
        msg->setInt32("finalResult", finalResult);
        msg->post(delayUs);
        return;
    }
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatEOS);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->setInt32("finalResult", finalResult);
    notify->post(delayUs);

    if (audio) {
        // Video might outlive audio. Clear anchor to enable video only case.
        mAnchorTimeMediaUs = -1;
        mHasAudio = false;
        if (mNextVideoTimeMediaUs >= 0) {
            int64_t mediaUs = 0;
            int64_t nowUs = ALooper::GetNowUs();
            status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
            if (result == OK) {
                // Clock is still anchored: make sure the next video frame's
                // time is reachable.
                if (mNextVideoTimeMediaUs > mediaUs) {
                    mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
                }
            } else {
                // Clock lost its anchor: re-anchor on the next video frame,
                // allowing 100ms of headroom (matches the >=10fps smoothing
                // used elsewhere).
                mMediaClock->updateAnchor(
                        mNextVideoTimeMediaUs, nowUs, mNextVideoTimeMediaUs + 100000);
            }
        }
    }
}
1446
1447void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1448 sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1449 msg->setInt32("reason", reason);
1450 msg->post();
1451}
1452
// Enqueues a decoded buffer (audio or video) and kicks off draining. While
// mSyncQueues is set, also aligns the two queues' start times by dropping
// leading audio that is more than 100ms ahead of the first video frame.
void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // Buffers from before a flush carry a stale queueGeneration; return them.
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        // Lazily create the vsync-based frame scheduler on first video buffer.
        if (mVideoScheduler == NULL) {
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<RefBase> obj;
    CHECK(msg->findObject("buffer", &obj));
    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    // First critical section: push the entry and schedule a drain. The lock
    // is released at the end of the branch before being re-acquired below.
    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    // Second critical section: queue-sync bookkeeping.
    Mutex::Autolock autoLock(mLock);
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000LL) {
        // Audio data starts More than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    // Queues are close enough in time; stop syncing and start draining both.
    syncQueuesDone_l();
}
1533
// Ends the queue-sync phase and kicks off draining of whatever is queued.
// Must be called with mLock held (note the _l suffix). The lock is
// temporarily dropped around postDrainVideoQueue(), which is documented as
// "Called without mLock acquired" and takes the lock internally.
void NuPlayer2::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        // Already out of sync mode; nothing to do.
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}
1551
// Enqueues an EOS marker (an entry with no buffer) on the audio or video
// queue. If the receiving queue is empty while queue-syncing is active, the
// sync phase is ended first so the marker can be drained.
void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // Ignore EOS markers from before a flush (stale queueGeneration).
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    // An entry with mBuffer left NULL is the EOS marker; mFinalResult carries
    // the stream's terminating status.
    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // Video branch only takes the lock when it must end the sync phase;
        // postDrainVideoQueue() manages its own locking.
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}
1583
// Flushes the audio or video path: returns all queued buffers to the decoder,
// bumps the drain/EOS generations so in-flight messages are discarded, resets
// the relevant sink/scheduler state, and optionally notifies completion.
void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            // Consume the one-shot "notify on complete" flag set by flush().
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
            mLastAudioMediaTimeUs = -1;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
    }
    clearAnchorTime();

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            // Invalidate pending drain and delayed-EOS messages.
            ++mAudioDrainGeneration;
            ++mAudioEOSGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
        // Force re-anchoring of the audio clock on the next media time.
        mNextAudioClockUpdateTimeUs = -1;
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}
1673
1674void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) {
1675 while (!queue->empty()) {
1676 QueueEntry *entry = &*queue->begin();
1677
1678 if (entry->mBuffer != NULL) {
1679 entry->mNotifyConsumed->post();
1680 } else if (entry->mNotifyConsumed != nullptr) {
1681 // Is it needed to open audio sink now?
1682 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1683 }
1684
1685 queue->erase(queue->begin());
1686 entry = NULL;
1687 }
1688}
1689
1690void NuPlayer2::Renderer::notifyFlushComplete(bool audio) {
1691 sp<AMessage> notify = mNotify->dup();
1692 notify->setInt32("what", kWhatFlushComplete);
1693 notify->setInt32("audio", static_cast<int32_t>(audio));
1694 notify->post();
1695}
1696
1697bool NuPlayer2::Renderer::dropBufferIfStale(
1698 bool audio, const sp<AMessage> &msg) {
1699 int32_t queueGeneration;
1700 CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1701
1702 if (queueGeneration == getQueueGeneration(audio)) {
1703 return false;
1704 }
1705
1706 sp<AMessage> notifyConsumed;
1707 if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1708 notifyConsumed->post();
1709 }
1710
1711 return true;
1712}
1713
1714void NuPlayer2::Renderer::onAudioSinkChanged() {
1715 if (offloadingAudio()) {
1716 return;
1717 }
1718 CHECK(!mDrainAudioQueuePending);
1719 mNumFramesWritten = 0;
1720 mAnchorNumFramesWritten = -1;
1721 uint32_t written;
1722 if (mAudioSink->getFramesWritten(&written) == OK) {
1723 mNumFramesWritten = written;
1724 }
1725}
1726
1727void NuPlayer2::Renderer::onDisableOffloadAudio() {
1728 Mutex::Autolock autoLock(mLock);
1729 mFlags &= ~FLAG_OFFLOAD_AUDIO;
1730 ++mAudioDrainGeneration;
1731 if (mAudioRenderingStartGeneration != -1) {
1732 prepareForMediaRenderingStart_l();
1733 }
1734}
1735
1736void NuPlayer2::Renderer::onEnableOffloadAudio() {
1737 Mutex::Autolock autoLock(mLock);
1738 mFlags |= FLAG_OFFLOAD_AUDIO;
1739 ++mAudioDrainGeneration;
1740 if (mAudioRenderingStartGeneration != -1) {
1741 prepareForMediaRenderingStart_l();
1742 }
1743}
1744
// Pauses rendering: freezes the media clock, cancels pending drains, and
// pauses the AudioSink. Audio buffers may continue to be filled while paused
// (the audio drain generation is intentionally left unchanged).
void NuPlayer2::Renderer::onPause() {
    // Pausing twice is a no-op.
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // we do not increment audio drain generation so that we fill audio buffer during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        // Rate 0.0 freezes the media clock while paused.
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    mAudioSink->pause();
    // In offload mode, hold a wakelock for a bounded time so the device can
    // still tear down the offloaded track if the pause lasts too long.
    startAudioOffloadPauseTimeout();

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}
1769
// Resumes rendering after onPause(): restarts the AudioSink, restores the
// media clock rate, and re-schedules drains for any queued buffers.
void NuPlayer2::Renderer::onResume() {
    // Resuming while not paused is a no-op.
    if (!mPaused) {
        return;
    }

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    cancelAudioOffloadPauseTimeout();
    if (mAudioSink->ready()) {
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            // Let NuPlayer2 rebuild the audio path.
            notifyAudioTearDown(kDueToError);
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // rendering started message may have been delayed if we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l();
        }
        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        // Un-freeze the media clock at the user-requested rate.
        mMediaClock->setPlaybackRate(mPlaybackRate);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    // postDrainVideoQueue() must be called without mLock held.
    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}
1808
1809void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
1810 if (mVideoScheduler == NULL) {
1811 mVideoScheduler = new VideoFrameScheduler();
1812 }
1813 mVideoScheduler->init(fps);
1814}
1815
1816int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) {
1817 Mutex::Autolock autoLock(mLock);
1818 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1819}
1820
1821int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) {
1822 Mutex::Autolock autoLock(mLock);
1823 return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1824}
1825
1826bool NuPlayer2::Renderer::getSyncQueues() {
1827 Mutex::Autolock autoLock(mLock);
1828 return mSyncQueues;
1829}
1830
1831void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1832 if (mAudioTornDown) {
1833 return;
1834 }
1835 mAudioTornDown = true;
1836
1837 int64_t currentPositionUs;
1838 sp<AMessage> notify = mNotify->dup();
1839 if (getCurrentPosition(&currentPositionUs) == OK) {
1840 notify->setInt64("positionUs", currentPositionUs);
1841 }
1842
1843 mAudioSink->stop();
1844 mAudioSink->flush();
1845
1846 notify->setInt32("what", kWhatAudioTearDown);
1847 notify->setInt32("reason", reason);
1848 notify->post();
1849}
1850
1851void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() {
1852 if (offloadingAudio()) {
1853 mWakeLock->acquire();
1854 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1855 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1856 msg->post(kOffloadPauseMaxUs);
1857 }
1858}
1859
// Cancels a pending offload-pause timeout by bumping its generation, and
// releases the wakelock taken in startAudioOffloadPauseTimeout().
void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() {
    // We may have called startAudioOffloadPauseTimeout() without
    // the AudioSink open and with offloadingAudio enabled.
    //
    // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
    // we always release the wakelock and increment the pause timeout generation.
    //
    // Note: The acquired wakelock prevents the device from suspending
    // immediately after offload pause (in case a resume happens shortly thereafter).
    mWakeLock->release(true);
    ++mAudioOffloadPauseTimeoutGeneration;
}
1872
1873status_t NuPlayer2::Renderer::onOpenAudioSink(
1874 const sp<AMessage> &format,
1875 bool offloadOnly,
1876 bool hasVideo,
1877 uint32_t flags,
1878 bool isStreaming) {
1879 ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1880 offloadOnly, offloadingAudio());
1881 bool audioSinkChanged = false;
1882
1883 int32_t numChannels;
1884 CHECK(format->findInt32("channel-count", &numChannels));
1885
1886 int32_t channelMask;
1887 if (!format->findInt32("channel-mask", &channelMask)) {
1888 // signal to the AudioSink to derive the mask from count.
1889 channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1890 }
1891
1892 int32_t sampleRate;
1893 CHECK(format->findInt32("sample-rate", &sampleRate));
1894
Wei Jia4a16e2a2019-01-04 15:26:48 -08001895 // read pcm encoding from MediaCodec output format, if available
1896 int32_t pcmEncoding;
1897 audio_format_t audioFormat =
1898 format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ?
1899 audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT;
1900
Wei Jia53692fa2017-12-11 10:33:46 -08001901 if (offloadingAudio()) {
Wei Jia53692fa2017-12-11 10:33:46 -08001902 AString mime;
1903 CHECK(format->findString("mime", &mime));
1904 status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1905
1906 if (err != OK) {
1907 ALOGE("Couldn't map mime \"%s\" to a valid "
1908 "audio_format", mime.c_str());
1909 onDisableOffloadAudio();
1910 } else {
1911 ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1912 mime.c_str(), audioFormat);
1913
1914 int avgBitRate = -1;
1915 format->findInt32("bitrate", &avgBitRate);
1916
1917 int32_t aacProfile = -1;
1918 if (audioFormat == AUDIO_FORMAT_AAC
1919 && format->findInt32("aac-profile", &aacProfile)) {
1920 // Redefine AAC format as per aac profile
1921 mapAACProfileToAudioFormat(
1922 audioFormat,
1923 aacProfile);
1924 }
1925
1926 audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1927 offloadInfo.duration_us = -1;
1928 format->findInt64(
1929 "durationUs", &offloadInfo.duration_us);
1930 offloadInfo.sample_rate = sampleRate;
1931 offloadInfo.channel_mask = channelMask;
1932 offloadInfo.format = audioFormat;
1933 offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1934 offloadInfo.bit_rate = avgBitRate;
1935 offloadInfo.has_video = hasVideo;
1936 offloadInfo.is_streaming = isStreaming;
1937
1938 if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1939 ALOGV("openAudioSink: no change in offload mode");
1940 // no change from previous configuration, everything ok.
1941 return OK;
1942 }
1943 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1944
1945 ALOGV("openAudioSink: try to open AudioSink in offload mode");
1946 uint32_t offloadFlags = flags;
1947 offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1948 offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1949 audioSinkChanged = true;
1950 mAudioSink->close();
1951
1952 err = mAudioSink->open(
1953 sampleRate,
1954 numChannels,
1955 (audio_channel_mask_t)channelMask,
1956 audioFormat,
1957 0 /* bufferCount - unused */,
1958 &NuPlayer2::Renderer::AudioSinkCallback,
1959 this,
1960 (audio_output_flags_t)offloadFlags,
1961 &offloadInfo);
1962
1963 if (err == OK) {
1964 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1965 }
1966
1967 if (err == OK) {
1968 // If the playback is offloaded to h/w, we pass
1969 // the HAL some metadata information.
1970 // We don't want to do this for PCM because it
1971 // will be going through the AudioFlinger mixer
1972 // before reaching the hardware.
1973 // TODO
1974 mCurrentOffloadInfo = offloadInfo;
1975 if (!mPaused) { // for preview mode, don't start if paused
1976 err = mAudioSink->start();
1977 }
1978 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1979 }
1980 if (err != OK) {
1981 // Clean up, fall back to non offload mode.
1982 mAudioSink->close();
1983 onDisableOffloadAudio();
1984 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1985 ALOGV("openAudioSink: offload failed");
1986 if (offloadOnly) {
1987 notifyAudioTearDown(kForceNonOffload);
1988 }
1989 } else {
1990 mUseAudioCallback = true; // offload mode transfers data through callback
1991 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
1992 }
1993 }
1994 }
1995 if (!offloadOnly && !offloadingAudio()) {
1996 ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1997 uint32_t pcmFlags = flags;
1998 pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1999
2000 const PcmInfo info = {
2001 (audio_channel_mask_t)channelMask,
2002 (audio_output_flags_t)pcmFlags,
Wei Jia4a16e2a2019-01-04 15:26:48 -08002003 audioFormat,
Wei Jia53692fa2017-12-11 10:33:46 -08002004 numChannels,
2005 sampleRate
2006 };
2007 if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
2008 ALOGV("openAudioSink: no change in pcm mode");
2009 // no change from previous configuration, everything ok.
2010 return OK;
2011 }
2012
2013 audioSinkChanged = true;
2014 mAudioSink->close();
2015 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2016 // Note: It is possible to set up the callback, but not use it to send audio data.
2017 // This requires a fix in AudioSink to explicitly specify the transfer mode.
2018 mUseAudioCallback = getUseAudioCallbackSetting();
2019 if (mUseAudioCallback) {
2020 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
2021 }
2022
2023 // Compute the desired buffer size.
2024 // For callback mode, the amount of time before wakeup is about half the buffer size.
2025 const uint32_t frameCount =
2026 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
2027
2028 // The doNotReconnect means AudioSink will signal back and let NuPlayer2 to re-construct
2029 // AudioSink. We don't want this when there's video because it will cause a video seek to
2030 // the previous I frame. But we do want this when there's only audio because it will give
2031 // NuPlayer2 a chance to switch from non-offload mode to offload mode.
2032 // So we only set doNotReconnect when there's no video.
2033 const bool doNotReconnect = !hasVideo;
2034
2035 // We should always be able to set our playback settings if the sink is closed.
2036 LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
2037 "onOpenAudioSink: can't set playback rate on closed sink");
2038 status_t err = mAudioSink->open(
2039 sampleRate,
2040 numChannels,
2041 (audio_channel_mask_t)channelMask,
Wei Jia4a16e2a2019-01-04 15:26:48 -08002042 audioFormat,
Wei Jia53692fa2017-12-11 10:33:46 -08002043 0 /* bufferCount - unused */,
2044 mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL,
2045 mUseAudioCallback ? this : NULL,
2046 (audio_output_flags_t)pcmFlags,
2047 NULL,
2048 doNotReconnect,
2049 frameCount);
2050 if (err != OK) {
2051 ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2052 mAudioSink->close();
2053 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2054 return err;
2055 }
2056 mCurrentPcmInfo = info;
2057 if (!mPaused) { // for preview mode, don't start if paused
2058 mAudioSink->start();
2059 }
2060 }
2061 if (audioSinkChanged) {
2062 onAudioSinkChanged();
2063 }
2064 mAudioTornDown = false;
2065 return OK;
2066}
2067
2068void NuPlayer2::Renderer::onCloseAudioSink() {
2069 mAudioSink->close();
2070 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2071 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2072}
2073
2074void NuPlayer2::Renderer::onChangeAudioFormat(
2075 const sp<AMessage> &meta, const sp<AMessage> &notify) {
2076 sp<AMessage> format;
2077 CHECK(meta->findMessage("format", &format));
2078
2079 int32_t offloadOnly;
2080 CHECK(meta->findInt32("offload-only", &offloadOnly));
2081
2082 int32_t hasVideo;
2083 CHECK(meta->findInt32("has-video", &hasVideo));
2084
2085 uint32_t flags;
2086 CHECK(meta->findInt32("flags", (int32_t *)&flags));
2087
2088 uint32_t isStreaming;
2089 CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2090
2091 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2092
2093 if (err != OK) {
2094 notify->setInt32("err", err);
2095 }
2096 notify->post();
2097}
2098
2099} // namespace android