blob: fcc7fa5e31ac4879c52af3fa9cf6d05628898597 [file] [log] [blame]
Wei Jia53692fa2017-12-11 10:33:46 -08001/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayer2Renderer"
19#include <utils/Log.h>
20
21#include "JWakeLock.h"
22#include "NuPlayer2Renderer.h"
23#include <algorithm>
24#include <cutils/properties.h>
25#include <media/stagefright/foundation/ADebug.h>
26#include <media/stagefright/foundation/AMessage.h>
27#include <media/stagefright/foundation/AUtils.h>
28#include <media/stagefright/MediaClock.h>
Wei Jia4a16e2a2019-01-04 15:26:48 -080029#include <media/stagefright/MediaCodecConstants.h>
30#include <media/stagefright/MediaDefs.h>
Wei Jia53692fa2017-12-11 10:33:46 -080031#include <media/stagefright/MediaErrors.h>
Wei Jia53692fa2017-12-11 10:33:46 -080032#include <media/stagefright/Utils.h>
33#include <media/stagefright/VideoFrameScheduler.h>
34#include <media/MediaCodecBuffer.h>
35
36#include <inttypes.h>
37
38namespace android {
39
40/*
41 * Example of common configuration settings in shell script form
42
43 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
44 adb shell setprop audio.offload.disable 1
45
46 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
47 adb shell setprop audio.offload.video 1
48
49 #Use audio callbacks for PCM data
50 adb shell setprop media.stagefright.audio.cbk 1
51
52 #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
53 adb shell setprop media.stagefright.audio.deep 1
54
55 #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
56 adb shell setprop media.stagefright.audio.sink 1000
57
58 * These configurations take effect for the next track played (not the current track).
59 */
60
61static inline bool getUseAudioCallbackSetting() {
62 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
63}
64
65static inline int32_t getAudioSinkPcmMsSetting() {
66 return property_get_int32(
67 "media.stagefright.audio.sink", 500 /* default_value */);
68}
69
// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000LL;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;

// Minimum spacing between successive audio-clock anchor updates, 20 msec.
static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;

// static
// Placeholder PCM configuration used until the audio sink has been opened
// with a real format (see mCurrentPcmInfo in the constructor).
const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
// Minimum delay between position updates, 100 msec.
const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;
Wei Jia53692fa2017-12-11 10:33:46 -080090
Wei Jia4a16e2a2019-01-04 15:26:48 -080091static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
92 switch (pcmEncoding) {
93 case kAudioEncodingPcmFloat:
94 return AUDIO_FORMAT_PCM_FLOAT;
95 case kAudioEncodingPcm16bit:
96 return AUDIO_FORMAT_PCM_16_BIT;
97 case kAudioEncodingPcm8bit:
98 return AUDIO_FORMAT_PCM_8_BIT; // TODO: do we want to support this?
99 default:
100 ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
101 return AUDIO_FORMAT_INVALID;
102 }
103}
104
// Constructs the renderer.
//
// @param sink       Audio sink all decoded audio is written to; never used for video.
// @param mediaClock Shared clock that anchors A/V sync; must be non-NULL (CHECKed below).
// @param notify     Message duplicated and posted back to the player for renderer events.
// @param flags      FLAG_* renderer flags stored verbatim in mFlags.
NuPlayer2::Renderer::Renderer(
        const sp<MediaPlayer2Interface::AudioSink> &sink,
        const sp<MediaClock> &mediaClock,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      // Generation counters: bumped on flush so that stale queued messages
      // can be recognized and dropped.
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mMediaClock(mediaClock),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      // -1 means "no anchor yet" for the time bookkeeping below.
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0LL),
      mNextVideoTimeMediaUs(-1),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new JWakeLock()) {
    CHECK(mediaClock != NULL);
    // Seed the clock with the initial (default) playback speed.
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
}
155
NuPlayer2::Renderer::~Renderer() {
    // In offload mode the sink owns DSP resources; stop/flush/close in that
    // order so the track is fully released before the renderer goes away.
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }

    // Try to avoid racing condition in case callback is still on.
    Mutex::Autolock autoLock(mLock);
    if (mUseAudioCallback) {
        // Drop any buffers a concurrent AudioSink callback could still touch.
        flushQueue(&mAudioQueue);
        flushQueue(&mVideoQueue);
    }
    // Release strong references while still holding mLock.
    mWakeLock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}
174
175void NuPlayer2::Renderer::queueBuffer(
176 bool audio,
177 const sp<MediaCodecBuffer> &buffer,
178 const sp<AMessage> &notifyConsumed) {
179 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
180 msg->setInt32("queueGeneration", getQueueGeneration(audio));
181 msg->setInt32("audio", static_cast<int32_t>(audio));
182 msg->setObject("buffer", buffer);
183 msg->setMessage("notifyConsumed", notifyConsumed);
184 msg->post();
185}
186
187void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
188 CHECK_NE(finalResult, (status_t)OK);
189
190 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
191 msg->setInt32("queueGeneration", getQueueGeneration(audio));
192 msg->setInt32("audio", static_cast<int32_t>(audio));
193 msg->setInt32("finalResult", finalResult);
194 msg->post();
195}
196
197status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
198 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
199 writeToAMessage(msg, rate);
200 sp<AMessage> response;
201 status_t err = msg->postAndAwaitResponse(&response);
202 if (err == OK && response != NULL) {
203 CHECK(response->findInt32("err", &err));
204 }
205 return err;
206}
207
// Looper-thread handler for setPlaybackSettings(). A speed of 0 is treated as
// pause; otherwise the rate is forwarded to the AudioSink (if open) and the
// MediaClock. Returns the AudioSink error on failure, leaving state unchanged.
status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
        // have to correspond to the any non-0 speed (e.g old speed). Keep
        // settings nonetheless, using the old speed, in case audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }

    // Push the rate to the sink first; only commit locally if the sink took it.
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
    return OK;
}
231
232status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
233 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
234 sp<AMessage> response;
235 status_t err = msg->postAndAwaitResponse(&response);
236 if (err == OK && response != NULL) {
237 CHECK(response->findInt32("err", &err));
238 if (err == OK) {
239 readFromAMessage(response, rate);
240 }
241 }
242 return err;
243}
244
// Looper-thread handler for getPlaybackSettings(). Prefers the AudioSink's
// view of the rate (it may have quantized small changes); reports speed 0
// while paused. Falls back to the cached settings when no sink is open.
status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate");
            }
            // get playback settings used by audiosink, as it may be
            // slightly off due to audiosink not taking small changes.
            mPlaybackSettings = *rate;
            if (mPaused) {
                // Paused playback is surfaced to callers as speed 0.
                rate->mSpeed = 0.f;
            }
        }
        return err;
    }
    *rate = mPlaybackSettings;
    return OK;
}
264
265status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
266 sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
267 writeToAMessage(msg, sync, videoFpsHint);
268 sp<AMessage> response;
269 status_t err = msg->postAndAwaitResponse(&response);
270 if (err == OK && response != NULL) {
271 CHECK(response->findInt32("err", &err));
272 }
273 return err;
274}
275
276status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
277 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
278 return BAD_VALUE;
279 }
280 // TODO: support sync sources
281 return INVALID_OPERATION;
282}
283
284status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
285 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
286 sp<AMessage> response;
287 status_t err = msg->postAndAwaitResponse(&response);
288 if (err == OK && response != NULL) {
289 CHECK(response->findInt32("err", &err));
290 if (err == OK) {
291 readFromAMessage(response, sync, videoFps);
292 }
293 }
294 return err;
295}
296
297status_t NuPlayer2::Renderer::onGetSyncSettings(
298 AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
299 *sync = mSyncSettings;
300 *videoFps = -1.f;
301 return OK;
302}
303
// Flushes one stream's queue. Called from outside the looper: generation
// counters are bumped under mLock so that in-flight queue/drain messages for
// the old data become stale, then the actual queue teardown runs on the
// looper via kWhatFlush.
void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            // |= so a pending completion request isn't lost by a second flush.
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        // Invalidate the A/V clock anchor; it will be re-established by the
        // first sample rendered after the flush.
        mMediaClock->clearAnchor();
        mVideoLateByUs = 0;
        mNextVideoTimeMediaUs = -1;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}
328
// Intentionally a no-op in NuPlayer2; kept for interface compatibility.
// NOTE(review): discontinuities appear to be handled via flush()/queueEOS()
// instead — confirm against callers.
void NuPlayer2::Renderer::signalTimeDiscontinuity() {
}
331
332void NuPlayer2::Renderer::signalDisableOffloadAudio() {
333 (new AMessage(kWhatDisableOffloadAudio, this))->post();
334}
335
336void NuPlayer2::Renderer::signalEnableOffloadAudio() {
337 (new AMessage(kWhatEnableOffloadAudio, this))->post();
338}
339
340void NuPlayer2::Renderer::pause() {
341 (new AMessage(kWhatPause, this))->post();
342}
343
344void NuPlayer2::Renderer::resume() {
345 (new AMessage(kWhatResume, this))->post();
346}
347
348void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
349 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
350 msg->setFloat("frame-rate", fps);
351 msg->post();
352}
353
// Called on any threads without mLock acquired.
// Returns the current media position. If the MediaClock has no anchor yet,
// tries to bootstrap one from the AudioSink's playback timestamp before
// retrying; returns the original error if no anchor can be established.
status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            // No audio has been rendered; nothing to anchor against.
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}
383
// Forgets the first-audio anchor (e.g. on flush). Caller must hold mLock.
void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}
388
389void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
390 if (mAudioFirstAnchorTimeMediaUs == -1) {
391 mAudioFirstAnchorTimeMediaUs = mediaUs;
392 mMediaClock->setStartingTimeMedia(mediaUs);
393 }
394}
395
// Called on renderer looper.
// Drops the MediaClock anchor and the bookkeeping tied to it.
void NuPlayer2::Renderer::clearAnchorTime() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}
402
// Records how late video rendering is running; thread-safe via mLock.
void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}
407
// Returns how late video rendering is running; thread-safe via mLock.
int64_t NuPlayer2::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}
412
413status_t NuPlayer2::Renderer::openAudioSink(
414 const sp<AMessage> &format,
415 bool offloadOnly,
416 bool hasVideo,
417 uint32_t flags,
418 bool *isOffloaded,
419 bool isStreaming) {
420 sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
421 msg->setMessage("format", format);
422 msg->setInt32("offload-only", offloadOnly);
423 msg->setInt32("has-video", hasVideo);
424 msg->setInt32("flags", flags);
425 msg->setInt32("isStreaming", isStreaming);
426
427 sp<AMessage> response;
428 status_t postStatus = msg->postAndAwaitResponse(&response);
429
430 int32_t err;
431 if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
432 err = INVALID_OPERATION;
433 } else if (err == OK && isOffloaded != NULL) {
434 int32_t offload;
435 CHECK(response->findInt32("offload", &offload));
436 *isOffloaded = (offload != 0);
437 }
438 return err;
439}
440
441void NuPlayer2::Renderer::closeAudioSink() {
442 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
443
444 sp<AMessage> response;
445 msg->postAndAwaitResponse(&response);
446}
447
448void NuPlayer2::Renderer::changeAudioFormat(
449 const sp<AMessage> &format,
450 bool offloadOnly,
451 bool hasVideo,
452 uint32_t flags,
453 bool isStreaming,
454 const sp<AMessage> &notify) {
455 sp<AMessage> meta = new AMessage;
456 meta->setMessage("format", format);
457 meta->setInt32("offload-only", offloadOnly);
458 meta->setInt32("has-video", hasVideo);
459 meta->setInt32("flags", flags);
460 meta->setInt32("isStreaming", isStreaming);
461
462 sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
463 msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
464 msg->setMessage("notify", notify);
465 msg->setMessage("meta", meta);
466 msg->post();
467}
468
// Renderer looper dispatcher. All renderer state changes are serialized here
// (or in the on...() handlers this calls) on the looper thread. Messages
// carry generation numbers where they could outlive a flush; stale ones are
// dropped by comparing against the current generation.
void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            uint32_t isStreaming;
            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

            // Reply with the open status and whether offload was selected.
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            // Posted from fillAudioBuffer() on EOS in callback mode.
            mAudioSink->stop();
            break;
        }

        case kWhatChangeAudioFormat:
        {
            int32_t queueGeneration;
            CHECK(msg->findInt32("queueGeneration", &queueGeneration));

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));

            if (offloadingAudio()) {
                ALOGW("changeAudioFormat should NOT be called in offload mode");
                notify->setInt32("err", INVALID_OPERATION);
                notify->post();
                break;
            }

            sp<AMessage> meta;
            CHECK(msg->findMessage("meta", &meta));

            // Apply immediately if the request is stale (pre-flush) or the
            // queue is empty; otherwise queue a format-change marker so the
            // sink is reopened only after existing audio has drained.
            if (queueGeneration != getQueueGeneration(true /* audio */)
                    || mAudioQueue.empty()) {
                onChangeAudioFormat(meta, notify);
                break;
            }

            QueueEntry entry;
            entry.mNotifyConsumed = notify;
            entry.mMeta = meta;

            Mutex::Autolock autoLock(mLock);
            mAudioQueue.push_back(entry);
            postDrainAudioQueue_l();

            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;  // stale drain request from before a flush
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                // Handle AudioTrack race when start is immediately called after flush.
                uint32_t numFramesPendingPlayout =
                    (mNumFramesWritten > numFramesPlayed ?
                        mNumFramesWritten - numFramesPlayed : 0);

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackRate > 1.0f) {
                    delayUs /= mPlaybackRate;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;  // stale drain request from before a flush
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            // Immediately schedule the next frame's drain.
            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            // Delayed audio-EOS notification; drop if superseded.
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            // Offloaded audio has been paused long enough; tear the sink down
            // so the DSP can power off (see kOffloadPauseMaxUs).
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}
776
// Schedules a kWhatDrainAudioQueue message after delayUs. Caller must hold
// mLock. No-op when a drain is already pending, when queues are being
// sync-started, or when the AudioSink callback path is delivering data.
void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    // Stamp the current drain generation so a flush invalidates this message.
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}
799
// Arms the "rendering started" detection: snapshot the current drain
// generations so notifyIfMediaRenderingStarted_l() can tell when both streams
// have delivered data after this point. Caller must hold mLock.
void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}
805
// Posts kWhatMediaRenderingStart once, the first time data is delivered after
// prepareForMediaRenderingStart_l() with no intervening flush. Caller must
// hold mLock.
void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            // Defer the notification; data delivered while paused isn't
            // "rendering started" yet.
            return;
        }
        // Disarm (-1 can never match a generation) so we only notify once.
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}
821
822// static
823size_t NuPlayer2::Renderer::AudioSinkCallback(
Wei Jia33abcc72018-01-30 09:47:38 -0800824 MediaPlayer2Interface::AudioSink * /* audioSink */,
Wei Jia53692fa2017-12-11 10:33:46 -0800825 void *buffer,
826 size_t size,
827 void *cookie,
Wei Jia33abcc72018-01-30 09:47:38 -0800828 MediaPlayer2Interface::AudioSink::cb_event_t event) {
Wei Jia53692fa2017-12-11 10:33:46 -0800829 NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;
830
831 switch (event) {
Wei Jia33abcc72018-01-30 09:47:38 -0800832 case MediaPlayer2Interface::AudioSink::CB_EVENT_FILL_BUFFER:
Wei Jia53692fa2017-12-11 10:33:46 -0800833 {
834 return me->fillAudioBuffer(buffer, size);
835 break;
836 }
837
Wei Jia33abcc72018-01-30 09:47:38 -0800838 case MediaPlayer2Interface::AudioSink::CB_EVENT_STREAM_END:
Wei Jia53692fa2017-12-11 10:33:46 -0800839 {
840 ALOGV("AudioSink::CB_EVENT_STREAM_END");
841 me->notifyEOSCallback();
842 break;
843 }
844
Wei Jia33abcc72018-01-30 09:47:38 -0800845 case MediaPlayer2Interface::AudioSink::CB_EVENT_TEAR_DOWN:
Wei Jia53692fa2017-12-11 10:33:46 -0800846 {
847 ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
848 me->notifyAudioTearDown(kDueToError);
849 break;
850 }
851 }
852
853 return 0;
854}
855
856void NuPlayer2::Renderer::notifyEOSCallback() {
857 Mutex::Autolock autoLock(mLock);
858
859 if (!mUseAudioCallback) {
860 return;
861 }
862
863 notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
864}
865
866size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
867 Mutex::Autolock autoLock(mLock);
868
869 if (!mUseAudioCallback) {
870 return 0;
871 }
872
873 bool hasEOS = false;
874
875 size_t sizeCopied = 0;
876 bool firstEntry = true;
877 QueueEntry *entry; // will be valid after while loop if hasEOS is set.
878 while (sizeCopied < size && !mAudioQueue.empty()) {
879 entry = &*mAudioQueue.begin();
880
881 if (entry->mBuffer == NULL) { // EOS
882 hasEOS = true;
883 mAudioQueue.erase(mAudioQueue.begin());
884 break;
885 }
886
887 if (firstEntry && entry->mOffset == 0) {
888 firstEntry = false;
889 int64_t mediaTimeUs;
890 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
891 ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
892 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
893 }
894
895 size_t copy = entry->mBuffer->size() - entry->mOffset;
896 size_t sizeRemaining = size - sizeCopied;
897 if (copy > sizeRemaining) {
898 copy = sizeRemaining;
899 }
900
901 memcpy((char *)buffer + sizeCopied,
902 entry->mBuffer->data() + entry->mOffset,
903 copy);
904
905 entry->mOffset += copy;
906 if (entry->mOffset == entry->mBuffer->size()) {
907 entry->mNotifyConsumed->post();
908 mAudioQueue.erase(mAudioQueue.begin());
909 entry = NULL;
910 }
911 sizeCopied += copy;
912
913 notifyIfMediaRenderingStarted_l();
914 }
915
916 if (mAudioFirstAnchorTimeMediaUs >= 0) {
917 int64_t nowUs = ALooper::GetNowUs();
918 int64_t nowMediaUs =
919 mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
920 // we don't know how much data we are queueing for offloaded tracks.
921 mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
922 }
923
924 // for non-offloaded audio, we need to compute the frames written because
925 // there is no EVENT_STREAM_END notification. The frames written gives
926 // an estimate on the pending played out duration.
927 if (!offloadingAudio()) {
928 mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
929 }
930
931 if (hasEOS) {
932 (new AMessage(kWhatStopAudioSink, this))->post();
933 // As there is currently no EVENT_STREAM_END callback notification for
934 // non-offloaded audio tracks, we need to post the EOS ourselves.
935 if (!offloadingAudio()) {
936 int64_t postEOSDelayUs = 0;
937 if (mAudioSink->needsTrailingPadding()) {
938 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
939 }
940 ALOGV("fillAudioBuffer: notifyEOS_l "
941 "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld",
942 mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
943 notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
944 }
945 }
946 return sizeCopied;
947}
948
// When the sink can't be drained (e.g. not yet created), finds the LAST
// EOS/"eos"-marked entry in the audio queue, replies to everything before it
// (dropping the sample data), and erases that prefix — so NuPlayer2Decoder
// isn't left waiting on consumed-notifications or the EOS.
void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    // Scan the whole queue; itEOS ends up one past the last EOS-like entry.
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == nullptr) {
                if (it->mNotifyConsumed == nullptr) {
                    // delay doesn't matter as we don't even have an AudioTrack
                    notifyEOS(true /* audio */, it->mFinalResult);
                } else {
                    // TAG for re-opening audio sink.
                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
                }
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}
980
981bool NuPlayer2::Renderer::onDrainAudioQueue() {
982 // do not drain audio during teardown as queued buffers may be invalid.
983 if (mAudioTornDown) {
984 return false;
985 }
986 // TODO: This call to getPosition checks if AudioTrack has been created
987 // in AudioSink before draining audio. If AudioTrack doesn't exist, then
988 // CHECKs on getPosition will fail.
989 // We still need to figure out why AudioTrack is not created when
990 // this function is called. One possible reason could be leftover
991 // audio. Another possible place is to check whether decoder
992 // has received INFO_FORMAT_CHANGED as the first buffer since
993 // AudioSink is opened there, and possible interactions with flush
994 // immediately after start. Investigate error message
995 // "vorbis_dsp_synthesis returned -135", along with RTSP.
996 uint32_t numFramesPlayed;
997 if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
998 // When getPosition fails, renderer will not reschedule the draining
999 // unless new samples are queued.
1000 // If we have pending EOS (or "eos" marker for discontinuities), we need
1001 // to post these now as NuPlayer2Decoder might be waiting for it.
1002 drainAudioQueueUntilLastEOS();
1003
1004 ALOGW("onDrainAudioQueue(): audio sink is not ready");
1005 return false;
1006 }
1007
1008#if 0
1009 ssize_t numFramesAvailableToWrite =
1010 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
1011
1012 if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
1013 ALOGI("audio sink underrun");
1014 } else {
1015 ALOGV("audio queue has %d frames left to play",
1016 mAudioSink->frameCount() - numFramesAvailableToWrite);
1017 }
1018#endif
1019
1020 uint32_t prevFramesWritten = mNumFramesWritten;
1021 while (!mAudioQueue.empty()) {
1022 QueueEntry *entry = &*mAudioQueue.begin();
1023
1024 if (entry->mBuffer == NULL) {
1025 if (entry->mNotifyConsumed != nullptr) {
1026 // TAG for re-open audio sink.
1027 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1028 mAudioQueue.erase(mAudioQueue.begin());
1029 continue;
1030 }
1031
1032 // EOS
1033 if (mPaused) {
1034 // Do not notify EOS when paused.
1035 // This is needed to avoid switch to next clip while in pause.
1036 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1037 return false;
1038 }
1039
1040 int64_t postEOSDelayUs = 0;
1041 if (mAudioSink->needsTrailingPadding()) {
1042 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1043 }
1044 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1045 mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1046
1047 mAudioQueue.erase(mAudioQueue.begin());
1048 entry = NULL;
1049 if (mAudioSink->needsTrailingPadding()) {
1050 // If we're not in gapless playback (i.e. through setNextPlayer), we
1051 // need to stop the track here, because that will play out the last
1052 // little bit at the end of the file. Otherwise short files won't play.
1053 mAudioSink->stop();
1054 mNumFramesWritten = 0;
1055 }
1056 return false;
1057 }
1058
1059 mLastAudioBufferDrained = entry->mBufferOrdinal;
1060
1061 // ignore 0-sized buffer which could be EOS marker with no data
1062 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1063 int64_t mediaTimeUs;
1064 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1065 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1066 mediaTimeUs / 1E6);
1067 onNewAudioMediaTime(mediaTimeUs);
1068 }
1069
1070 size_t copy = entry->mBuffer->size() - entry->mOffset;
1071
1072 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1073 copy, false /* blocking */);
1074 if (written < 0) {
1075 // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1076 if (written == WOULD_BLOCK) {
1077 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1078 } else {
1079 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1080 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1081 // true, in which case the NuPlayer2 will handle the reconnect.
1082 notifyAudioTearDown(kDueToError);
1083 }
1084 break;
1085 }
1086
1087 entry->mOffset += written;
1088 size_t remainder = entry->mBuffer->size() - entry->mOffset;
1089 if ((ssize_t)remainder < mAudioSink->frameSize()) {
1090 if (remainder > 0) {
1091 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1092 remainder);
1093 entry->mOffset += remainder;
1094 copy -= remainder;
1095 }
1096
1097 entry->mNotifyConsumed->post();
1098 mAudioQueue.erase(mAudioQueue.begin());
1099
1100 entry = NULL;
1101 }
1102
1103 size_t copiedFrames = written / mAudioSink->frameSize();
1104 mNumFramesWritten += copiedFrames;
1105
1106 {
1107 Mutex::Autolock autoLock(mLock);
1108 int64_t maxTimeMedia;
1109 maxTimeMedia =
1110 mAnchorTimeMediaUs +
1111 (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1112 * 1000LL * mAudioSink->msecsPerFrame());
1113 mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1114
1115 notifyIfMediaRenderingStarted_l();
1116 }
1117
1118 if (written != (ssize_t)copy) {
1119 // A short count was received from AudioSink::write()
1120 //
1121 // AudioSink write is called in non-blocking mode.
1122 // It may return with a short count when:
1123 //
1124 // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1125 // discarded.
1126 // 2) The data to be copied exceeds the available buffer in AudioSink.
1127 // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1128 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1129
1130 // (Case 1)
1131 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it
1132 // needs to fail, as we should not carry over fractional frames between calls.
1133 CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1134
1135 // (Case 2, 3, 4)
1136 // Return early to the caller.
1137 // Beware of calling immediately again as this may busy-loop if you are not careful.
1138 ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1139 break;
1140 }
1141 }
1142
1143 // calculate whether we need to reschedule another write.
1144 bool reschedule = !mAudioQueue.empty()
1145 && (!mPaused
1146 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1147 //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u",
1148 // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1149 return reschedule;
1150}
1151
1152int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1153 int32_t sampleRate = offloadingAudio() ?
1154 mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1155 if (sampleRate == 0) {
1156 ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1157 return 0;
1158 }
1159 // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1160 return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1161}
1162
1163// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1164int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1165 int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1166 if (mUseVirtualAudioSink) {
1167 int64_t nowUs = ALooper::GetNowUs();
1168 int64_t mediaUs;
1169 if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
Chih-Hung Hsiehd42529d2018-12-11 13:53:10 -08001170 return 0LL;
Wei Jia53692fa2017-12-11 10:33:46 -08001171 } else {
1172 return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1173 }
1174 }
1175
1176 const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1177 int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
1178 if (pendingUs < 0) {
1179 // This shouldn't happen unless the timestamp is stale.
1180 ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1181 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1182 __func__, (long long)pendingUs,
1183 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1184 pendingUs = 0;
1185 }
1186 return pendingUs;
1187}
1188
1189int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1190 int64_t realUs;
1191 if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1192 // If failed to get current position, e.g. due to audio clock is
1193 // not ready, then just play out video immediately without delay.
1194 return nowUs;
1195 }
1196 return realUs;
1197}
1198
// Re-anchors the media clock when a new audio timestamp reaches the sink.
// Called from the audio drain path on the first buffer carrying a given
// timestamp; takes mLock for the whole update.
void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        // The sink has rendered at least one frame; clock updates can begin.
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            // Periodic re-anchor: "now" in media time is the new timestamp
            // minus whatever written audio is still pending playout.
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latenty of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    // Remember the anchor so duplicate timestamps (see TRICKY above) and the
    // max-media-time computation in the drain loop can refer to it.
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}
1243
// Called without mLock acquired.
// Schedules draining of the head of the video queue: immediately for EOS or
// the first sample, otherwise two vsyncs before the frame's presentation
// time (real-time delayed post, or a MediaClock timer in media-time mode).
void NuPlayer2::Renderer::postDrainVideoQueue() {
    // Nothing to do if a drain is already scheduled, the initial A/V queue
    // sync is still in progress, or we are paused after the first sample.
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    if (mFlags & FLAG_REAL_TIME) {
        int64_t realTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));

        // Snap the presentation time onto the display's vsync grid.
        realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        int64_t delayUs = realTimeUs - nowUs;

        ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
        // post 2 display refreshes before rendering is due
        msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

        mDrainVideoQueuePending = true;
        return;
    }

    int64_t mediaTimeUs;
    CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

    {
        Mutex::Autolock autoLock(mLock);
        // No anchor yet (video-only, or before the first audio write):
        // anchor the media clock to this first video timestamp.
        if (mAnchorTimeMediaUs < 0) {
            mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
            mAnchorTimeMediaUs = mediaTimeUs;
        }
    }
    mNextVideoTimeMediaUs = mediaTimeUs;
    if (!mHasAudio) {
        // smooth out videos >= 10fps
        mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
    }

    if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
        // First frame after start/flush, or video ahead of the first audio
        // anchor: drain right away.
        msg->post();
    } else {
        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        // post 2 display refreshes before rendering is due
        mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
    }

    mDrainVideoQueuePending = true;
}
1314
// Renders (or drops) the head of the video queue: computes the frame's real
// display time, measures lateness against "now", and returns the buffer to
// the decoder with a render/drop verdict in "render".
void NuPlayer2::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    int64_t mediaTimeUs = -1;
    if (mFlags & FLAG_REAL_TIME) {
        // In real-time mode "timeUs" already is the desired display time.
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }
    // Align the presentation time with the display's vsync grid.
    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

    bool tooLate = false;

    if (!mPaused) {
        setVideoLateByUs(nowUs - realTimeUs);
        // Drop frames that are more than 40ms behind schedule.
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);

            if (!(mFlags & FLAG_REAL_TIME)
                    && mLastAudioMediaTimeUs != -1
                    && mediaTimeUs > mLastAudioMediaTimeUs) {
                // If audio ends before video, video continues to drive media clock.
                // Also smooth out videos >= 10fps.
                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
            }
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            clearAnchorTime();
        }
    }

    // Always render the first video frame while keeping stats on A/V sync.
    if (!mVideoSampleReceived) {
        realTimeUs = nowUs;
        tooLate = false;
    }

    // Hand the buffer back to the decoder with the display decision.
    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}
1402
1403void NuPlayer2::Renderer::notifyVideoRenderingStart() {
1404 sp<AMessage> notify = mNotify->dup();
1405 notify->setInt32("what", kWhatVideoRenderingStart);
1406 notify->post();
1407}
1408
1409void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1410 Mutex::Autolock autoLock(mLock);
1411 notifyEOS_l(audio, finalResult, delayUs);
1412}
1413
// Notifies the upper layer of end-of-stream on one stream. Audio EOS with a
// positive delay is re-posted to self (guarded by mAudioEOSGeneration)
// instead of being delivered immediately. Caller must hold mLock.
void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
    if (audio && delayUs > 0) {
        sp<AMessage> msg = new AMessage(kWhatEOS, this);
        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
        msg->setInt32("finalResult", finalResult);
        msg->post(delayUs);
        return;
    }
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatEOS);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->setInt32("finalResult", finalResult);
    notify->post(delayUs);

    if (audio) {
        // Video might outlive audio. Clear anchor to enable video only case.
        mAnchorTimeMediaUs = -1;
        mHasAudio = false;
        if (mNextVideoTimeMediaUs >= 0) {
            int64_t mediaUs = 0;
            int64_t nowUs = ALooper::GetNowUs();
            status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
            if (result == OK) {
                // Clock still running: let it advance to the next queued
                // video frame's timestamp.
                if (mNextVideoTimeMediaUs > mediaUs) {
                    mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
                }
            } else {
                // No usable clock: re-anchor at the next video frame, with
                // 100ms of max-time slack so playback can proceed.
                mMediaClock->updateAnchor(
                        mNextVideoTimeMediaUs, nowUs, mNextVideoTimeMediaUs + 100000);
            }
        }
    }
}
1447
1448void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1449 sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1450 msg->setInt32("reason", reason);
1451 msg->post();
1452}
1453
// Accepts a decoded buffer from the decoder and enqueues it on the audio or
// video queue, scheduling a drain. While the initial A/V queue sync is
// active, also drops leading audio that starts more than 0.1s before video.
void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // Discard buffers queued before the most recent flush.
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    // Lazily create the vsync-based frame scheduler on first video buffer.
    if (mHasVideo) {
        if (mVideoScheduler == NULL) {
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<RefBase> obj;
    CHECK(msg->findObject("buffer", &obj));
    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    // NOTE: the audio queue is protected by mLock; postDrainVideoQueue()
    // takes mLock internally, so the video push happens outside the lock.
    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    // Second phase: A/V queue alignment (only while mSyncQueues is set).
    Mutex::Autolock autoLock(mLock);
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000LL) {
        // Audio data starts More than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    // Queues are close enough in time; end the sync phase and start draining.
    syncQueuesDone_l();
}
1534
// Ends the initial A/V queue-sync phase and kicks off draining of whichever
// queues already have data. Caller must hold mLock; the lock is dropped
// around postDrainVideoQueue() because that helper acquires mLock itself.
void NuPlayer2::Renderer::syncQueuesDone_l() {
    // Idempotent: nothing to do if syncing is already over.
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}
1552
// Queues an EOS marker (a buffer-less entry carrying finalResult) on the
// audio or video queue and schedules a drain. EOS on an empty queue also
// terminates the A/V queue-sync phase, since alignment can never complete.
void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // Ignore EOS belonging to a pre-flush generation.
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // Video queue itself is not guarded by mLock; only the sync flag is.
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}
1583}
1584
// Flushes the audio or video pipeline: releases every queued buffer back to
// the decoder, bumps the drain/EOS generations so in-flight messages become
// no-ops, and resets the corresponding sink/scheduler state.
void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
            mLastAudioMediaTimeUs = -1;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
    }
    clearAnchorTime();

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            // Invalidate pending drain and delayed-EOS messages for audio.
            ++mAudioDrainGeneration;
            ++mAudioEOSGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
        // Audio clock updates restart once the sink reports progress again.
        mNextAudioClockUpdateTimeUs = -1;
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}
1674
1675void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) {
1676 while (!queue->empty()) {
1677 QueueEntry *entry = &*queue->begin();
1678
1679 if (entry->mBuffer != NULL) {
1680 entry->mNotifyConsumed->post();
1681 } else if (entry->mNotifyConsumed != nullptr) {
1682 // Is it needed to open audio sink now?
1683 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1684 }
1685
1686 queue->erase(queue->begin());
1687 entry = NULL;
1688 }
1689}
1690
1691void NuPlayer2::Renderer::notifyFlushComplete(bool audio) {
1692 sp<AMessage> notify = mNotify->dup();
1693 notify->setInt32("what", kWhatFlushComplete);
1694 notify->setInt32("audio", static_cast<int32_t>(audio));
1695 notify->post();
1696}
1697
1698bool NuPlayer2::Renderer::dropBufferIfStale(
1699 bool audio, const sp<AMessage> &msg) {
1700 int32_t queueGeneration;
1701 CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1702
1703 if (queueGeneration == getQueueGeneration(audio)) {
1704 return false;
1705 }
1706
1707 sp<AMessage> notifyConsumed;
1708 if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1709 notifyConsumed->post();
1710 }
1711
1712 return true;
1713}
1714
1715void NuPlayer2::Renderer::onAudioSinkChanged() {
1716 if (offloadingAudio()) {
1717 return;
1718 }
1719 CHECK(!mDrainAudioQueuePending);
1720 mNumFramesWritten = 0;
1721 mAnchorNumFramesWritten = -1;
1722 uint32_t written;
1723 if (mAudioSink->getFramesWritten(&written) == OK) {
1724 mNumFramesWritten = written;
1725 }
1726}
1727
1728void NuPlayer2::Renderer::onDisableOffloadAudio() {
1729 Mutex::Autolock autoLock(mLock);
1730 mFlags &= ~FLAG_OFFLOAD_AUDIO;
1731 ++mAudioDrainGeneration;
1732 if (mAudioRenderingStartGeneration != -1) {
1733 prepareForMediaRenderingStart_l();
1734 }
1735}
1736
1737void NuPlayer2::Renderer::onEnableOffloadAudio() {
1738 Mutex::Autolock autoLock(mLock);
1739 mFlags |= FLAG_OFFLOAD_AUDIO;
1740 ++mAudioDrainGeneration;
1741 if (mAudioRenderingStartGeneration != -1) {
1742 prepareForMediaRenderingStart_l();
1743 }
1744}
1745
// Pauses rendering: stops the media clock, cancels pending video drains
// (audio drains keep running so the sink buffer fills during pause), pauses
// the AudioSink, and arms the offload pause timeout.
void NuPlayer2::Renderer::onPause() {
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // we do not increment audio drain generation so that we fill audio buffer during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        // Rate 0 freezes the media clock for the duration of the pause.
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    mAudioSink->pause();
    startAudioOffloadPauseTimeout();

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}
1770
// Resumes playback after onPause(): restarts the AudioSink (when open),
// restores the playback rate on both sink and media clock, and re-arms
// draining of both queues.
void NuPlayer2::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    cancelAudioOffloadPauseTimeout();
    if (mAudioSink->ready()) {
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            notifyAudioTearDown(kDueToError);
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // rendering started message may have been delayed if we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l();
        }
        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        // Un-freeze the media clock at the user-requested rate.
        mMediaClock->setPlaybackRate(mPlaybackRate);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    // postDrainVideoQueue() takes mLock itself, so call it outside the lock.
    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}
1809
1810void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
1811 if (mVideoScheduler == NULL) {
1812 mVideoScheduler = new VideoFrameScheduler();
1813 }
1814 mVideoScheduler->init(fps);
1815}
1816
1817int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) {
1818 Mutex::Autolock autoLock(mLock);
1819 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1820}
1821
1822int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) {
1823 Mutex::Autolock autoLock(mLock);
1824 return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1825}
1826
1827bool NuPlayer2::Renderer::getSyncQueues() {
1828 Mutex::Autolock autoLock(mLock);
1829 return mSyncQueues;
1830}
1831
1832void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1833 if (mAudioTornDown) {
1834 return;
1835 }
1836 mAudioTornDown = true;
1837
1838 int64_t currentPositionUs;
1839 sp<AMessage> notify = mNotify->dup();
1840 if (getCurrentPosition(&currentPositionUs) == OK) {
1841 notify->setInt64("positionUs", currentPositionUs);
1842 }
1843
1844 mAudioSink->stop();
1845 mAudioSink->flush();
1846
1847 notify->setInt32("what", kWhatAudioTearDown);
1848 notify->setInt32("reason", reason);
1849 notify->post();
1850}
1851
1852void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() {
1853 if (offloadingAudio()) {
1854 mWakeLock->acquire();
1855 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1856 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1857 msg->post(kOffloadPauseMaxUs);
1858 }
1859}
1860
1861void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() {
1862 // We may have called startAudioOffloadPauseTimeout() without
1863 // the AudioSink open and with offloadingAudio enabled.
1864 //
1865 // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
1866 // we always release the wakelock and increment the pause timeout generation.
1867 //
1868 // Note: The acquired wakelock prevents the device from suspending
1869 // immediately after offload pause (in case a resume happens shortly thereafter).
1870 mWakeLock->release(true);
1871 ++mAudioOffloadPauseTimeoutGeneration;
1872}
1873
1874status_t NuPlayer2::Renderer::onOpenAudioSink(
1875 const sp<AMessage> &format,
1876 bool offloadOnly,
1877 bool hasVideo,
1878 uint32_t flags,
1879 bool isStreaming) {
1880 ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1881 offloadOnly, offloadingAudio());
1882 bool audioSinkChanged = false;
1883
1884 int32_t numChannels;
1885 CHECK(format->findInt32("channel-count", &numChannels));
1886
1887 int32_t channelMask;
1888 if (!format->findInt32("channel-mask", &channelMask)) {
1889 // signal to the AudioSink to derive the mask from count.
1890 channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1891 }
1892
1893 int32_t sampleRate;
1894 CHECK(format->findInt32("sample-rate", &sampleRate));
1895
Wei Jia4a16e2a2019-01-04 15:26:48 -08001896 // read pcm encoding from MediaCodec output format, if available
1897 int32_t pcmEncoding;
1898 audio_format_t audioFormat =
1899 format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ?
1900 audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT;
1901
Wei Jia53692fa2017-12-11 10:33:46 -08001902 if (offloadingAudio()) {
Wei Jia53692fa2017-12-11 10:33:46 -08001903 AString mime;
1904 CHECK(format->findString("mime", &mime));
1905 status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1906
1907 if (err != OK) {
1908 ALOGE("Couldn't map mime \"%s\" to a valid "
1909 "audio_format", mime.c_str());
1910 onDisableOffloadAudio();
1911 } else {
1912 ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1913 mime.c_str(), audioFormat);
1914
1915 int avgBitRate = -1;
1916 format->findInt32("bitrate", &avgBitRate);
1917
1918 int32_t aacProfile = -1;
1919 if (audioFormat == AUDIO_FORMAT_AAC
1920 && format->findInt32("aac-profile", &aacProfile)) {
1921 // Redefine AAC format as per aac profile
1922 mapAACProfileToAudioFormat(
1923 audioFormat,
1924 aacProfile);
1925 }
1926
1927 audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1928 offloadInfo.duration_us = -1;
1929 format->findInt64(
1930 "durationUs", &offloadInfo.duration_us);
1931 offloadInfo.sample_rate = sampleRate;
1932 offloadInfo.channel_mask = channelMask;
1933 offloadInfo.format = audioFormat;
1934 offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1935 offloadInfo.bit_rate = avgBitRate;
1936 offloadInfo.has_video = hasVideo;
1937 offloadInfo.is_streaming = isStreaming;
1938
1939 if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1940 ALOGV("openAudioSink: no change in offload mode");
1941 // no change from previous configuration, everything ok.
1942 return OK;
1943 }
1944 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1945
1946 ALOGV("openAudioSink: try to open AudioSink in offload mode");
1947 uint32_t offloadFlags = flags;
1948 offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1949 offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1950 audioSinkChanged = true;
1951 mAudioSink->close();
1952
1953 err = mAudioSink->open(
1954 sampleRate,
1955 numChannels,
1956 (audio_channel_mask_t)channelMask,
1957 audioFormat,
1958 0 /* bufferCount - unused */,
1959 &NuPlayer2::Renderer::AudioSinkCallback,
1960 this,
1961 (audio_output_flags_t)offloadFlags,
1962 &offloadInfo);
1963
1964 if (err == OK) {
1965 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1966 }
1967
1968 if (err == OK) {
1969 // If the playback is offloaded to h/w, we pass
1970 // the HAL some metadata information.
1971 // We don't want to do this for PCM because it
1972 // will be going through the AudioFlinger mixer
1973 // before reaching the hardware.
1974 // TODO
1975 mCurrentOffloadInfo = offloadInfo;
1976 if (!mPaused) { // for preview mode, don't start if paused
1977 err = mAudioSink->start();
1978 }
1979 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1980 }
1981 if (err != OK) {
1982 // Clean up, fall back to non offload mode.
1983 mAudioSink->close();
1984 onDisableOffloadAudio();
1985 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1986 ALOGV("openAudioSink: offload failed");
1987 if (offloadOnly) {
1988 notifyAudioTearDown(kForceNonOffload);
1989 }
1990 } else {
1991 mUseAudioCallback = true; // offload mode transfers data through callback
1992 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
1993 }
1994 }
1995 }
1996 if (!offloadOnly && !offloadingAudio()) {
1997 ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1998 uint32_t pcmFlags = flags;
1999 pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
2000
2001 const PcmInfo info = {
2002 (audio_channel_mask_t)channelMask,
2003 (audio_output_flags_t)pcmFlags,
Wei Jia4a16e2a2019-01-04 15:26:48 -08002004 audioFormat,
Wei Jia53692fa2017-12-11 10:33:46 -08002005 numChannels,
2006 sampleRate
2007 };
2008 if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
2009 ALOGV("openAudioSink: no change in pcm mode");
2010 // no change from previous configuration, everything ok.
2011 return OK;
2012 }
2013
2014 audioSinkChanged = true;
2015 mAudioSink->close();
2016 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2017 // Note: It is possible to set up the callback, but not use it to send audio data.
2018 // This requires a fix in AudioSink to explicitly specify the transfer mode.
2019 mUseAudioCallback = getUseAudioCallbackSetting();
2020 if (mUseAudioCallback) {
2021 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
2022 }
2023
2024 // Compute the desired buffer size.
2025 // For callback mode, the amount of time before wakeup is about half the buffer size.
2026 const uint32_t frameCount =
2027 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
2028
2029 // The doNotReconnect means AudioSink will signal back and let NuPlayer2 to re-construct
2030 // AudioSink. We don't want this when there's video because it will cause a video seek to
2031 // the previous I frame. But we do want this when there's only audio because it will give
2032 // NuPlayer2 a chance to switch from non-offload mode to offload mode.
2033 // So we only set doNotReconnect when there's no video.
2034 const bool doNotReconnect = !hasVideo;
2035
2036 // We should always be able to set our playback settings if the sink is closed.
2037 LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
2038 "onOpenAudioSink: can't set playback rate on closed sink");
2039 status_t err = mAudioSink->open(
2040 sampleRate,
2041 numChannels,
2042 (audio_channel_mask_t)channelMask,
Wei Jia4a16e2a2019-01-04 15:26:48 -08002043 audioFormat,
Wei Jia53692fa2017-12-11 10:33:46 -08002044 0 /* bufferCount - unused */,
2045 mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL,
2046 mUseAudioCallback ? this : NULL,
2047 (audio_output_flags_t)pcmFlags,
2048 NULL,
2049 doNotReconnect,
2050 frameCount);
2051 if (err != OK) {
2052 ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2053 mAudioSink->close();
2054 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2055 return err;
2056 }
2057 mCurrentPcmInfo = info;
2058 if (!mPaused) { // for preview mode, don't start if paused
2059 mAudioSink->start();
2060 }
2061 }
2062 if (audioSinkChanged) {
2063 onAudioSinkChanged();
2064 }
2065 mAudioTornDown = false;
2066 return OK;
2067}
2068
2069void NuPlayer2::Renderer::onCloseAudioSink() {
2070 mAudioSink->close();
2071 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2072 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2073}
2074
2075void NuPlayer2::Renderer::onChangeAudioFormat(
2076 const sp<AMessage> &meta, const sp<AMessage> &notify) {
2077 sp<AMessage> format;
2078 CHECK(meta->findMessage("format", &format));
2079
2080 int32_t offloadOnly;
2081 CHECK(meta->findInt32("offload-only", &offloadOnly));
2082
2083 int32_t hasVideo;
2084 CHECK(meta->findInt32("has-video", &hasVideo));
2085
2086 uint32_t flags;
2087 CHECK(meta->findInt32("flags", (int32_t *)&flags));
2088
2089 uint32_t isStreaming;
2090 CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2091
2092 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2093
2094 if (err != OK) {
2095 notify->setInt32("err", err);
2096 }
2097 notify->post();
2098}
2099
2100} // namespace android