blob: 2881fd4d3943073777d2d5a150b493c6f31c5a36 [file] [log] [blame]
Wei Jia53692fa2017-12-11 10:33:46 -08001/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayer2Renderer"
19#include <utils/Log.h>
20
21#include "JWakeLock.h"
22#include "NuPlayer2Renderer.h"
23#include <algorithm>
24#include <cutils/properties.h>
25#include <media/stagefright/foundation/ADebug.h>
26#include <media/stagefright/foundation/AMessage.h>
27#include <media/stagefright/foundation/AUtils.h>
28#include <media/stagefright/MediaClock.h>
Wei Jia896b4d62019-01-04 15:26:48 -080029#include <media/stagefright/MediaCodecConstants.h>
30#include <media/stagefright/MediaDefs.h>
Wei Jia53692fa2017-12-11 10:33:46 -080031#include <media/stagefright/MediaErrors.h>
Wei Jia53692fa2017-12-11 10:33:46 -080032#include <media/stagefright/Utils.h>
Dichen Zhangc37b1902018-12-18 11:36:13 -080033#include <media/stagefright/VideoFrameScheduler2.h>
Wei Jia53692fa2017-12-11 10:33:46 -080034#include <media/MediaCodecBuffer.h>
35
36#include <inttypes.h>
37
38namespace android {
39
40/*
41 * Example of common configuration settings in shell script form
42
43 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
44 adb shell setprop audio.offload.disable 1
45
46 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
47 adb shell setprop audio.offload.video 1
48
49 #Use audio callbacks for PCM data
50 adb shell setprop media.stagefright.audio.cbk 1
51
52 #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
53 adb shell setprop media.stagefright.audio.deep 1
54
55 #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
56 adb shell setprop media.stagefright.audio.sink 1000
57
58 * These configurations take effect for the next track played (not the current track).
59 */
60
61static inline bool getUseAudioCallbackSetting() {
62 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
63}
64
65static inline int32_t getAudioSinkPcmMsSetting() {
66 return property_get_int32(
67 "media.stagefright.audio.sink", 500 /* default_value */);
68}
69
// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000LL;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;

// Throttle for audio-driven media-clock anchor updates: 20 ms.
static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;

// static
// Sentinel PCM configuration meaning "no PCM sink configuration in effect".
const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
// Minimum spacing between position updates (100 ms).
const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;
Wei Jia896b4d62019-01-04 15:26:48 -080091static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
92 switch (pcmEncoding) {
93 case kAudioEncodingPcmFloat:
94 return AUDIO_FORMAT_PCM_FLOAT;
95 case kAudioEncodingPcm16bit:
96 return AUDIO_FORMAT_PCM_16_BIT;
97 case kAudioEncodingPcm8bit:
98 return AUDIO_FORMAT_PCM_8_BIT; // TODO: do we want to support this?
99 default:
100 ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
101 return AUDIO_FORMAT_INVALID;
102 }
103}
104
Wei Jia53692fa2017-12-11 10:33:46 -0800105NuPlayer2::Renderer::Renderer(
Wei Jia33abcc72018-01-30 09:47:38 -0800106 const sp<MediaPlayer2Interface::AudioSink> &sink,
Wei Jia53692fa2017-12-11 10:33:46 -0800107 const sp<MediaClock> &mediaClock,
108 const sp<AMessage> &notify,
109 uint32_t flags)
110 : mAudioSink(sink),
111 mUseVirtualAudioSink(false),
112 mNotify(notify),
113 mFlags(flags),
114 mNumFramesWritten(0),
115 mDrainAudioQueuePending(false),
116 mDrainVideoQueuePending(false),
117 mAudioQueueGeneration(0),
118 mVideoQueueGeneration(0),
119 mAudioDrainGeneration(0),
120 mVideoDrainGeneration(0),
121 mAudioEOSGeneration(0),
122 mMediaClock(mediaClock),
123 mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
124 mAudioFirstAnchorTimeMediaUs(-1),
125 mAnchorTimeMediaUs(-1),
126 mAnchorNumFramesWritten(-1),
Chih-Hung Hsiehd42529d2018-12-11 13:53:10 -0800127 mVideoLateByUs(0LL),
Wei Jia53692fa2017-12-11 10:33:46 -0800128 mNextVideoTimeMediaUs(-1),
129 mHasAudio(false),
130 mHasVideo(false),
131 mNotifyCompleteAudio(false),
132 mNotifyCompleteVideo(false),
133 mSyncQueues(false),
Wei Jia6376cd52018-09-26 11:42:55 -0700134 mPaused(true),
Wei Jia53692fa2017-12-11 10:33:46 -0800135 mPauseDrainAudioAllowedUs(0),
136 mVideoSampleReceived(false),
137 mVideoRenderingStarted(false),
138 mVideoRenderingStartGeneration(0),
139 mAudioRenderingStartGeneration(0),
140 mRenderingDataDelivered(false),
141 mNextAudioClockUpdateTimeUs(-1),
142 mLastAudioMediaTimeUs(-1),
143 mAudioOffloadPauseTimeoutGeneration(0),
144 mAudioTornDown(false),
145 mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
146 mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
147 mTotalBuffersQueued(0),
148 mLastAudioBufferDrained(0),
149 mUseAudioCallback(false),
150 mWakeLock(new JWakeLock()) {
151 CHECK(mediaClock != NULL);
Wei Jia700a7c22018-09-14 18:04:35 -0700152 mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
Wei Jia53692fa2017-12-11 10:33:46 -0800153}
154
// Tears down the renderer: stops/flushes/closes the sink when offloading,
// then drops queued buffers and strong references under the lock.
NuPlayer2::Renderer::~Renderer() {
    if (offloadingAudio()) {
        // In offload mode the sink owns DSP-side state; shut it down fully.
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }

    // Try to avoid racing condition in case callback is still on.
    Mutex::Autolock autoLock(mLock);
    if (mUseAudioCallback) {
        // Callback-driven mode may still be consuming the queues; empty them
        // under the lock before releasing everything.
        flushQueue(&mAudioQueue);
        flushQueue(&mVideoQueue);
    }
    mWakeLock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}
173
174void NuPlayer2::Renderer::queueBuffer(
175 bool audio,
176 const sp<MediaCodecBuffer> &buffer,
177 const sp<AMessage> &notifyConsumed) {
178 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
179 msg->setInt32("queueGeneration", getQueueGeneration(audio));
180 msg->setInt32("audio", static_cast<int32_t>(audio));
181 msg->setObject("buffer", buffer);
182 msg->setMessage("notifyConsumed", notifyConsumed);
183 msg->post();
184}
185
186void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
187 CHECK_NE(finalResult, (status_t)OK);
188
189 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
190 msg->setInt32("queueGeneration", getQueueGeneration(audio));
191 msg->setInt32("audio", static_cast<int32_t>(audio));
192 msg->setInt32("finalResult", finalResult);
193 msg->post();
194}
195
196status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
197 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
198 writeToAMessage(msg, rate);
199 sp<AMessage> response;
200 status_t err = msg->postAndAwaitResponse(&response);
201 if (err == OK && response != NULL) {
202 CHECK(response->findInt32("err", &err));
203 }
204 return err;
205}
206
207status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
Wei Jia700a7c22018-09-14 18:04:35 -0700208 if (rate.mSpeed <= 0.f) {
209 ALOGW("playback rate cannot be %f", rate.mSpeed);
210 return BAD_VALUE;
Wei Jia53692fa2017-12-11 10:33:46 -0800211 }
212
213 if (mAudioSink != NULL && mAudioSink->ready()) {
214 status_t err = mAudioSink->setPlaybackRate(rate);
215 if (err != OK) {
Wei Jia700a7c22018-09-14 18:04:35 -0700216 ALOGW("failed to get playback rate from audio sink, err(%d)", err);
Wei Jia53692fa2017-12-11 10:33:46 -0800217 return err;
218 }
219 }
220 mPlaybackSettings = rate;
Wei Jia700a7c22018-09-14 18:04:35 -0700221 mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
Wei Jia53692fa2017-12-11 10:33:46 -0800222 return OK;
223}
224
225status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
226 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
227 sp<AMessage> response;
228 status_t err = msg->postAndAwaitResponse(&response);
229 if (err == OK && response != NULL) {
230 CHECK(response->findInt32("err", &err));
231 if (err == OK) {
232 readFromAMessage(response, rate);
233 }
234 }
235 return err;
236}
237
// Handler for kWhatGetPlaybackSettings. Prefers the AudioSink's view of the
// current rate (it may have quantized small changes); falls back to the
// cached mPlaybackSettings when no sink is ready.
status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate, %f vs %f",
                        rate->mSpeed, mPlaybackSettings.mSpeed);
            }
            // get playback settings used by audiosink, as it may be
            // slightly off due to audiosink not taking small changes.
            mPlaybackSettings = *rate;
        }
        return err;
    }
    *rate = mPlaybackSettings;
    return OK;
}
255
256status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
257 sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
258 writeToAMessage(msg, sync, videoFpsHint);
259 sp<AMessage> response;
260 status_t err = msg->postAndAwaitResponse(&response);
261 if (err == OK && response != NULL) {
262 CHECK(response->findInt32("err", &err));
263 }
264 return err;
265}
266
267status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
268 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
269 return BAD_VALUE;
270 }
271 // TODO: support sync sources
272 return INVALID_OPERATION;
273}
274
275status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
276 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
277 sp<AMessage> response;
278 status_t err = msg->postAndAwaitResponse(&response);
279 if (err == OK && response != NULL) {
280 CHECK(response->findInt32("err", &err));
281 if (err == OK) {
282 readFromAMessage(response, sync, videoFps);
283 }
284 }
285 return err;
286}
287
288status_t NuPlayer2::Renderer::onGetSyncSettings(
289 AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
290 *sync = mSyncSettings;
291 *videoFps = -1.f;
292 return OK;
293}
294
// Flushes one stream (audio or video). Under the lock it bumps the queue and
// drain generations — invalidating any in-flight queue/drain messages — and
// clears the clock anchor; the actual queue emptying happens later on the
// looper via the posted kWhatFlush message.
void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        // Discontinuity ahead: drop the anchor and lateness bookkeeping.
        mMediaClock->clearAnchor();
        mVideoLateByUs = 0;
        mNextVideoTimeMediaUs = -1;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}
319
// Intentionally a no-op: time discontinuities are handled via flush().
void NuPlayer2::Renderer::signalTimeDiscontinuity() {
}
322
323void NuPlayer2::Renderer::signalDisableOffloadAudio() {
324 (new AMessage(kWhatDisableOffloadAudio, this))->post();
325}
326
327void NuPlayer2::Renderer::signalEnableOffloadAudio() {
328 (new AMessage(kWhatEnableOffloadAudio, this))->post();
329}
330
331void NuPlayer2::Renderer::pause() {
332 (new AMessage(kWhatPause, this))->post();
333}
334
335void NuPlayer2::Renderer::resume() {
336 (new AMessage(kWhatResume, this))->post();
337}
338
339void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
340 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
341 msg->setFloat("frame-rate", fps);
342 msg->post();
343}
344
// Called on any threads without mLock acquired.
// Returns the current media position via the MediaClock. If the clock has no
// anchor yet, tries to bootstrap one from the AudioSink's rendered duration
// (only possible once audio playback has actually produced a timestamp).
status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            // No audio anchor to bootstrap from; report the original failure.
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            // Sink hasn't rendered anything yet.
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    // Retry now that an anchor has been established.
    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}
374
// Resets the first-audio-anchor bookkeeping (caller must hold mLock).
void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}
379
// Records the media time of the first audio sample after a (re)start, and
// seeds the MediaClock's starting time. No-op once an anchor exists.
// Caller must hold mLock.
void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    if (mAudioFirstAnchorTimeMediaUs == -1) {
        mAudioFirstAnchorTimeMediaUs = mediaUs;
        mMediaClock->setStartingTimeMedia(mediaUs);
    }
}
386
// Called on renderer looper.
// Drops the MediaClock anchor and the renderer's anchor bookkeeping.
void NuPlayer2::Renderer::clearAnchorTime() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}
393
394void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
395 Mutex::Autolock autoLock(mLock);
396 mVideoLateByUs = lateUs;
397}
398
399int64_t NuPlayer2::Renderer::getVideoLateByUs() {
400 Mutex::Autolock autoLock(mLock);
401 return mVideoLateByUs;
402}
403
// Synchronously opens the audio sink on the renderer looper with the given
// format/flags. On success, *isOffloaded (when non-null) reports whether the
// sink ended up in offload mode. Returns INVALID_OPERATION if the round trip
// itself fails.
status_t NuPlayer2::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded,
        bool isStreaming) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);
    msg->setInt32("isStreaming", isStreaming);

    sp<AMessage> response;
    status_t postStatus = msg->postAndAwaitResponse(&response);

    int32_t err;
    if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
        // Message delivery failed or the reply is malformed.
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}
431
432void NuPlayer2::Renderer::closeAudioSink() {
433 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
434
435 sp<AMessage> response;
436 msg->postAndAwaitResponse(&response);
437}
438
439void NuPlayer2::Renderer::changeAudioFormat(
440 const sp<AMessage> &format,
441 bool offloadOnly,
442 bool hasVideo,
443 uint32_t flags,
444 bool isStreaming,
445 const sp<AMessage> &notify) {
446 sp<AMessage> meta = new AMessage;
447 meta->setMessage("format", format);
448 meta->setInt32("offload-only", offloadOnly);
449 meta->setInt32("has-video", hasVideo);
450 meta->setInt32("flags", flags);
451 meta->setInt32("isStreaming", isStreaming);
452
453 sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
454 msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
455 msg->setMessage("notify", notify);
456 msg->setMessage("meta", meta);
457 msg->post();
458}
459
// Central dispatch for all renderer messages (runs on the renderer looper).
// Generation counters ("queueGeneration"/"drainGeneration") guard every
// queue/drain message so work posted before a flush is silently dropped.
void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            // Synchronous request from openAudioSink(): unpack parameters,
            // open the sink, and reply with the status plus offload state.
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            uint32_t isStreaming;
            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            // Synchronous request from closeAudioSink().
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatChangeAudioFormat:
        {
            int32_t queueGeneration;
            CHECK(msg->findInt32("queueGeneration", &queueGeneration));

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));

            if (offloadingAudio()) {
                ALOGW("changeAudioFormat should NOT be called in offload mode");
                notify->setInt32("err", INVALID_OPERATION);
                notify->post();
                break;
            }

            sp<AMessage> meta;
            CHECK(msg->findMessage("meta", &meta));

            if (queueGeneration != getQueueGeneration(true /* audio */)
                    || mAudioQueue.empty()) {
                // Stale request or nothing queued ahead of it: apply the
                // format change immediately.
                onChangeAudioFormat(meta, notify);
                break;
            }

            // Otherwise enqueue it as a marker entry so the change happens
            // after the buffers already in the audio queue are drained.
            QueueEntry entry;
            entry.mNotifyConsumed = notify;
            entry.mMeta = meta;

            Mutex::Autolock autoLock(mLock);
            mAudioQueue.push_back(entry);
            postDrainAudioQueue_l();

            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                // More data remains; schedule the next drain based on how
                // much the sink still has buffered.
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                // Handle AudioTrack race when start is immediately called after flush.
                uint32_t numFramesPendingPlayout =
                    (mNumFramesWritten > numFramesPlayed ?
                        mNumFramesWritten - numFramesPlayed : 0);

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackSettings.mSpeed > 1.0f) {
                    delayUs /= mPlaybackSettings.mSpeed;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            // Re-arm for the next frame.
            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            // Deferred audio EOS (see notifyEOS with delay); drop if a newer
            // EOS generation superseded it.
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            // Paused too long while offloading: tear down the sink so the
            // audio DSP can power down, then drop the wake lock.
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}
767
// Schedules a kWhatDrainAudioQueue message after delayUs (caller must hold
// mLock). No-ops when a drain is already pending, queues are being synced,
// callback mode is active, or there is nothing to drain.
void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}
790
// Arms the "media rendering started" notification: snapshots the current
// drain generations so notifyIfMediaRenderingStarted_l() can tell when both
// streams have delivered data in this generation. Caller must hold mLock.
void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}
796
// Fires kWhatMediaRenderingStart exactly once per armed generation, after
// both audio and video have delivered data. While paused, only marks the
// data as delivered; the notification waits for resume. Caller holds mLock.
void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        // Disarm so the notification is sent only once.
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}
812
// static
// AudioSink callback trampoline: recovers the Renderer from the cookie and
// routes sink events. Runs on the AudioSink's callback thread, not the
// renderer looper. Returns bytes filled for FILL_BUFFER, 0 otherwise.
size_t NuPlayer2::Renderer::AudioSinkCallback(
        MediaPlayer2Interface::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayer2Interface::AudioSink::cb_event_t event) {
    NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;

    switch (event) {
        case MediaPlayer2Interface::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOSCallback();
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}
846
847void NuPlayer2::Renderer::notifyEOSCallback() {
848 Mutex::Autolock autoLock(mLock);
849
850 if (!mUseAudioCallback) {
851 return;
852 }
853
854 notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
855}
856
// Pull-mode audio path: called from the AudioSink callback thread to copy up
// to `size` bytes of queued PCM into `buffer`. Updates the media-clock
// anchor from the sink's played-out duration and posts EOS handling when the
// end of the queue is reached. Returns the number of bytes copied.
size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        // Not in callback mode (e.g. raced with a mode switch): supply nothing.
        return 0;
    }

    bool hasEOS = false;

    size_t sizeCopied = 0;
    bool firstEntry = true;
    QueueEntry *entry; // will be valid after while loop if hasEOS is set.
    while (sizeCopied < size && !mAudioQueue.empty()) {
        entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) { // EOS
            hasEOS = true;
            mAudioQueue.erase(mAudioQueue.begin());
            break;
        }

        if (firstEntry && entry->mOffset == 0) {
            // Start of a fresh buffer: record the first anchor time if needed.
            firstEntry = false;
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
        }

        // Copy as much of this entry as fits in the remaining output space.
        size_t copy = entry->mBuffer->size() - entry->mOffset;
        size_t sizeRemaining = size - sizeCopied;
        if (copy > sizeRemaining) {
            copy = sizeRemaining;
        }

        memcpy((char *)buffer + sizeCopied,
               entry->mBuffer->data() + entry->mOffset,
               copy);

        entry->mOffset += copy;
        if (entry->mOffset == entry->mBuffer->size()) {
            // Entry fully consumed: release it back to the decoder.
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
        }
        sizeCopied += copy;

        notifyIfMediaRenderingStarted_l();
    }

    if (mAudioFirstAnchorTimeMediaUs >= 0) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs =
            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
        // we don't know how much data we are queueing for offloaded tracks.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    }

    // for non-offloaded audio, we need to compute the frames written because
    // there is no EVENT_STREAM_END notification. The frames written gives
    // an estimate on the pending played out duration.
    if (!offloadingAudio()) {
        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    }

    if (hasEOS) {
        (new AMessage(kWhatStopAudioSink, this))->post();
        // As there is currently no EVENT_STREAM_END callback notification for
        // non-offloaded audio tracks, we need to post the EOS ourselves.
        if (!offloadingAudio()) {
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            ALOGV("fillAudioBuffer: notifyEOS_l "
                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
            notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
        }
    }
    return sizeCopied;
}
939
// Called when the sink cannot be drained normally (getPosition failed):
// finds the last EOS/"eos"-marked entry in the audio queue, posts all
// consumed/EOS/format-change notifications up to that point, and drops the
// corresponding entries so NuPlayer2Decoder is not left waiting on them.
void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            // Track the position just past the *last* EOS marker.
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == nullptr) {
                if (it->mNotifyConsumed == nullptr) {
                    // delay doesn't matter as we don't even have an AudioTrack
                    notifyEOS(true /* audio */, it->mFinalResult);
                } else {
                    // TAG for re-opening audio sink.
                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
                }
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}
971
972bool NuPlayer2::Renderer::onDrainAudioQueue() {
973 // do not drain audio during teardown as queued buffers may be invalid.
974 if (mAudioTornDown) {
975 return false;
976 }
977 // TODO: This call to getPosition checks if AudioTrack has been created
978 // in AudioSink before draining audio. If AudioTrack doesn't exist, then
979 // CHECKs on getPosition will fail.
980 // We still need to figure out why AudioTrack is not created when
981 // this function is called. One possible reason could be leftover
982 // audio. Another possible place is to check whether decoder
983 // has received INFO_FORMAT_CHANGED as the first buffer since
984 // AudioSink is opened there, and possible interactions with flush
985 // immediately after start. Investigate error message
986 // "vorbis_dsp_synthesis returned -135", along with RTSP.
987 uint32_t numFramesPlayed;
988 if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
989 // When getPosition fails, renderer will not reschedule the draining
990 // unless new samples are queued.
991 // If we have pending EOS (or "eos" marker for discontinuities), we need
992 // to post these now as NuPlayer2Decoder might be waiting for it.
993 drainAudioQueueUntilLastEOS();
994
995 ALOGW("onDrainAudioQueue(): audio sink is not ready");
996 return false;
997 }
998
999#if 0
1000 ssize_t numFramesAvailableToWrite =
1001 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
1002
1003 if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
1004 ALOGI("audio sink underrun");
1005 } else {
1006 ALOGV("audio queue has %d frames left to play",
1007 mAudioSink->frameCount() - numFramesAvailableToWrite);
1008 }
1009#endif
1010
1011 uint32_t prevFramesWritten = mNumFramesWritten;
1012 while (!mAudioQueue.empty()) {
1013 QueueEntry *entry = &*mAudioQueue.begin();
1014
1015 if (entry->mBuffer == NULL) {
1016 if (entry->mNotifyConsumed != nullptr) {
1017 // TAG for re-open audio sink.
1018 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1019 mAudioQueue.erase(mAudioQueue.begin());
1020 continue;
1021 }
1022
1023 // EOS
1024 if (mPaused) {
1025 // Do not notify EOS when paused.
1026 // This is needed to avoid switch to next clip while in pause.
1027 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1028 return false;
1029 }
1030
1031 int64_t postEOSDelayUs = 0;
1032 if (mAudioSink->needsTrailingPadding()) {
1033 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1034 }
1035 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1036 mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1037
1038 mAudioQueue.erase(mAudioQueue.begin());
1039 entry = NULL;
1040 if (mAudioSink->needsTrailingPadding()) {
1041 // If we're not in gapless playback (i.e. through setNextPlayer), we
1042 // need to stop the track here, because that will play out the last
1043 // little bit at the end of the file. Otherwise short files won't play.
1044 mAudioSink->stop();
1045 mNumFramesWritten = 0;
1046 }
1047 return false;
1048 }
1049
1050 mLastAudioBufferDrained = entry->mBufferOrdinal;
1051
1052 // ignore 0-sized buffer which could be EOS marker with no data
1053 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1054 int64_t mediaTimeUs;
1055 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1056 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1057 mediaTimeUs / 1E6);
1058 onNewAudioMediaTime(mediaTimeUs);
1059 }
1060
1061 size_t copy = entry->mBuffer->size() - entry->mOffset;
1062
1063 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1064 copy, false /* blocking */);
1065 if (written < 0) {
1066 // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1067 if (written == WOULD_BLOCK) {
1068 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1069 } else {
1070 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1071 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1072 // true, in which case the NuPlayer2 will handle the reconnect.
1073 notifyAudioTearDown(kDueToError);
1074 }
1075 break;
1076 }
1077
1078 entry->mOffset += written;
1079 size_t remainder = entry->mBuffer->size() - entry->mOffset;
1080 if ((ssize_t)remainder < mAudioSink->frameSize()) {
1081 if (remainder > 0) {
1082 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1083 remainder);
1084 entry->mOffset += remainder;
1085 copy -= remainder;
1086 }
1087
1088 entry->mNotifyConsumed->post();
1089 mAudioQueue.erase(mAudioQueue.begin());
1090
1091 entry = NULL;
1092 }
1093
1094 size_t copiedFrames = written / mAudioSink->frameSize();
1095 mNumFramesWritten += copiedFrames;
1096
1097 {
1098 Mutex::Autolock autoLock(mLock);
1099 int64_t maxTimeMedia;
1100 maxTimeMedia =
1101 mAnchorTimeMediaUs +
1102 (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1103 * 1000LL * mAudioSink->msecsPerFrame());
1104 mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1105
1106 notifyIfMediaRenderingStarted_l();
1107 }
1108
1109 if (written != (ssize_t)copy) {
1110 // A short count was received from AudioSink::write()
1111 //
1112 // AudioSink write is called in non-blocking mode.
1113 // It may return with a short count when:
1114 //
1115 // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1116 // discarded.
1117 // 2) The data to be copied exceeds the available buffer in AudioSink.
1118 // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1119 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1120
1121 // (Case 1)
1122 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it
1123 // needs to fail, as we should not carry over fractional frames between calls.
1124 CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1125
1126 // (Case 2, 3, 4)
1127 // Return early to the caller.
1128 // Beware of calling immediately again as this may busy-loop if you are not careful.
1129 ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1130 break;
1131 }
1132 }
1133
1134 // calculate whether we need to reschedule another write.
1135 bool reschedule = !mAudioQueue.empty()
1136 && (!mPaused
1137 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1138 //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u",
1139 // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1140 return reschedule;
1141}
1142
1143int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1144 int32_t sampleRate = offloadingAudio() ?
1145 mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1146 if (sampleRate == 0) {
1147 ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1148 return 0;
1149 }
1150 // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1151 return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1152}
1153
1154// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1155int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1156 int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1157 if (mUseVirtualAudioSink) {
1158 int64_t nowUs = ALooper::GetNowUs();
1159 int64_t mediaUs;
1160 if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
Chih-Hung Hsiehd42529d2018-12-11 13:53:10 -08001161 return 0LL;
Wei Jia53692fa2017-12-11 10:33:46 -08001162 } else {
1163 return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1164 }
1165 }
1166
1167 const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1168 int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
1169 if (pendingUs < 0) {
1170 // This shouldn't happen unless the timestamp is stale.
1171 ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1172 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1173 __func__, (long long)pendingUs,
1174 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1175 pendingUs = 0;
1176 }
1177 return pendingUs;
1178}
1179
1180int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1181 int64_t realUs;
1182 if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1183 // If failed to get current position, e.g. due to audio clock is
1184 // not ready, then just play out video immediately without delay.
1185 return nowUs;
1186 }
1187 return realUs;
1188}
1189
// Called with a newly rendered audio buffer's media timestamp; periodically
// re-anchors the media clock to the audio position, or falls back to a
// system-clock-paced "virtual" sink if the AudioSink never starts rendering.
void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        // A positive sink position means the sink has actually rendered data.
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            // Anchor "now" at the media time currently coming out of the
            // speaker (queued timestamp minus what is still pending in the sink).
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            // Rate-limit anchor updates.
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    // Remember the anchor state for maxTimeMedia extrapolation (see
    // onDrainAudioQueue) and for the same-timestamp early-out above.
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}
1234
// Schedule a kWhatDrainVideoQueue message for the head of the video queue,
// timed so the frame is handed off about two vsyncs before it is due.
// Called without mLock acquired.
void NuPlayer2::Renderer::postDrainVideoQueue() {
    // Skip if a drain is already scheduled, queues are still being synced,
    // or we are paused after having shown at least one frame.
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    if (mFlags & FLAG_REAL_TIME) {
        // Real-time mode: "timeUs" is already a system-clock render time;
        // snap it to the vsync grid and post directly.
        int64_t realTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));

        realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        int64_t delayUs = realTimeUs - nowUs;

        ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
        // post 2 display refreshes before rendering is due
        msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

        mDrainVideoQueuePending = true;
        return;
    }

    int64_t mediaTimeUs;
    CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

    {
        Mutex::Autolock autoLock(mLock);
        // No anchor yet (e.g. video-only, or before first audio timestamp):
        // anchor the media clock on this frame.
        if (mAnchorTimeMediaUs < 0) {
            mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
            mAnchorTimeMediaUs = mediaTimeUs;
        }
    }
    mNextVideoTimeMediaUs = mediaTimeUs;
    if (!mHasAudio) {
        // smooth out videos >= 10fps
        mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
    }

    // First frame, or frames dated before the first audio anchor, go out
    // immediately; otherwise let the media clock fire the timer ~2 vsyncs
    // ahead of the frame's due time.
    if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
        msg->post();
    } else {
        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        // post 2 display refreshes before rendering is due
        mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
    }

    mDrainVideoQueuePending = true;
}
1305
// Drain one entry from the video queue: compute its vsync-aligned render
// time, decide whether it is too late to show, and hand it back to the
// decoder with "render"/"timestampNs" set on the consumed notification.
void NuPlayer2::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    int64_t mediaTimeUs = -1;
    if (mFlags & FLAG_REAL_TIME) {
        // Real-time mode: "timeUs" is already a system-clock render time.
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }
    // Snap the render time to the display's vsync grid.
    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

    bool tooLate = false;

    if (!mPaused) {
        setVideoLateByUs(nowUs - realTimeUs);
        // Frames more than 40ms behind schedule are dropped (not rendered).
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);

            if (!(mFlags & FLAG_REAL_TIME)
                    && mLastAudioMediaTimeUs != -1
                    && mediaTimeUs > mLastAudioMediaTimeUs) {
                // If audio ends before video, video continues to drive media clock.
                // Also smooth out videos >= 10fps.
                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
            }
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            clearAnchorTime();
        }
    }

    // Always render the first video frame while keeping stats on A/V sync.
    if (!mVideoSampleReceived) {
        realTimeUs = nowUs;
        tooLate = false;
    }

    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        // First successfully drained frame while playing: report rendering start.
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}
1393
1394void NuPlayer2::Renderer::notifyVideoRenderingStart() {
1395 sp<AMessage> notify = mNotify->dup();
1396 notify->setInt32("what", kWhatVideoRenderingStart);
1397 notify->post();
1398}
1399
1400void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1401 Mutex::Autolock autoLock(mLock);
1402 notifyEOS_l(audio, finalResult, delayUs);
1403}
1404
// Notify the upper layer of an end-of-stream on the audio or video path.
// Delayed audio EOS is re-posted as a kWhatEOS message (generation-tagged so
// a flush can cancel it). On audio EOS, hand the media clock over to video.
void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
    if (audio && delayUs > 0) {
        // Defer: the trailing audio is still playing out. The generation tag
        // lets a subsequent flush invalidate this pending message.
        sp<AMessage> msg = new AMessage(kWhatEOS, this);
        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
        msg->setInt32("finalResult", finalResult);
        msg->post(delayUs);
        return;
    }
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatEOS);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->setInt32("finalResult", finalResult);
    notify->post(delayUs);

    if (audio) {
        // Video might outlive audio. Clear anchor to enable video only case.
        mAnchorTimeMediaUs = -1;
        mHasAudio = false;
        if (mNextVideoTimeMediaUs >= 0) {
            int64_t mediaUs = 0;
            int64_t nowUs = ALooper::GetNowUs();
            status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
            if (result == OK) {
                // Clock still running: just make sure it may advance up to
                // the next queued video frame.
                if (mNextVideoTimeMediaUs > mediaUs) {
                    mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
                }
            } else {
                // Clock has no anchor: re-anchor on the next video frame
                // (plus 100ms of headroom) so video can keep playing.
                mMediaClock->updateAnchor(
                        mNextVideoTimeMediaUs, nowUs, mNextVideoTimeMediaUs + 100000);
            }
        }
    }
}
1438
1439void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1440 sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1441 msg->setInt32("reason", reason);
1442 msg->post();
1443}
1444
// Accept a decoded buffer from the decoder, append it to the audio or video
// queue and kick the corresponding drain. While the initial A/V queue sync is
// active, also drop leading audio so the first audio/video timestamps align.
void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // Buffers queued before the last flush are returned unrendered.
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        if (mVideoScheduler == NULL) {
            // Lazily create the vsync-based frame scheduler.
            mVideoScheduler = new VideoFrameScheduler2();
            mVideoScheduler->init();
        }
    }

    sp<RefBase> obj;
    CHECK(msg->findObject("buffer", &obj));
    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    // NOTE(review): mAudioQueue is pushed under mLock while mVideoQueue is
    // not — presumably mVideoQueue is only touched on the renderer's looper
    // thread; confirm before changing the locking here.
    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    // Remainder: initial A/V queue synchronization.
    Mutex::Autolock autoLock(mLock);
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000LL) {
        // Audio data starts More than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    // First timestamps are close enough: stop syncing and start draining.
    syncQueuesDone_l();
}
1525
// End the initial A/V queue-sync phase and kick both drains. Must be called
// with mLock held (the _l suffix); note it temporarily drops the lock around
// postDrainVideoQueue(), which is documented as "called without mLock".
void NuPlayer2::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        // postDrainVideoQueue() takes mLock internally (via getSyncQueues()
        // etc.), so release it across the call to avoid self-deadlock.
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}
1543
// Queue an EOS marker (entry with no buffer, carrying |finalResult|) on the
// audio or video queue. EOS ends the initial queue-sync phase for an empty
// queue, since no more data will arrive to align timestamps with.
void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // EOS posted before the last flush is stale; ignore it.
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    // mBuffer is left NULL: that is what marks this entry as EOS downstream.
    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // Video queue is not guarded by mLock here (matches onQueueBuffer);
        // only the sync-done transition takes the lock.
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}
1575
// Flush one stream: return all queued buffers to the decoder, invalidate
// pending drain/EOS messages via generation bumps, reset the relevant clock
// anchors and (for audio) cycle the AudioSink so stale data is discarded.
void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
            mLastAudioMediaTimeUs = -1;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
            mVideoRenderingStarted = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
    }
    clearAnchorTime();

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            // Invalidate in-flight drain and delayed-EOS messages.
            ++mAudioDrainGeneration;
            ++mAudioEOSGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
        // Wait for the sink to start before resuming clock updates.
        mNextAudioClockUpdateTimeUs = -1;
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}
1666
1667void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) {
1668 while (!queue->empty()) {
1669 QueueEntry *entry = &*queue->begin();
1670
1671 if (entry->mBuffer != NULL) {
1672 entry->mNotifyConsumed->post();
1673 } else if (entry->mNotifyConsumed != nullptr) {
1674 // Is it needed to open audio sink now?
1675 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1676 }
1677
1678 queue->erase(queue->begin());
1679 entry = NULL;
1680 }
1681}
1682
1683void NuPlayer2::Renderer::notifyFlushComplete(bool audio) {
1684 sp<AMessage> notify = mNotify->dup();
1685 notify->setInt32("what", kWhatFlushComplete);
1686 notify->setInt32("audio", static_cast<int32_t>(audio));
1687 notify->post();
1688}
1689
1690bool NuPlayer2::Renderer::dropBufferIfStale(
1691 bool audio, const sp<AMessage> &msg) {
1692 int32_t queueGeneration;
1693 CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1694
1695 if (queueGeneration == getQueueGeneration(audio)) {
1696 return false;
1697 }
1698
1699 sp<AMessage> notifyConsumed;
1700 if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1701 notifyConsumed->post();
1702 }
1703
1704 return true;
1705}
1706
1707void NuPlayer2::Renderer::onAudioSinkChanged() {
1708 if (offloadingAudio()) {
1709 return;
1710 }
1711 CHECK(!mDrainAudioQueuePending);
1712 mNumFramesWritten = 0;
1713 mAnchorNumFramesWritten = -1;
1714 uint32_t written;
1715 if (mAudioSink->getFramesWritten(&written) == OK) {
1716 mNumFramesWritten = written;
1717 }
1718}
1719
1720void NuPlayer2::Renderer::onDisableOffloadAudio() {
1721 Mutex::Autolock autoLock(mLock);
1722 mFlags &= ~FLAG_OFFLOAD_AUDIO;
1723 ++mAudioDrainGeneration;
1724 if (mAudioRenderingStartGeneration != -1) {
1725 prepareForMediaRenderingStart_l();
1726 }
1727}
1728
1729void NuPlayer2::Renderer::onEnableOffloadAudio() {
1730 Mutex::Autolock autoLock(mLock);
1731 mFlags |= FLAG_OFFLOAD_AUDIO;
1732 ++mAudioDrainGeneration;
1733 if (mAudioRenderingStartGeneration != -1) {
1734 prepareForMediaRenderingStart_l();
1735 }
1736}
1737
// Pause playback: freeze the media clock, cancel pending video drains, and
// pause the AudioSink. Audio drains are deliberately left enabled so the
// sink's buffer keeps filling while paused (faster resume).
void NuPlayer2::Renderer::onPause() {
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // we do not increment audio drain generation so that we fill audio buffer during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        // Rate 0.0 halts the media clock while paused.
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    mAudioSink->pause();
    // In offload mode this arms a timeout that tears the sink down if we
    // stay paused too long (see startAudioOffloadPauseTimeout).
    startAudioOffloadPauseTimeout();

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}
1762
// Resume playback: restart the AudioSink, restore the media clock rate from
// the current playback settings, and kick both drains.
void NuPlayer2::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    cancelAudioOffloadPauseTimeout();
    if (mAudioSink->ready()) {
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            // NuPlayer2 will rebuild the audio path.
            notifyAudioTearDown(kDueToError);
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // rendering started message may have been delayed if we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l();
        }
        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    // postDrainVideoQueue() must run without mLock held.
    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}
1801
1802void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
1803 if (mVideoScheduler == NULL) {
Dichen Zhangc37b1902018-12-18 11:36:13 -08001804 mVideoScheduler = new VideoFrameScheduler2();
Wei Jia53692fa2017-12-11 10:33:46 -08001805 }
1806 mVideoScheduler->init(fps);
1807}
1808
1809int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) {
1810 Mutex::Autolock autoLock(mLock);
1811 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1812}
1813
1814int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) {
1815 Mutex::Autolock autoLock(mLock);
1816 return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1817}
1818
1819bool NuPlayer2::Renderer::getSyncQueues() {
1820 Mutex::Autolock autoLock(mLock);
1821 return mSyncQueues;
1822}
1823
1824void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1825 if (mAudioTornDown) {
1826 return;
1827 }
1828 mAudioTornDown = true;
1829
1830 int64_t currentPositionUs;
1831 sp<AMessage> notify = mNotify->dup();
1832 if (getCurrentPosition(&currentPositionUs) == OK) {
1833 notify->setInt64("positionUs", currentPositionUs);
1834 }
1835
1836 mAudioSink->stop();
1837 mAudioSink->flush();
1838
1839 notify->setInt32("what", kWhatAudioTearDown);
1840 notify->setInt32("reason", reason);
1841 notify->post();
1842}
1843
1844void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() {
1845 if (offloadingAudio()) {
1846 mWakeLock->acquire();
1847 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1848 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1849 msg->post(kOffloadPauseMaxUs);
1850 }
1851}
1852
1853void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() {
1854 // We may have called startAudioOffloadPauseTimeout() without
1855 // the AudioSink open and with offloadingAudio enabled.
1856 //
1857 // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
1858 // we always release the wakelock and increment the pause timeout generation.
1859 //
1860 // Note: The acquired wakelock prevents the device from suspending
1861 // immediately after offload pause (in case a resume happens shortly thereafter).
1862 mWakeLock->release(true);
1863 ++mAudioOffloadPauseTimeoutGeneration;
1864}
1865
1866status_t NuPlayer2::Renderer::onOpenAudioSink(
1867 const sp<AMessage> &format,
1868 bool offloadOnly,
1869 bool hasVideo,
1870 uint32_t flags,
1871 bool isStreaming) {
1872 ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1873 offloadOnly, offloadingAudio());
Dichen Zhangf8726912018-10-17 13:31:26 -07001874
Wei Jia53692fa2017-12-11 10:33:46 -08001875 bool audioSinkChanged = false;
1876
1877 int32_t numChannels;
1878 CHECK(format->findInt32("channel-count", &numChannels));
1879
1880 int32_t channelMask;
1881 if (!format->findInt32("channel-mask", &channelMask)) {
1882 // signal to the AudioSink to derive the mask from count.
1883 channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1884 }
1885
1886 int32_t sampleRate;
1887 CHECK(format->findInt32("sample-rate", &sampleRate));
1888
Wei Jia896b4d62019-01-04 15:26:48 -08001889 // read pcm encoding from MediaCodec output format, if available
1890 int32_t pcmEncoding;
1891 audio_format_t audioFormat =
1892 format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ?
1893 audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT;
1894
Wei Jia53692fa2017-12-11 10:33:46 -08001895 if (offloadingAudio()) {
Wei Jia53692fa2017-12-11 10:33:46 -08001896 AString mime;
1897 CHECK(format->findString("mime", &mime));
1898 status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1899
1900 if (err != OK) {
1901 ALOGE("Couldn't map mime \"%s\" to a valid "
1902 "audio_format", mime.c_str());
1903 onDisableOffloadAudio();
1904 } else {
1905 ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1906 mime.c_str(), audioFormat);
1907
1908 int avgBitRate = -1;
1909 format->findInt32("bitrate", &avgBitRate);
1910
1911 int32_t aacProfile = -1;
1912 if (audioFormat == AUDIO_FORMAT_AAC
1913 && format->findInt32("aac-profile", &aacProfile)) {
1914 // Redefine AAC format as per aac profile
1915 mapAACProfileToAudioFormat(
1916 audioFormat,
1917 aacProfile);
1918 }
1919
1920 audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1921 offloadInfo.duration_us = -1;
1922 format->findInt64(
1923 "durationUs", &offloadInfo.duration_us);
1924 offloadInfo.sample_rate = sampleRate;
1925 offloadInfo.channel_mask = channelMask;
1926 offloadInfo.format = audioFormat;
1927 offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1928 offloadInfo.bit_rate = avgBitRate;
1929 offloadInfo.has_video = hasVideo;
1930 offloadInfo.is_streaming = isStreaming;
1931
1932 if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1933 ALOGV("openAudioSink: no change in offload mode");
1934 // no change from previous configuration, everything ok.
1935 return OK;
1936 }
1937 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1938
1939 ALOGV("openAudioSink: try to open AudioSink in offload mode");
1940 uint32_t offloadFlags = flags;
1941 offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1942 offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1943 audioSinkChanged = true;
1944 mAudioSink->close();
1945
1946 err = mAudioSink->open(
1947 sampleRate,
1948 numChannels,
1949 (audio_channel_mask_t)channelMask,
1950 audioFormat,
Wei Jia53692fa2017-12-11 10:33:46 -08001951 &NuPlayer2::Renderer::AudioSinkCallback,
1952 this,
1953 (audio_output_flags_t)offloadFlags,
1954 &offloadInfo);
1955
1956 if (err == OK) {
1957 err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1958 }
1959
1960 if (err == OK) {
1961 // If the playback is offloaded to h/w, we pass
1962 // the HAL some metadata information.
1963 // We don't want to do this for PCM because it
1964 // will be going through the AudioFlinger mixer
1965 // before reaching the hardware.
1966 // TODO
1967 mCurrentOffloadInfo = offloadInfo;
1968 if (!mPaused) { // for preview mode, don't start if paused
1969 err = mAudioSink->start();
1970 }
1971 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1972 }
1973 if (err != OK) {
1974 // Clean up, fall back to non offload mode.
1975 mAudioSink->close();
1976 onDisableOffloadAudio();
1977 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1978 ALOGV("openAudioSink: offload failed");
1979 if (offloadOnly) {
1980 notifyAudioTearDown(kForceNonOffload);
1981 }
1982 } else {
1983 mUseAudioCallback = true; // offload mode transfers data through callback
1984 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
1985 }
1986 }
1987 }
1988 if (!offloadOnly && !offloadingAudio()) {
1989 ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1990 uint32_t pcmFlags = flags;
1991 pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1992
1993 const PcmInfo info = {
1994 (audio_channel_mask_t)channelMask,
1995 (audio_output_flags_t)pcmFlags,
Wei Jia896b4d62019-01-04 15:26:48 -08001996 audioFormat,
Wei Jia53692fa2017-12-11 10:33:46 -08001997 numChannels,
1998 sampleRate
1999 };
2000 if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
2001 ALOGV("openAudioSink: no change in pcm mode");
2002 // no change from previous configuration, everything ok.
2003 return OK;
2004 }
2005
2006 audioSinkChanged = true;
2007 mAudioSink->close();
2008 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2009 // Note: It is possible to set up the callback, but not use it to send audio data.
2010 // This requires a fix in AudioSink to explicitly specify the transfer mode.
2011 mUseAudioCallback = getUseAudioCallbackSetting();
2012 if (mUseAudioCallback) {
2013 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
2014 }
2015
2016 // Compute the desired buffer size.
2017 // For callback mode, the amount of time before wakeup is about half the buffer size.
2018 const uint32_t frameCount =
2019 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
2020
Wei Jia53692fa2017-12-11 10:33:46 -08002021 // We should always be able to set our playback settings if the sink is closed.
2022 LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
2023 "onOpenAudioSink: can't set playback rate on closed sink");
2024 status_t err = mAudioSink->open(
2025 sampleRate,
2026 numChannels,
2027 (audio_channel_mask_t)channelMask,
Wei Jia896b4d62019-01-04 15:26:48 -08002028 audioFormat,
Wei Jia53692fa2017-12-11 10:33:46 -08002029 mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL,
2030 mUseAudioCallback ? this : NULL,
2031 (audio_output_flags_t)pcmFlags,
2032 NULL,
Wei Jia53692fa2017-12-11 10:33:46 -08002033 frameCount);
2034 if (err != OK) {
2035 ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2036 mAudioSink->close();
2037 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2038 return err;
2039 }
2040 mCurrentPcmInfo = info;
2041 if (!mPaused) { // for preview mode, don't start if paused
2042 mAudioSink->start();
2043 }
2044 }
2045 if (audioSinkChanged) {
2046 onAudioSinkChanged();
2047 }
2048 mAudioTornDown = false;
2049 return OK;
2050}
2051
2052void NuPlayer2::Renderer::onCloseAudioSink() {
2053 mAudioSink->close();
2054 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2055 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2056}
2057
2058void NuPlayer2::Renderer::onChangeAudioFormat(
2059 const sp<AMessage> &meta, const sp<AMessage> &notify) {
2060 sp<AMessage> format;
2061 CHECK(meta->findMessage("format", &format));
2062
2063 int32_t offloadOnly;
2064 CHECK(meta->findInt32("offload-only", &offloadOnly));
2065
2066 int32_t hasVideo;
2067 CHECK(meta->findInt32("has-video", &hasVideo));
2068
2069 uint32_t flags;
2070 CHECK(meta->findInt32("flags", (int32_t *)&flags));
2071
2072 uint32_t isStreaming;
2073 CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2074
2075 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2076
2077 if (err != OK) {
2078 notify->setInt32("err", err);
2079 }
2080 notify->post();
2081}
2082
2083} // namespace android