/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayer2Renderer"
19#include <utils/Log.h>
20
21#include "JWakeLock.h"
22#include "NuPlayer2Renderer.h"
23#include <algorithm>
24#include <cutils/properties.h>
25#include <media/stagefright/foundation/ADebug.h>
26#include <media/stagefright/foundation/AMessage.h>
27#include <media/stagefright/foundation/AUtils.h>
28#include <media/stagefright/MediaClock.h>
29#include <media/stagefright/MediaErrors.h>
30#include <media/stagefright/MetaData.h>
31#include <media/stagefright/Utils.h>
32#include <media/stagefright/VideoFrameScheduler.h>
33#include <media/MediaCodecBuffer.h>
34
35#include <inttypes.h>
36
37namespace android {
38
39/*
40 * Example of common configuration settings in shell script form
41
42 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
43 adb shell setprop audio.offload.disable 1
44
45 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
46 adb shell setprop audio.offload.video 1
47
48 #Use audio callbacks for PCM data
49 adb shell setprop media.stagefright.audio.cbk 1
50
51 #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
52 adb shell setprop media.stagefright.audio.deep 1
53
54 #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
55 adb shell setprop media.stagefright.audio.sink 1000
56
57 * These configurations take effect for the next track played (not the current track).
58 */
59
60static inline bool getUseAudioCallbackSetting() {
61 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
62}
63
64static inline int32_t getAudioSinkPcmMsSetting() {
65 return property_get_int32(
66 "media.stagefright.audio.sink", 500 /* default_value */);
67}
68
// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000ll;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;

// Minimum period between audio-driven media-clock updates, 20 msec.
static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
77
// static
// "No PCM sink configured" value; the constructor uses it to initialize
// mCurrentPcmInfo.
const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};
86
// static
// Minimum spacing between position updates, 100 msec.
const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000ll;
89
// Constructs the renderer. |sink| receives decoded audio, |mediaClock| is the
// shared clock this renderer anchors for A/V sync (must be non-NULL), |notify|
// is dup()'ed and posted for renderer events, and |flags| configure behavior.
NuPlayer2::Renderer::Renderer(
        const sp<MediaPlayer2Base::AudioSink> &sink,
        const sp<MediaClock> &mediaClock,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mMediaClock(mediaClock),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0ll),
      mNextVideoTimeMediaUs(-1),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new JWakeLock()) {
    CHECK(mediaClock != NULL);
    // Seed the media clock with the initial (default) playback speed.
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
}
140
NuPlayer2::Renderer::~Renderer() {
    // Tear down an offloaded sink explicitly: stop, drop pending data, close.
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }

    // Try to avoid racing condition in case callback is still on.
    Mutex::Autolock autoLock(mLock);
    if (mUseAudioCallback) {
        // Drop anything the AudioSink callback could still be reading.
        flushQueue(&mAudioQueue);
        flushQueue(&mVideoQueue);
    }
    mWakeLock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}
159
160void NuPlayer2::Renderer::queueBuffer(
161 bool audio,
162 const sp<MediaCodecBuffer> &buffer,
163 const sp<AMessage> &notifyConsumed) {
164 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
165 msg->setInt32("queueGeneration", getQueueGeneration(audio));
166 msg->setInt32("audio", static_cast<int32_t>(audio));
167 msg->setObject("buffer", buffer);
168 msg->setMessage("notifyConsumed", notifyConsumed);
169 msg->post();
170}
171
172void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
173 CHECK_NE(finalResult, (status_t)OK);
174
175 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
176 msg->setInt32("queueGeneration", getQueueGeneration(audio));
177 msg->setInt32("audio", static_cast<int32_t>(audio));
178 msg->setInt32("finalResult", finalResult);
179 msg->post();
180}
181
182status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
183 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
184 writeToAMessage(msg, rate);
185 sp<AMessage> response;
186 status_t err = msg->postAndAwaitResponse(&response);
187 if (err == OK && response != NULL) {
188 CHECK(response->findInt32("err", &err));
189 }
190 return err;
191}
192
// Looper-side handler for setPlaybackSettings(). A speed of 0 is treated as
// a pause request; otherwise the rate is propagated to the AudioSink (if
// open) and to the MediaClock.
status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
        // have to correspond to the any non-0 speed (e.g old speed). Keep
        // settings nonetheless, using the old speed, in case audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }

    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            // Sink rejected the rate; leave current settings untouched.
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
    return OK;
}
216
217status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
218 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
219 sp<AMessage> response;
220 status_t err = msg->postAndAwaitResponse(&response);
221 if (err == OK && response != NULL) {
222 CHECK(response->findInt32("err", &err));
223 if (err == OK) {
224 readFromAMessage(response, rate);
225 }
226 }
227 return err;
228}
229
// Looper-side handler for getPlaybackSettings(). Prefers the AudioSink's
// view of the rate (it may have clamped small changes); reports speed 0
// while paused.
status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate");
            }
            // get playback settings used by audiosink, as it may be
            // slightly off due to audiosink not taking small changes.
            mPlaybackSettings = *rate;
            if (mPaused) {
                rate->mSpeed = 0.f;
            }
        }
        return err;
    }
    // No usable sink: report the internally cached settings.
    *rate = mPlaybackSettings;
    return OK;
}
249
250status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
251 sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
252 writeToAMessage(msg, sync, videoFpsHint);
253 sp<AMessage> response;
254 status_t err = msg->postAndAwaitResponse(&response);
255 if (err == OK && response != NULL) {
256 CHECK(response->findInt32("err", &err));
257 }
258 return err;
259}
260
261status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
262 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
263 return BAD_VALUE;
264 }
265 // TODO: support sync sources
266 return INVALID_OPERATION;
267}
268
269status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
270 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
271 sp<AMessage> response;
272 status_t err = msg->postAndAwaitResponse(&response);
273 if (err == OK && response != NULL) {
274 CHECK(response->findInt32("err", &err));
275 if (err == OK) {
276 readFromAMessage(response, sync, videoFps);
277 }
278 }
279 return err;
280}
281
282status_t NuPlayer2::Renderer::onGetSyncSettings(
283 AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
284 *sync = mSyncSettings;
285 *videoFps = -1.f;
286 return OK;
287}
288
// Flushes the audio or video queue. Generation counters are bumped under the
// lock first so that any queue/drain messages already in flight become stale
// and are ignored; the actual queue teardown happens in onFlush() on the
// renderer looper.
void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        // Reset sync state; the clock re-anchors from the post-flush data.
        mMediaClock->clearAnchor();
        mVideoLateByUs = 0;
        mNextVideoTimeMediaUs = -1;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}
313
// Intentionally a no-op in this implementation.
void NuPlayer2::Renderer::signalTimeDiscontinuity() {
}
316
317void NuPlayer2::Renderer::signalDisableOffloadAudio() {
318 (new AMessage(kWhatDisableOffloadAudio, this))->post();
319}
320
321void NuPlayer2::Renderer::signalEnableOffloadAudio() {
322 (new AMessage(kWhatEnableOffloadAudio, this))->post();
323}
324
325void NuPlayer2::Renderer::pause() {
326 (new AMessage(kWhatPause, this))->post();
327}
328
329void NuPlayer2::Renderer::resume() {
330 (new AMessage(kWhatResume, this))->post();
331}
332
333void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
334 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
335 msg->setFloat("frame-rate", fps);
336 msg->post();
337}
338
// Called on any threads without mLock acquired.
// Returns the current media time in |mediaUs|. If the MediaClock has no
// anchor yet, attempts to start it from the AudioSink's rendered-frame
// timestamp before retrying the query.
status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            // No audio anchor available; cannot start the clock.
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            // Sink has no timestamp yet (nothing rendered).
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}
368
// Resets the first-audio-anchor media time (and the clock's starting time)
// to "unknown". Caller must hold mLock.
void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}
373
// Records |mediaUs| as the first audio anchor (and the clock's starting
// media time) if none has been set yet. Caller must hold mLock.
void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    if (mAudioFirstAnchorTimeMediaUs == -1) {
        mAudioFirstAnchorTimeMediaUs = mediaUs;
        mMediaClock->setStartingTimeMedia(mediaUs);
    }
}
380
// Called on renderer looper.
// Drops the MediaClock anchor and the renderer's anchor bookkeeping.
void NuPlayer2::Renderer::clearAnchorTime() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}
387
// Thread-safe setter for how late video currently is (microseconds).
void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}
392
// Thread-safe getter for how late video currently is (microseconds).
int64_t NuPlayer2::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}
397
// Opens the audio sink synchronously on the renderer looper. On success,
// |*isOffloaded| (if non-NULL) reports whether the sink ended up in offload
// mode. Returns INVALID_OPERATION if the request could not be delivered.
status_t NuPlayer2::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded,
        bool isStreaming) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);
    msg->setInt32("isStreaming", isStreaming);

    sp<AMessage> response;
    status_t postStatus = msg->postAndAwaitResponse(&response);

    int32_t err;
    if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}
425
426void NuPlayer2::Renderer::closeAudioSink() {
427 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
428
429 sp<AMessage> response;
430 msg->postAndAwaitResponse(&response);
431}
432
// Requests an audio format change (sink re-open) on the renderer looper.
// The sink parameters are bundled into |meta|; |notify| is posted when the
// change has been handled.
void NuPlayer2::Renderer::changeAudioFormat(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool isStreaming,
        const sp<AMessage> &notify) {
    sp<AMessage> meta = new AMessage;
    meta->setMessage("format", format);
    meta->setInt32("offload-only", offloadOnly);
    meta->setInt32("has-video", hasVideo);
    meta->setInt32("flags", flags);
    meta->setInt32("isStreaming", isStreaming);

    sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
    // Stamped with the queue generation so a change queued before a flush
    // is handled appropriately when received.
    msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
    msg->setMessage("notify", notify);
    msg->setMessage("meta", meta);
    msg->post();
}
453
// Central dispatch for all renderer-looper messages. Stale messages are
// filtered by comparing their stamped generation against the current
// queue/drain generation before acting.
void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            uint32_t isStreaming;
            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

            // Reply with the open status and whether offload was chosen.
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatChangeAudioFormat:
        {
            int32_t queueGeneration;
            CHECK(msg->findInt32("queueGeneration", &queueGeneration));

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));

            if (offloadingAudio()) {
                ALOGW("changeAudioFormat should NOT be called in offload mode");
                notify->setInt32("err", INVALID_OPERATION);
                notify->post();
                break;
            }

            sp<AMessage> meta;
            CHECK(msg->findMessage("meta", &meta));

            // If the request is stale or there is no queued audio, apply the
            // format change immediately; otherwise queue it as a marker entry
            // so it is applied in order with the buffered audio.
            if (queueGeneration != getQueueGeneration(true /* audio */)
                    || mAudioQueue.empty()) {
                onChangeAudioFormat(meta, notify);
                break;
            }

            QueueEntry entry;
            entry.mNotifyConsumed = notify;
            entry.mMeta = meta;

            Mutex::Autolock autoLock(mLock);
            mAudioQueue.push_back(entry);
            postDrainAudioQueue_l();

            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                // Handle AudioTrack race when start is immediately called after flush.
                uint32_t numFramesPendingPlayout =
                    (mNumFramesWritten > numFramesPlayed ?
                        mNumFramesWritten - numFramesPlayed : 0);

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackRate > 1.0f) {
                    delayUs /= mPlaybackRate;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            // Schedule the next video drain.
            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}
761
// Schedules a kWhatDrainAudioQueue message after |delayUs|. No-op when a
// drain is already pending, queues are being synced, the callback path is in
// use, or there is nothing queued. Caller must hold mLock.
void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        // Defer the drain until mPauseDrainAudioAllowedUs at the earliest.
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}
784
// Arms the "media rendering started" notification by snapshotting the current
// drain generations; see notifyIfMediaRenderingStarted_l(). Caller holds mLock.
void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}
790
// Posts kWhatMediaRenderingStart once, the first time data is delivered after
// prepareForMediaRenderingStart_l(), provided neither stream was flushed in
// between (generations still match). Caller must hold mLock.
void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            // Defer the notification until resumed.
            return;
        }
        // Disarm so the notification fires only once.
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}
806
// static
// C-style trampoline registered with the AudioSink; |cookie| is the Renderer
// instance. For FILL_BUFFER events the return value is the number of bytes
// written into |buffer|; all other events return 0.
size_t NuPlayer2::Renderer::AudioSinkCallback(
        MediaPlayer2Base::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayer2Base::AudioSink::cb_event_t event) {
    NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;

    switch (event) {
        case MediaPlayer2Base::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayer2Base::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOSCallback();
            break;
        }

        case MediaPlayer2Base::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}
840
// Handles CB_EVENT_STREAM_END from the AudioSink callback thread: reports
// audio EOS, but only while the callback path is still active.
void NuPlayer2::Renderer::notifyEOSCallback() {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        // Callback path was torn down; ignore the stale event.
        return;
    }

    notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
}
850
851size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
852 Mutex::Autolock autoLock(mLock);
853
854 if (!mUseAudioCallback) {
855 return 0;
856 }
857
858 bool hasEOS = false;
859
860 size_t sizeCopied = 0;
861 bool firstEntry = true;
862 QueueEntry *entry; // will be valid after while loop if hasEOS is set.
863 while (sizeCopied < size && !mAudioQueue.empty()) {
864 entry = &*mAudioQueue.begin();
865
866 if (entry->mBuffer == NULL) { // EOS
867 hasEOS = true;
868 mAudioQueue.erase(mAudioQueue.begin());
869 break;
870 }
871
872 if (firstEntry && entry->mOffset == 0) {
873 firstEntry = false;
874 int64_t mediaTimeUs;
875 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
876 ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
877 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
878 }
879
880 size_t copy = entry->mBuffer->size() - entry->mOffset;
881 size_t sizeRemaining = size - sizeCopied;
882 if (copy > sizeRemaining) {
883 copy = sizeRemaining;
884 }
885
886 memcpy((char *)buffer + sizeCopied,
887 entry->mBuffer->data() + entry->mOffset,
888 copy);
889
890 entry->mOffset += copy;
891 if (entry->mOffset == entry->mBuffer->size()) {
892 entry->mNotifyConsumed->post();
893 mAudioQueue.erase(mAudioQueue.begin());
894 entry = NULL;
895 }
896 sizeCopied += copy;
897
898 notifyIfMediaRenderingStarted_l();
899 }
900
901 if (mAudioFirstAnchorTimeMediaUs >= 0) {
902 int64_t nowUs = ALooper::GetNowUs();
903 int64_t nowMediaUs =
904 mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
905 // we don't know how much data we are queueing for offloaded tracks.
906 mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
907 }
908
909 // for non-offloaded audio, we need to compute the frames written because
910 // there is no EVENT_STREAM_END notification. The frames written gives
911 // an estimate on the pending played out duration.
912 if (!offloadingAudio()) {
913 mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
914 }
915
916 if (hasEOS) {
917 (new AMessage(kWhatStopAudioSink, this))->post();
918 // As there is currently no EVENT_STREAM_END callback notification for
919 // non-offloaded audio tracks, we need to post the EOS ourselves.
920 if (!offloadingAudio()) {
921 int64_t postEOSDelayUs = 0;
922 if (mAudioSink->needsTrailingPadding()) {
923 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
924 }
925 ALOGV("fillAudioBuffer: notifyEOS_l "
926 "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld",
927 mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
928 notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
929 }
930 }
931 return sizeCopied;
932}
933
// Finds the LAST EOS (or "eos"-marked discontinuity) entry in the audio queue,
// then drops everything up to and including it: consumed-notifications are
// posted for data entries, EOS is reported, and queued format changes are
// applied. Used when the sink cannot drain (e.g. getPosition fails) so the
// decoder is not left waiting for its buffers.
void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        // A NULL buffer with no notify is a plain EOS entry; otherwise an
        // entry whose notify carries "eos" marks a discontinuity.
        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            // |it| was already advanced, so itEOS points one past the entry.
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == nullptr) {
                if (it->mNotifyConsumed == nullptr) {
                    // delay doesn't matter as we don't even have an AudioTrack
                    notifyEOS(true /* audio */, it->mFinalResult);
                } else {
                    // TAG for re-opening audio sink.
                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
                }
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}
965
966bool NuPlayer2::Renderer::onDrainAudioQueue() {
967 // do not drain audio during teardown as queued buffers may be invalid.
968 if (mAudioTornDown) {
969 return false;
970 }
971 // TODO: This call to getPosition checks if AudioTrack has been created
972 // in AudioSink before draining audio. If AudioTrack doesn't exist, then
973 // CHECKs on getPosition will fail.
974 // We still need to figure out why AudioTrack is not created when
975 // this function is called. One possible reason could be leftover
976 // audio. Another possible place is to check whether decoder
977 // has received INFO_FORMAT_CHANGED as the first buffer since
978 // AudioSink is opened there, and possible interactions with flush
979 // immediately after start. Investigate error message
980 // "vorbis_dsp_synthesis returned -135", along with RTSP.
981 uint32_t numFramesPlayed;
982 if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
983 // When getPosition fails, renderer will not reschedule the draining
984 // unless new samples are queued.
985 // If we have pending EOS (or "eos" marker for discontinuities), we need
986 // to post these now as NuPlayer2Decoder might be waiting for it.
987 drainAudioQueueUntilLastEOS();
988
989 ALOGW("onDrainAudioQueue(): audio sink is not ready");
990 return false;
991 }
992
993#if 0
994 ssize_t numFramesAvailableToWrite =
995 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
996
997 if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
998 ALOGI("audio sink underrun");
999 } else {
1000 ALOGV("audio queue has %d frames left to play",
1001 mAudioSink->frameCount() - numFramesAvailableToWrite);
1002 }
1003#endif
1004
1005 uint32_t prevFramesWritten = mNumFramesWritten;
1006 while (!mAudioQueue.empty()) {
1007 QueueEntry *entry = &*mAudioQueue.begin();
1008
1009 if (entry->mBuffer == NULL) {
1010 if (entry->mNotifyConsumed != nullptr) {
1011 // TAG for re-open audio sink.
1012 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1013 mAudioQueue.erase(mAudioQueue.begin());
1014 continue;
1015 }
1016
1017 // EOS
1018 if (mPaused) {
1019 // Do not notify EOS when paused.
1020 // This is needed to avoid switch to next clip while in pause.
1021 ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
1022 return false;
1023 }
1024
1025 int64_t postEOSDelayUs = 0;
1026 if (mAudioSink->needsTrailingPadding()) {
1027 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1028 }
1029 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1030 mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1031
1032 mAudioQueue.erase(mAudioQueue.begin());
1033 entry = NULL;
1034 if (mAudioSink->needsTrailingPadding()) {
1035 // If we're not in gapless playback (i.e. through setNextPlayer), we
1036 // need to stop the track here, because that will play out the last
1037 // little bit at the end of the file. Otherwise short files won't play.
1038 mAudioSink->stop();
1039 mNumFramesWritten = 0;
1040 }
1041 return false;
1042 }
1043
1044 mLastAudioBufferDrained = entry->mBufferOrdinal;
1045
1046 // ignore 0-sized buffer which could be EOS marker with no data
1047 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1048 int64_t mediaTimeUs;
1049 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1050 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1051 mediaTimeUs / 1E6);
1052 onNewAudioMediaTime(mediaTimeUs);
1053 }
1054
1055 size_t copy = entry->mBuffer->size() - entry->mOffset;
1056
1057 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1058 copy, false /* blocking */);
1059 if (written < 0) {
1060 // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1061 if (written == WOULD_BLOCK) {
1062 ALOGV("AudioSink write would block when writing %zu bytes", copy);
1063 } else {
1064 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1065 // This can only happen when AudioSink was opened with doNotReconnect flag set to
1066 // true, in which case the NuPlayer2 will handle the reconnect.
1067 notifyAudioTearDown(kDueToError);
1068 }
1069 break;
1070 }
1071
1072 entry->mOffset += written;
1073 size_t remainder = entry->mBuffer->size() - entry->mOffset;
1074 if ((ssize_t)remainder < mAudioSink->frameSize()) {
1075 if (remainder > 0) {
1076 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1077 remainder);
1078 entry->mOffset += remainder;
1079 copy -= remainder;
1080 }
1081
1082 entry->mNotifyConsumed->post();
1083 mAudioQueue.erase(mAudioQueue.begin());
1084
1085 entry = NULL;
1086 }
1087
1088 size_t copiedFrames = written / mAudioSink->frameSize();
1089 mNumFramesWritten += copiedFrames;
1090
1091 {
1092 Mutex::Autolock autoLock(mLock);
1093 int64_t maxTimeMedia;
1094 maxTimeMedia =
1095 mAnchorTimeMediaUs +
1096 (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1097 * 1000LL * mAudioSink->msecsPerFrame());
1098 mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1099
1100 notifyIfMediaRenderingStarted_l();
1101 }
1102
1103 if (written != (ssize_t)copy) {
1104 // A short count was received from AudioSink::write()
1105 //
1106 // AudioSink write is called in non-blocking mode.
1107 // It may return with a short count when:
1108 //
1109 // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1110 // discarded.
1111 // 2) The data to be copied exceeds the available buffer in AudioSink.
1112 // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1113 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1114
1115 // (Case 1)
1116 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it
1117 // needs to fail, as we should not carry over fractional frames between calls.
1118 CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
1119
1120 // (Case 2, 3, 4)
1121 // Return early to the caller.
1122 // Beware of calling immediately again as this may busy-loop if you are not careful.
1123 ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1124 break;
1125 }
1126 }
1127
1128 // calculate whether we need to reschedule another write.
1129 bool reschedule = !mAudioQueue.empty()
1130 && (!mPaused
1131 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1132 //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u",
1133 // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1134 return reschedule;
1135}
1136
1137int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1138 int32_t sampleRate = offloadingAudio() ?
1139 mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1140 if (sampleRate == 0) {
1141 ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1142 return 0;
1143 }
1144 // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1145 return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1146}
1147
1148// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1149int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1150 int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1151 if (mUseVirtualAudioSink) {
1152 int64_t nowUs = ALooper::GetNowUs();
1153 int64_t mediaUs;
1154 if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
1155 return 0ll;
1156 } else {
1157 return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1158 }
1159 }
1160
1161 const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
1162 int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
1163 if (pendingUs < 0) {
1164 // This shouldn't happen unless the timestamp is stale.
1165 ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
1166 "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
1167 __func__, (long long)pendingUs,
1168 (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
1169 pendingUs = 0;
1170 }
1171 return pendingUs;
1172}
1173
1174int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1175 int64_t realUs;
1176 if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1177 // If failed to get current position, e.g. due to audio clock is
1178 // not ready, then just play out video immediately without delay.
1179 return nowUs;
1180 }
1181 return realUs;
1182}
1183
// Updates the media clock anchor from a newly rendered audio timestamp.
// Also detects a stuck AudioSink and switches pacing to the system clock
// ("virtual audio sink") when the sink never starts rendering.
void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        // Only begin clock updates once the sink reports real playback progress.
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            // Anchor the clock at "now" minus the audio still queued in the sink,
            // then rate-limit further updates to kMinimumAudioClockUpdatePeriodUs.
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    // Remember the anchor so the vorbis duplicate-timestamp check above and
    // onDrainAudioQueue()'s max-time computation both reference this point.
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}
1228
// Called without mLock acquired.
// Schedules the draining of the next queued video buffer: EOS entries drain
// immediately, real-time buffers are posted relative to the vsync schedule,
// and media-time buffers are registered as MediaClock timers.
void NuPlayer2::Renderer::postDrainVideoQueue() {
    // Skip if a drain is already scheduled, the queues are still being
    // synchronized, or we're paused after having shown the first frame.
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    if (mFlags & FLAG_REAL_TIME) {
        int64_t realTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));

        // Snap the presentation time to the display's vsync grid (ns in, ns out).
        realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        int64_t delayUs = realTimeUs - nowUs;

        ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
        // post 2 display refreshes before rendering is due
        msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

        mDrainVideoQueuePending = true;
        return;
    }

    int64_t mediaTimeUs;
    CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

    {
        Mutex::Autolock autoLock(mLock);
        // First video buffer with no anchor yet: let video drive the clock.
        if (mAnchorTimeMediaUs < 0) {
            mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
            mAnchorTimeMediaUs = mediaTimeUs;
        }
    }
    mNextVideoTimeMediaUs = mediaTimeUs + 100000;
    if (!mHasAudio) {
        // smooth out videos >= 10fps
        mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
    }

    if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
        // First frame, or a frame that predates the audio anchor: show it now.
        msg->post();
    } else {
        int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

        // post 2 display refreshes before rendering is due
        mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
    }

    mDrainVideoQueuePending = true;
}
1299
// Drains one entry from the video queue: handles EOS, computes the frame's
// real-time deadline, decides whether it is too late to render, and hands the
// buffer back to the decoder with render/timestamp instructions.
void NuPlayer2::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    int64_t mediaTimeUs = -1;
    if (mFlags & FLAG_REAL_TIME) {
        // In real-time mode "timeUs" already is the presentation wall time.
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }
    // Align the deadline with the display's vsync grid (ns in, ns out).
    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;

    bool tooLate = false;

    if (!mPaused) {
        setVideoLateByUs(nowUs - realTimeUs);
        // Frames more than 40ms behind schedule are dropped rather than shown.
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                    (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);

            if (!(mFlags & FLAG_REAL_TIME)
                    && mLastAudioMediaTimeUs != -1
                    && mediaTimeUs > mLastAudioMediaTimeUs) {
                // If audio ends before video, video continues to drive media clock.
                // Also smooth out videos >= 10fps.
                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
            }
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            clearAnchorTime();
        }
    }

    // Always render the first video frame while keeping stats on A/V sync.
    if (!mVideoSampleReceived) {
        realTimeUs = nowUs;
        tooLate = false;
    }

    // Hand the buffer back to the decoder: "render" tells it whether to
    // actually display the frame, "timestampNs" when to display it.
    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}
1387
1388void NuPlayer2::Renderer::notifyVideoRenderingStart() {
1389 sp<AMessage> notify = mNotify->dup();
1390 notify->setInt32("what", kWhatVideoRenderingStart);
1391 notify->post();
1392}
1393
// Thread-safe wrapper: acquires mLock and forwards to notifyEOS_l().
void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
    Mutex::Autolock autoLock(mLock);
    notifyEOS_l(audio, finalResult, delayUs);
}
1398
// Delivers the end-of-stream notification upward. An audio EOS with a
// positive delay is re-posted to self (tagged with the current EOS generation
// so a flush can cancel it) instead of being delivered immediately.
// Caller must hold mLock.
void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
    if (audio && delayUs > 0) {
        // Defer delivery until the already-written audio has played out.
        sp<AMessage> msg = new AMessage(kWhatEOS, this);
        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
        msg->setInt32("finalResult", finalResult);
        msg->post(delayUs);
        return;
    }
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatEOS);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->setInt32("finalResult", finalResult);
    notify->post(delayUs);

    if (audio) {
        // Video might outlive audio. Clear anchor to enable video only case.
        mAnchorTimeMediaUs = -1;
        mHasAudio = false;
        if (mNextVideoTimeMediaUs >= 0) {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(ALooper::GetNowUs(), &mediaUs);
            // Extend the clock's max media time so remaining video can keep
            // playing past the audio EOS point.
            if (mNextVideoTimeMediaUs > mediaUs) {
                mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
            }
        }
    }
}
1426
1427void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1428 sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1429 msg->setInt32("reason", reason);
1430 msg->post();
1431}
1432
// Accepts a decoded buffer from the decoder, enqueues it on the audio or
// video queue, schedules draining, and — while the queues are still being
// synchronized — drops leading audio so the first audio/video timestamps line
// up within 0.1s.
void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        // Buffer was queued before a flush; already returned to the decoder.
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        if (mVideoScheduler == NULL) {
            // Lazily create the vsync-based scheduler on first video buffer.
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<RefBase> obj;
    CHECK(msg->findObject("buffer", &obj));
    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    // NOTE(review): only the audio queue is mutated under mLock here; the
    // video queue appears to be confined to this looper thread — confirm
    // against the header's locking conventions.
    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    Mutex::Autolock autoLock(mLock);
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000ll) {
        // Audio data starts more than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    // First timestamps are close enough; stop synchronizing the queues.
    syncQueuesDone_l();
}
1513
// Ends the queue-synchronization phase and kicks off draining of whichever
// queues already hold data. Caller must hold mLock.
void NuPlayer2::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        // postDrainVideoQueue() must be called without mLock held (it takes
        // the lock itself), so drop and re-acquire it around the call.
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}
1531
// Enqueues an EOS marker (an entry with a NULL buffer carrying finalResult)
// on the audio or video queue and schedules it for draining. Also terminates
// queue synchronization if the other queue is still waiting for data.
void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        // EOS belongs to a pre-flush generation; ignore it.
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    // NOTE(review): the audio branch holds mLock across the queue push, while
    // the video branch only locks inside syncQueuesDone_l's guard — the video
    // queue appears looper-thread-confined, but verify against the header.
    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}
1563
// Flushes one stream (audio or video): returns all queued buffers to the
// decoder, bumps the drain/EOS generations so in-flight messages are ignored,
// resets the relevant clock state, and flushes/restarts the AudioSink.
void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
            mLastAudioMediaTimeUs = -1;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
    }
    clearAnchorTime();

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            // Invalidate any pending drain/EOS messages for the old data.
            ++mAudioDrainGeneration;
            ++mAudioEOSGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
        // Wait for the sink to start producing timestamps again before
        // resuming media-clock updates (see onNewAudioMediaTime()).
        mNextAudioClockUpdateTimeUs = -1;
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}
1653
1654void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) {
1655 while (!queue->empty()) {
1656 QueueEntry *entry = &*queue->begin();
1657
1658 if (entry->mBuffer != NULL) {
1659 entry->mNotifyConsumed->post();
1660 } else if (entry->mNotifyConsumed != nullptr) {
1661 // Is it needed to open audio sink now?
1662 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1663 }
1664
1665 queue->erase(queue->begin());
1666 entry = NULL;
1667 }
1668}
1669
1670void NuPlayer2::Renderer::notifyFlushComplete(bool audio) {
1671 sp<AMessage> notify = mNotify->dup();
1672 notify->setInt32("what", kWhatFlushComplete);
1673 notify->setInt32("audio", static_cast<int32_t>(audio));
1674 notify->post();
1675}
1676
1677bool NuPlayer2::Renderer::dropBufferIfStale(
1678 bool audio, const sp<AMessage> &msg) {
1679 int32_t queueGeneration;
1680 CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1681
1682 if (queueGeneration == getQueueGeneration(audio)) {
1683 return false;
1684 }
1685
1686 sp<AMessage> notifyConsumed;
1687 if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1688 notifyConsumed->post();
1689 }
1690
1691 return true;
1692}
1693
1694void NuPlayer2::Renderer::onAudioSinkChanged() {
1695 if (offloadingAudio()) {
1696 return;
1697 }
1698 CHECK(!mDrainAudioQueuePending);
1699 mNumFramesWritten = 0;
1700 mAnchorNumFramesWritten = -1;
1701 uint32_t written;
1702 if (mAudioSink->getFramesWritten(&written) == OK) {
1703 mNumFramesWritten = written;
1704 }
1705}
1706
// Switches the renderer out of offload mode. Bumping the drain generation
// discards any pending kWhatDrainAudioQueue messages for the old mode.
void NuPlayer2::Renderer::onDisableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags &= ~FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}
1715
// Switches the renderer into offload mode (mirror of onDisableOffloadAudio).
// Bumping the drain generation discards stale drain messages.
void NuPlayer2::Renderer::onEnableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags |= FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}
1724
// Pauses playback: freezes the media clock, cancels pending video drains,
// pauses the AudioSink, and arms the offload-pause wakelock timeout.
void NuPlayer2::Renderer::onPause() {
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // we do not increment audio drain generation so that we fill audio buffer during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        // Rate 0.0 freezes the media clock while paused.
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    mAudioSink->pause();
    startAudioOffloadPauseTimeout();

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}
1749
// Resumes playback after a pause: restarts the AudioSink, restores the media
// clock rate and playback settings, and reschedules draining of both queues.
void NuPlayer2::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    cancelAudioOffloadPauseTimeout();
    if (mAudioSink->ready()) {
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            notifyAudioTearDown(kDueToError);
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // rendering started message may have been delayed if we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l();
        }
        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        // Un-freeze the media clock at the user-selected rate.
        mMediaClock->setPlaybackRate(mPlaybackRate);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    // postDrainVideoQueue() takes mLock itself, so call it outside the lock.
    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}
1788
1789void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
1790 if (mVideoScheduler == NULL) {
1791 mVideoScheduler = new VideoFrameScheduler();
1792 }
1793 mVideoScheduler->init(fps);
1794}
1795
1796int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) {
1797 Mutex::Autolock autoLock(mLock);
1798 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1799}
1800
1801int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) {
1802 Mutex::Autolock autoLock(mLock);
1803 return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1804}
1805
// Returns whether the audio/video queues are still in the initial
// synchronization phase (locked read of mSyncQueues).
bool NuPlayer2::Renderer::getSyncQueues() {
    Mutex::Autolock autoLock(mLock);
    return mSyncQueues;
}
1810
// Tears the audio path down (at most once per setup): captures the current
// position so playback can resume from it, stops/flushes the sink, and asks
// NuPlayer2 to rebuild the audio chain.
void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) {
    if (mAudioTornDown) {
        return;
    }
    mAudioTornDown = true;

    // Record the position before stopping the sink, while it is still valid.
    int64_t currentPositionUs;
    sp<AMessage> notify = mNotify->dup();
    if (getCurrentPosition(&currentPositionUs) == OK) {
        notify->setInt64("positionUs", currentPositionUs);
    }

    mAudioSink->stop();
    mAudioSink->flush();

    notify->setInt32("what", kWhatAudioTearDown);
    notify->setInt32("reason", reason);
    notify->post();
}
1830
1831void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() {
1832 if (offloadingAudio()) {
1833 mWakeLock->acquire();
1834 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1835 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1836 msg->post(kOffloadPauseMaxUs);
1837 }
1838}
1839
// Cancels a pending offload-pause timeout by bumping its generation counter
// and releasing the wakelock taken in startAudioOffloadPauseTimeout().
void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() {
    // We may have called startAudioOffloadPauseTimeout() without
    // the AudioSink open and with offloadingAudio enabled.
    //
    // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
    // we always release the wakelock and increment the pause timeout generation.
    //
    // Note: The acquired wakelock prevents the device from suspending
    // immediately after offload pause (in case a resume happens shortly thereafter).
    mWakeLock->release(true);
    ++mAudioOffloadPauseTimeoutGeneration;
}
1852
// Opens (or re-opens) the AudioSink for the given format.
// Tries compressed offload first when offloading is enabled, falling back to
// 16-bit PCM on failure; in either mode the last successful configuration is
// cached (mCurrentOffloadInfo / mCurrentPcmInfo) so a redundant open with the
// same parameters becomes a no-op.
// Returns OK on success (or no-op), or the AudioSink open error for PCM.
status_t NuPlayer2::Renderer::onOpenAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool isStreaming) {
    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
            offloadOnly, offloadingAudio());
    bool audioSinkChanged = false;

    int32_t numChannels;
    CHECK(format->findInt32("channel-count", &numChannels));

    int32_t channelMask;
    if (!format->findInt32("channel-mask", &channelMask)) {
        // signal to the AudioSink to derive the mask from count.
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }

    int32_t sampleRate;
    CHECK(format->findInt32("sample-rate", &sampleRate));

    if (offloadingAudio()) {
        // --- Offload path: pass compressed audio straight to the DSP. ---
        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
        AString mime;
        CHECK(format->findString("mime", &mime));
        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());

        if (err != OK) {
            ALOGE("Couldn't map mime \"%s\" to a valid "
                    "audio_format", mime.c_str());
            onDisableOffloadAudio();
        } else {
            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                    mime.c_str(), audioFormat);

            int avgBitRate = -1;
            format->findInt32("bitrate", &avgBitRate);

            int32_t aacProfile = -1;
            if (audioFormat == AUDIO_FORMAT_AAC
                    && format->findInt32("aac-profile", &aacProfile)) {
                // Redefine AAC format as per aac profile
                mapAACProfileToAudioFormat(
                        audioFormat,
                        aacProfile);
            }

            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.duration_us = -1;
            format->findInt64(
                    "durationUs", &offloadInfo.duration_us);
            offloadInfo.sample_rate = sampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = hasVideo;
            offloadInfo.is_streaming = isStreaming;

            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
                ALOGV("openAudioSink: no change in offload mode");
                // no change from previous configuration, everything ok.
                return OK;
            }
            // Switching into offload invalidates any cached PCM configuration.
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;

            ALOGV("openAudioSink: try to open AudioSink in offload mode");
            uint32_t offloadFlags = flags;
            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
            audioSinkChanged = true;
            mAudioSink->close();

            err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    audioFormat,
                    0 /* bufferCount - unused */,
                    &NuPlayer2::Renderer::AudioSinkCallback,
                    this,
                    (audio_output_flags_t)offloadFlags,
                    &offloadInfo);

            if (err == OK) {
                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
            }

            if (err == OK) {
                // If the playback is offloaded to h/w, we pass
                // the HAL some metadata information.
                // We don't want to do this for PCM because it
                // will be going through the AudioFlinger mixer
                // before reaching the hardware.
                // TODO
                mCurrentOffloadInfo = offloadInfo;
                if (!mPaused) { // for preview mode, don't start if paused
                    err = mAudioSink->start();
                }
                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
            }
            if (err != OK) {
                // Clean up, fall back to non offload mode.
                mAudioSink->close();
                onDisableOffloadAudio();
                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                ALOGV("openAudioSink: offload failed");
                if (offloadOnly) {
                    notifyAudioTearDown(kForceNonOffload);
                }
            } else {
                mUseAudioCallback = true;  // offload mode transfers data through callback
                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
            }
        }
    }
    if (!offloadOnly && !offloadingAudio()) {
        // --- PCM path: mixed by AudioFlinger (also the offload fallback). ---
        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
        uint32_t pcmFlags = flags;
        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

        const PcmInfo info = {
                (audio_channel_mask_t)channelMask,
                (audio_output_flags_t)pcmFlags,
                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
                numChannels,
                sampleRate
        };
        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
            ALOGV("openAudioSink: no change in pcm mode");
            // no change from previous configuration, everything ok.
            return OK;
        }

        audioSinkChanged = true;
        mAudioSink->close();
        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
        // Note: It is possible to set up the callback, but not use it to send audio data.
        // This requires a fix in AudioSink to explicitly specify the transfer mode.
        mUseAudioCallback = getUseAudioCallbackSetting();
        if (mUseAudioCallback) {
            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
        }

        // Compute the desired buffer size.
        // For callback mode, the amount of time before wakeup is about half the buffer size.
        const uint32_t frameCount =
                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;

        // The doNotReconnect means AudioSink will signal back and let NuPlayer2 to re-construct
        // AudioSink. We don't want this when there's video because it will cause a video seek to
        // the previous I frame. But we do want this when there's only audio because it will give
        // NuPlayer2 a chance to switch from non-offload mode to offload mode.
        // So we only set doNotReconnect when there's no video.
        const bool doNotReconnect = !hasVideo;

        // We should always be able to set our playback settings if the sink is closed.
        LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
                "onOpenAudioSink: can't set playback rate on closed sink");
        status_t err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    AUDIO_FORMAT_PCM_16_BIT,
                    0 /* bufferCount - unused */,
                    mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL,
                    mUseAudioCallback ? this : NULL,
                    (audio_output_flags_t)pcmFlags,
                    NULL,
                    doNotReconnect,
                    frameCount);
        if (err != OK) {
            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
            mAudioSink->close();
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
            return err;
        }
        mCurrentPcmInfo = info;
        if (!mPaused) { // for preview mode, don't start if paused
            mAudioSink->start();
        }
    }
    if (audioSinkChanged) {
        // Re-sync frame accounting with the new sink (PCM mode only).
        onAudioSinkChanged();
    }
    mAudioTornDown = false;
    return OK;
}
2042
2043void NuPlayer2::Renderer::onCloseAudioSink() {
2044 mAudioSink->close();
2045 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2046 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2047}
2048
2049void NuPlayer2::Renderer::onChangeAudioFormat(
2050 const sp<AMessage> &meta, const sp<AMessage> &notify) {
2051 sp<AMessage> format;
2052 CHECK(meta->findMessage("format", &format));
2053
2054 int32_t offloadOnly;
2055 CHECK(meta->findInt32("offload-only", &offloadOnly));
2056
2057 int32_t hasVideo;
2058 CHECK(meta->findInt32("has-video", &hasVideo));
2059
2060 uint32_t flags;
2061 CHECK(meta->findInt32("flags", (int32_t *)&flags));
2062
2063 uint32_t isStreaming;
2064 CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
2065
2066 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
2067
2068 if (err != OK) {
2069 notify->setInt32("err", err);
2070 }
2071 notify->post();
2072}
2073
2074} // namespace android
2075