/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "NuPlayer2Renderer"
#include <utils/Log.h>

#include "JWakeLock.h"
#include "NuPlayer2Renderer.h"
#include <algorithm>
#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/VideoFrameScheduler2.h>
#include <media/MediaCodecBuffer.h>

#include <inttypes.h>

namespace android {

/*
 * Example of common configuration settings in shell script form

#Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
adb shell setprop audio.offload.disable 1

#Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
adb shell setprop audio.offload.video 1

#Use audio callbacks for PCM data
adb shell setprop media.stagefright.audio.cbk 1

#Use deep buffer for PCM data with video (it is generally enabled for audio-only)
adb shell setprop media.stagefright.audio.deep 1

#Set size of buffers for pcm audio sink in msec (example: 1000 msec)
adb shell setprop media.stagefright.audio.sink 1000

 * These configurations take effect for the next track played (not the current track).
 */
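/*
 * For example, to verify a setting before starting the next track, it can be read
 * back with getprop (illustrative; any of the properties above works the same way):
 *
 * adb shell getprop media.stagefright.audio.sink
 */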

static inline bool getUseAudioCallbackSetting() {
    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
}

static inline int32_t getAudioSinkPcmMsSetting() {
    return property_get_int32(
            "media.stagefright.audio.sink", 500 /* default_value */);
}

// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000LL;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;

static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;

// static
const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;

static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
    switch (pcmEncoding) {
        case kAudioEncodingPcmFloat:
            return AUDIO_FORMAT_PCM_FLOAT;
        case kAudioEncodingPcm16bit:
            return AUDIO_FORMAT_PCM_16_BIT;
        case kAudioEncodingPcm8bit:
            return AUDIO_FORMAT_PCM_8_BIT;  // TODO: do we want to support this?
        default:
            ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
            return AUDIO_FORMAT_INVALID;
    }
}

NuPlayer2::Renderer::Renderer(
        const sp<MediaPlayer2Interface::AudioSink> &sink,
        const sp<MediaClock> &mediaClock,
        const sp<AMessage> &notify,
        const sp<JObjectHolder> &context,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mMediaClock(mediaClock),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0LL),
      mNextVideoTimeMediaUs(-1),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(true),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new JWakeLock(context)) {
    CHECK(mediaClock != NULL);
    mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
}

NuPlayer2::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }
    // Try to avoid a race condition in case the callback is still active.
    Mutex::Autolock autoLock(mLock);
    if (mUseAudioCallback) {
        flushQueue(&mAudioQueue);
        flushQueue(&mVideoQueue);
    }
    mWakeLock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}

void NuPlayer2::Renderer::queueBuffer(
        bool audio,
        const sp<MediaCodecBuffer> &buffer,
        const sp<AMessage> &notifyConsumed) {
    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setObject("buffer", buffer);
    msg->setMessage("notifyConsumed", notifyConsumed);
    msg->post();
}

void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
    CHECK_NE(finalResult, (status_t)OK);

    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setInt32("finalResult", finalResult);
    msg->post();
}

status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
    writeToAMessage(msg, rate);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed <= 0.f) {
        ALOGW("playback rate cannot be %f", rate.mSpeed);
        return BAD_VALUE;
    }

    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            ALOGW("failed to set playback rate on audio sink, err(%d)", err);
            return err;
        }
    }
    mPlaybackSettings = rate;
    mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
    return OK;
}

status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, rate);
        }
    }
    return err;
}

status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate, %f vs %f",
                        rate->mSpeed, mPlaybackSettings.mSpeed);
            }
            // Adopt the playback settings used by the audio sink, as they may be
            // slightly off because the audio sink does not take small changes.
            mPlaybackSettings = *rate;
        }
        return err;
    }
    *rate = mPlaybackSettings;
    return OK;
}

status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
    writeToAMessage(msg, sync, videoFpsHint);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
        return BAD_VALUE;
    }
    // TODO: support sync sources
    return INVALID_OPERATION;
}

status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, sync, videoFps);
        }
    }
    return err;
}

status_t NuPlayer2::Renderer::onGetSyncSettings(
        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
    *sync = mSyncSettings;
    *videoFps = -1.f;
    return OK;
}

void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        mMediaClock->clearAnchor();
        mVideoLateByUs = 0;
        mNextVideoTimeMediaUs = -1;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}

void NuPlayer2::Renderer::signalTimeDiscontinuity() {
}

void NuPlayer2::Renderer::signalDisableOffloadAudio() {
    (new AMessage(kWhatDisableOffloadAudio, this))->post();
}

void NuPlayer2::Renderer::signalEnableOffloadAudio() {
    (new AMessage(kWhatEnableOffloadAudio, this))->post();
}

void NuPlayer2::Renderer::pause() {
    (new AMessage(kWhatPause, this))->post();
}

void NuPlayer2::Renderer::resume() {
    (new AMessage(kWhatResume, this))->post();
}

void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
    msg->setFloat("frame-rate", fps);
    msg->post();
}

// Called on any thread, without mLock acquired.
status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}
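
// Illustrative numbers for the anchor computation above (hypothetical values, not
// taken from this file): if the first audio sample had media time 2.000 secs and the
// sink reports 150 msec of audio played out "now", the clock is anchored at
// nowMediaUs = 2.150 secs, and the second getMediaTime() call extrapolates from there.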

void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}

void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    if (mAudioFirstAnchorTimeMediaUs == -1) {
        mAudioFirstAnchorTimeMediaUs = mediaUs;
        mMediaClock->setStartingTimeMedia(mediaUs);
    }
}

// Called on renderer looper.
void NuPlayer2::Renderer::clearAnchorTime() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}

void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}

int64_t NuPlayer2::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}

status_t NuPlayer2::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded,
        bool isStreaming) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);
    msg->setInt32("isStreaming", isStreaming);

    sp<AMessage> response;
    status_t postStatus = msg->postAndAwaitResponse(&response);

    int32_t err;
    if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}

void NuPlayer2::Renderer::closeAudioSink() {
    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);
}

void NuPlayer2::Renderer::changeAudioFormat(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool isStreaming,
        const sp<AMessage> &notify) {
    sp<AMessage> meta = new AMessage;
    meta->setMessage("format", format);
    meta->setInt32("offload-only", offloadOnly);
    meta->setInt32("has-video", hasVideo);
    meta->setInt32("flags", flags);
    meta->setInt32("isStreaming", isStreaming);

    sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
    msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
    msg->setMessage("notify", notify);
    msg->setMessage("meta", meta);
    msg->post();
}

void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            uint32_t isStreaming;
            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatChangeAudioFormat:
        {
            int32_t queueGeneration;
            CHECK(msg->findInt32("queueGeneration", &queueGeneration));

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));

            if (offloadingAudio()) {
                ALOGW("changeAudioFormat should NOT be called in offload mode");
                notify->setInt32("err", INVALID_OPERATION);
                notify->post();
                break;
            }

            sp<AMessage> meta;
            CHECK(msg->findMessage("meta", &meta));

            if (queueGeneration != getQueueGeneration(true /* audio */)
                    || mAudioQueue.empty()) {
                onChangeAudioFormat(meta, notify);
                break;
            }

            QueueEntry entry;
            entry.mNotifyConsumed = notify;
            entry.mMeta = meta;

            Mutex::Autolock autoLock(mLock);
            mAudioQueue.push_back(entry);
            postDrainAudioQueue_l();

            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                // Handle AudioTrack race when start is immediately called after flush.
                uint32_t numFramesPendingPlayout =
                    (mNumFramesWritten > numFramesPlayed ?
                        mNumFramesWritten - numFramesPlayed : 0);

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackSettings.mSpeed > 1.0f) {
                    delayUs /= mPlaybackSettings.mSpeed;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
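                // Illustrative numbers (assuming a 48 kHz PCM sink; hypothetical,
                // not specific to this file): 4800 pending frames at ~0.0208
                // msec/frame leave ~100000 usec of buffered audio, so the next
                // drain would be scheduled roughly 50000 usec from now.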
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}

void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }
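    // Illustrative numbers for the deferral above (hypothetical values): if draining
    // while paused only becomes allowed 30000 usec from now and the requested delayUs
    // is 10000 usec, the drain message is deferred to ~30000 usec instead.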

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}

void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}

void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}

// static
size_t NuPlayer2::Renderer::AudioSinkCallback(
        MediaPlayer2Interface::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayer2Interface::AudioSink::cb_event_t event) {
    NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;

    switch (event) {
        case MediaPlayer2Interface::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOSCallback();
            break;
        }

        case MediaPlayer2Interface::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}

void NuPlayer2::Renderer::notifyEOSCallback() {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return;
    }

    notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
}

size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return 0;
    }

    bool hasEOS = false;

    size_t sizeCopied = 0;
    bool firstEntry = true;
    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
    while (sizeCopied < size && !mAudioQueue.empty()) {
        entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) { // EOS
            hasEOS = true;
            mAudioQueue.erase(mAudioQueue.begin());
            break;
        }

        if (firstEntry && entry->mOffset == 0) {
            firstEntry = false;
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;
        size_t sizeRemaining = size - sizeCopied;
        if (copy > sizeRemaining) {
            copy = sizeRemaining;
        }

        memcpy((char *)buffer + sizeCopied,
               entry->mBuffer->data() + entry->mOffset,
               copy);

        entry->mOffset += copy;
        if (entry->mOffset == entry->mBuffer->size()) {
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
        }
        sizeCopied += copy;

        notifyIfMediaRenderingStarted_l();
    }

    if (mAudioFirstAnchorTimeMediaUs >= 0) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs =
            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
        // we don't know how much data we are queueing for offloaded tracks.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    }

    // for non-offloaded audio, we need to compute the frames written because
    // there is no EVENT_STREAM_END notification. The frames written gives
    // an estimate on the pending played out duration.
    if (!offloadingAudio()) {
        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    }

    if (hasEOS) {
        (new AMessage(kWhatStopAudioSink, this))->post();
        // As there is currently no EVENT_STREAM_END callback notification for
        // non-offloaded audio tracks, we need to post the EOS ourselves.
        if (!offloadingAudio()) {
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            ALOGV("fillAudioBuffer: notifyEOS_l "
                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
            notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
        }
    }
    return sizeCopied;
}

void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == nullptr) {
                if (it->mNotifyConsumed == nullptr) {
                    // delay doesn't matter as we don't even have an AudioTrack
                    notifyEOS(true /* audio */, it->mFinalResult);
                } else {
                    // TAG for re-opening audio sink.
                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
                }
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}
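
// A hypothetical walk-through of the method above: with a queue of
// [data A, EOS, data B], itEOS ends up just past the EOS entry, so A's
// notifyConsumed reply is posted (its sample is dropped), the EOS entry is
// handled via notifyEOS() or onChangeAudioFormat(), both entries are erased,
// and data B stays queued for normal draining.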

bool NuPlayer2::Renderer::onDrainAudioQueue() {
    // do not drain audio during teardown as queued buffers may be invalid.
    if (mAudioTornDown) {
        return false;
    }
    // TODO: This call to getPosition checks if AudioTrack has been created
    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
    // CHECKs on getPosition will fail.
    // We still need to figure out why AudioTrack is not created when
    // this function is called. One possible reason could be leftover
    // audio. Another possible place is to check whether decoder
    // has received INFO_FORMAT_CHANGED as the first buffer since
    // AudioSink is opened there, and possible interactions with flush
    // immediately after start. Investigate error message
    // "vorbis_dsp_synthesis returned -135", along with RTSP.
    uint32_t numFramesPlayed;
    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
        // When getPosition fails, renderer will not reschedule the draining
        // unless new samples are queued.
        // If we have pending EOS (or "eos" marker for discontinuities), we need
        // to post these now as NuPlayer2Decoder might be waiting for it.
        drainAudioQueueUntilLastEOS();

        ALOGW("onDrainAudioQueue(): audio sink is not ready");
        return false;
    }

#if 0
    ssize_t numFramesAvailableToWrite =
        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);

    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
        ALOGI("audio sink underrun");
    } else {
        ALOGV("audio queue has %d frames left to play",
             mAudioSink->frameCount() - numFramesAvailableToWrite);
    }
#endif

    uint32_t prevFramesWritten = mNumFramesWritten;
    while (!mAudioQueue.empty()) {
        QueueEntry *entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) {
            if (entry->mNotifyConsumed != nullptr) {
                // TAG for re-open audio sink.
                onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
                mAudioQueue.erase(mAudioQueue.begin());
                continue;
            }

            // EOS
            if (mPaused) {
                // Do not notify EOS when paused.
                // This is needed to avoid switching to the next clip while paused.
                ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
                return false;
            }

            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);

            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
            if (mAudioSink->needsTrailingPadding()) {
                // If we're not in gapless playback (i.e. through setNextPlayer), we
                // need to stop the track here, because that will play out the last
                // little bit at the end of the file. Otherwise short files won't play.
                mAudioSink->stop();
                mNumFramesWritten = 0;
            }
            return false;
        }

        mLastAudioBufferDrained = entry->mBufferOrdinal;

        // ignore 0-sized buffer which could be EOS marker with no data
        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
                    mediaTimeUs / 1E6);
            onNewAudioMediaTime(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;

        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
                                            copy, false /* blocking */);
        if (written < 0) {
            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
            if (written == WOULD_BLOCK) {
                ALOGV("AudioSink write would block when writing %zu bytes", copy);
            } else {
                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
                // This can only happen when AudioSink was opened with doNotReconnect flag set to
                // true, in which case the NuPlayer2 will handle the reconnect.
                notifyAudioTearDown(kDueToError);
            }
            break;
        }

        entry->mOffset += written;
        size_t remainder = entry->mBuffer->size() - entry->mOffset;
        if ((ssize_t)remainder < mAudioSink->frameSize()) {
            if (remainder > 0) {
                ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
                        remainder);
                entry->mOffset += remainder;
                copy -= remainder;
            }

            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());

            entry = NULL;
        }

        size_t copiedFrames = written / mAudioSink->frameSize();
        mNumFramesWritten += copiedFrames;

        {
            Mutex::Autolock autoLock(mLock);
            int64_t maxTimeMedia;
            maxTimeMedia =
                mAnchorTimeMediaUs +
                        (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
                                * 1000LL * mAudioSink->msecsPerFrame());
            mMediaClock->updateMaxTimeMedia(maxTimeMedia);

            notifyIfMediaRenderingStarted_l();
        }

        if (written != (ssize_t)copy) {
            // A short count was received from AudioSink::write()
            //
            // AudioSink write is called in non-blocking mode.
            // It may return with a short count when:
            //
            // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
            //    discarded.
            // 2) The data to be copied exceeds the available buffer in AudioSink.
            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.

            // (Case 1)
            // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
            // needs to fail, as we should not carry over fractional frames between calls.
            CHECK_EQ(copy % mAudioSink->frameSize(), 0u);

            // (Case 2, 3, 4)
            // Return early to the caller.
            // Beware of calling immediately again as this may busy-loop if you are not careful.
            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
            break;
        }
    }

    // calculate whether we need to reschedule another write.
    bool reschedule = !mAudioQueue.empty()
            && (!mPaused
                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
    return reschedule;
}

int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
    int32_t sampleRate = offloadingAudio() ?
            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
    if (sampleRate == 0) {
        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
        return 0;
    }
    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
}
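
// Illustrative numbers for the overflow noted in the TODO above (assuming a 48 kHz
// track): 48000 frames -> 1 second; the (int32_t) cast wraps once numFrames reaches
// 2^31, which at 48 kHz is about 44739 seconds, i.e. the ~12.4 hours cited above.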

// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
    if (mUseVirtualAudioSink) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t mediaUs;
        if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
            return 0LL;
        } else {
            return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
        }
    }

    const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
    int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
    if (pendingUs < 0) {
        // This shouldn't happen unless the timestamp is stale.
        ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
                "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
                __func__, (long long)pendingUs,
                (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
        pendingUs = 0;
    }
    return pendingUs;
}
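
// Illustrative numbers for the common (non-virtual-sink) path above (hypothetical
// values): if frames worth 500000 usec have been written and the sink reports
// 420000 usec already played out, roughly 80000 usec of audio is still pending.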

int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
    int64_t realUs;
    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
        // If we failed to get the current position, e.g. because the audio
        // clock is not ready, just play out video immediately without delay.
        return nowUs;
    }
    return realUs;
}

void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}
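
// Illustrative timeline for the virtual-sink fallback above: more than
// kMaxAllowedAudioSinkDelayUs (1.5 seconds) of audio has been written, yet the sink
// still reports no progress and the media clock has no anchor. From that point the
// clock is anchored at the first audio media time and advances with the system
// clock, as if a sink had started rendering at that moment.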
|  | 1235 |  | 
|  | 1236 | // Called without mLock acquired. | 
|  | 1237 | void NuPlayer2::Renderer::postDrainVideoQueue() { | 
|  | 1238 | if (mDrainVideoQueuePending | 
|  | 1239 | || getSyncQueues() | 
|  | 1240 | || (mPaused && mVideoSampleReceived)) { | 
|  | 1241 | return; | 
|  | 1242 | } | 
|  | 1243 |  | 
|  | 1244 | if (mVideoQueue.empty()) { | 
|  | 1245 | return; | 
|  | 1246 | } | 
|  | 1247 |  | 
|  | 1248 | QueueEntry &entry = *mVideoQueue.begin(); | 
|  | 1249 |  | 
|  | 1250 | sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this); | 
|  | 1251 | msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */)); | 
|  | 1252 |  | 
|  | 1253 | if (entry.mBuffer == NULL) { | 
|  | 1254 | // EOS doesn't carry a timestamp. | 
|  | 1255 | msg->post(); | 
|  | 1256 | mDrainVideoQueuePending = true; | 
|  | 1257 | return; | 
|  | 1258 | } | 
|  | 1259 |  | 
|  | 1260 | int64_t nowUs = ALooper::GetNowUs(); | 
|  | 1261 | if (mFlags & FLAG_REAL_TIME) { | 
|  | 1262 | int64_t realTimeUs; | 
|  | 1263 | CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs)); | 
|  | 1264 |  | 
|  | 1265 | realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000; | 
|  | 1266 |  | 
|  | 1267 | int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000); | 
|  | 1268 |  | 
|  | 1269 | int64_t delayUs = realTimeUs - nowUs; | 
|  | 1270 |  | 
|  | 1271 | ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs); | 
|  | 1272 | // post 2 display refreshes before rendering is due | 
|  | 1273 | msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0); | 
|  | 1274 |  | 
|  | 1275 | mDrainVideoQueuePending = true; | 
|  | 1276 | return; | 
|  | 1277 | } | 
|  | 1278 |  | 
|  | 1279 | int64_t mediaTimeUs; | 
|  | 1280 | CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); | 
|  | 1281 |  | 
|  | 1282 | { | 
|  | 1283 | Mutex::Autolock autoLock(mLock); | 
|  | 1284 | if (mAnchorTimeMediaUs < 0) { | 
|  | 1285 | mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs); | 
|  | 1286 | mAnchorTimeMediaUs = mediaTimeUs; | 
|  | 1287 | } | 
|  | 1288 | } | 
| Wei Jia | 1225b93 | 2019-01-04 12:44:38 -0800 | [diff] [blame] | 1289 | mNextVideoTimeMediaUs = mediaTimeUs; | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1290 | if (!mHasAudio) { | 
|  | 1291 | // Smooth out videos >= 10fps: let the media clock run up to 100 ms past this frame. | 
| Wei Jia | 1225b93 | 2019-01-04 12:44:38 -0800 | [diff] [blame] | 1292 | mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1293 | } | 
|  | 1294 |  | 
|  | 1295 | if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) { | 
|  | 1296 | msg->post(); | 
|  | 1297 | } else { | 
|  | 1298 | int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000); | 
|  | 1299 |  | 
|  | 1300 | // post 2 display refreshes before rendering is due | 
|  | 1301 | mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs); | 
|  | 1302 | } | 
|  | 1303 |  | 
|  | 1304 | mDrainVideoQueuePending = true; | 
|  | 1305 | } | 
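// --- Editor's sketch (illustration, not part of NuPlayer2Renderer.cpp) ---
// The timing math shared by both branches above: wake up two display
// refreshes before the frame's scheduled real time so the buffer reaches the
// display pipeline in time, and never post with a negative delay. Times are
// in microseconds; the helper name is hypothetical.
#include <algorithm>
#include <cstdint>

static int64_t videoPostDelayUsSketch(int64_t scheduledRealTimeUs, int64_t nowUs,
                                      int64_t vsyncPeriodUs) {
    const int64_t twoVsyncsUs = 2 * vsyncPeriodUs;
    const int64_t delayUs = scheduledRealTimeUs - nowUs;
    return std::max<int64_t>(0, delayUs - twoVsyncsUs);
}
// For example, at 60 Hz (vsyncPeriodUs ~= 16667) a frame due 50 ms from now is
// posted after roughly 50000 - 33333 ~= 16.7 ms.
// --- end sketch ---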
|  | 1306 |  | 
|  | 1307 | void NuPlayer2::Renderer::onDrainVideoQueue() { | 
|  | 1308 | if (mVideoQueue.empty()) { | 
|  | 1309 | return; | 
|  | 1310 | } | 
|  | 1311 |  | 
|  | 1312 | QueueEntry *entry = &*mVideoQueue.begin(); | 
|  | 1313 |  | 
|  | 1314 | if (entry->mBuffer == NULL) { | 
|  | 1315 | // EOS | 
|  | 1316 |  | 
|  | 1317 | notifyEOS(false /* audio */, entry->mFinalResult); | 
|  | 1318 |  | 
|  | 1319 | mVideoQueue.erase(mVideoQueue.begin()); | 
|  | 1320 | entry = NULL; | 
|  | 1321 |  | 
|  | 1322 | setVideoLateByUs(0); | 
|  | 1323 | return; | 
|  | 1324 | } | 
|  | 1325 |  | 
|  | 1326 | int64_t nowUs = ALooper::GetNowUs(); | 
|  | 1327 | int64_t realTimeUs; | 
|  | 1328 | int64_t mediaTimeUs = -1; | 
|  | 1329 | if (mFlags & FLAG_REAL_TIME) { | 
|  | 1330 | CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs)); | 
|  | 1331 | } else { | 
|  | 1332 | CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); | 
|  | 1333 |  | 
|  | 1334 | realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); | 
|  | 1335 | } | 
|  | 1336 | realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000; | 
|  | 1337 |  | 
|  | 1338 | bool tooLate = false; | 
|  | 1339 |  | 
|  | 1340 | if (!mPaused) { | 
|  | 1341 | setVideoLateByUs(nowUs - realTimeUs); | 
|  | 1342 | tooLate = (mVideoLateByUs > 40000); | 
|  | 1343 |  | 
|  | 1344 | if (tooLate) { | 
|  | 1345 | ALOGV("video late by %lld us (%.2f secs)", | 
|  | 1346 | (long long)mVideoLateByUs, mVideoLateByUs / 1E6); | 
|  | 1347 | } else { | 
|  | 1348 | int64_t mediaUs = 0; | 
|  | 1349 | mMediaClock->getMediaTime(realTimeUs, &mediaUs); | 
|  | 1350 | ALOGV("rendering video at media time %.2f secs", | 
|  | 1351 | (mFlags & FLAG_REAL_TIME ? realTimeUs : | 
|  | 1352 | mediaUs) / 1E6); | 
|  | 1353 |  | 
|  | 1354 | if (!(mFlags & FLAG_REAL_TIME) | 
|  | 1355 | && mLastAudioMediaTimeUs != -1 | 
|  | 1356 | && mediaTimeUs > mLastAudioMediaTimeUs) { | 
|  | 1357 | // If audio ends before video, video continues to drive media clock. | 
|  | 1358 | // Also smooth out videos >= 10fps. | 
|  | 1359 | mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); | 
|  | 1360 | } | 
|  | 1361 | } | 
|  | 1362 | } else { | 
|  | 1363 | setVideoLateByUs(0); | 
|  | 1364 | if (!mVideoSampleReceived && !mHasAudio) { | 
|  | 1365 | // This ensures that the first frame after a flush won't be used as an anchor | 
|  | 1366 | // while the renderer is paused, because a resume can happen at any time after a seek. | 
|  | 1367 | clearAnchorTime(); | 
|  | 1368 | } | 
|  | 1369 | } | 
|  | 1370 |  | 
|  | 1371 | // Always render the first video frame while keeping stats on A/V sync. | 
|  | 1372 | if (!mVideoSampleReceived) { | 
|  | 1373 | realTimeUs = nowUs; | 
|  | 1374 | tooLate = false; | 
|  | 1375 | } | 
|  | 1376 |  | 
| Chih-Hung Hsieh | d42529d | 2018-12-11 13:53:10 -0800 | [diff] [blame] | 1377 | entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL); | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1378 | entry->mNotifyConsumed->setInt32("render", !tooLate); | 
|  | 1379 | entry->mNotifyConsumed->post(); | 
|  | 1380 | mVideoQueue.erase(mVideoQueue.begin()); | 
|  | 1381 | entry = NULL; | 
|  | 1382 |  | 
|  | 1383 | mVideoSampleReceived = true; | 
|  | 1384 |  | 
|  | 1385 | if (!mPaused) { | 
|  | 1386 | if (!mVideoRenderingStarted) { | 
|  | 1387 | mVideoRenderingStarted = true; | 
|  | 1388 | notifyVideoRenderingStart(); | 
|  | 1389 | } | 
|  | 1390 | Mutex::Autolock autoLock(mLock); | 
|  | 1391 | notifyIfMediaRenderingStarted_l(); | 
|  | 1392 | } | 
|  | 1393 | } | 
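// --- Editor's sketch (illustration, not part of NuPlayer2Renderer.cpp) ---
// The drop policy of onDrainVideoQueue() in isolation: a frame is handed to
// the display with render=false (dropped) when it is more than 40 ms late,
// except that the first frame after a flush is always rendered. The 40 ms
// threshold comes from the code above; the struct and names are illustrative.
#include <cstdint>

struct RenderDecisionSketch {
    bool render;       // pass the frame to the display?
    int64_t lateByUs;  // positive when the frame missed its deadline
};

static RenderDecisionSketch decideRenderSketch(
        int64_t nowUs, int64_t realTimeUs, bool firstSampleAfterFlush, bool paused) {
    RenderDecisionSketch d{true, 0};
    if (!paused) {
        d.lateByUs = nowUs - realTimeUs;
        d.render = (d.lateByUs <= 40000);
    }
    if (firstSampleAfterFlush) {
        d.render = true;  // always show the first frame promptly
    }
    return d;
}
// --- end sketch ---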
|  | 1394 |  | 
|  | 1395 | void NuPlayer2::Renderer::notifyVideoRenderingStart() { | 
|  | 1396 | sp<AMessage> notify = mNotify->dup(); | 
|  | 1397 | notify->setInt32("what", kWhatVideoRenderingStart); | 
|  | 1398 | notify->post(); | 
|  | 1399 | } | 
|  | 1400 |  | 
|  | 1401 | void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) { | 
|  | 1402 | Mutex::Autolock autoLock(mLock); | 
|  | 1403 | notifyEOS_l(audio, finalResult, delayUs); | 
|  | 1404 | } | 
|  | 1405 |  | 
|  | 1406 | void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) { | 
|  | 1407 | if (audio && delayUs > 0) { | 
|  | 1408 | sp<AMessage> msg = new AMessage(kWhatEOS, this); | 
|  | 1409 | msg->setInt32("audioEOSGeneration", mAudioEOSGeneration); | 
|  | 1410 | msg->setInt32("finalResult", finalResult); | 
|  | 1411 | msg->post(delayUs); | 
|  | 1412 | return; | 
|  | 1413 | } | 
|  | 1414 | sp<AMessage> notify = mNotify->dup(); | 
|  | 1415 | notify->setInt32("what", kWhatEOS); | 
|  | 1416 | notify->setInt32("audio", static_cast<int32_t>(audio)); | 
|  | 1417 | notify->setInt32("finalResult", finalResult); | 
|  | 1418 | notify->post(delayUs); | 
|  | 1419 |  | 
|  | 1420 | if (audio) { | 
|  | 1421 | // Video might outlive audio. Clear the anchor to enable the video-only case. | 
|  | 1422 | mAnchorTimeMediaUs = -1; | 
|  | 1423 | mHasAudio = false; | 
|  | 1424 | if (mNextVideoTimeMediaUs >= 0) { | 
|  | 1425 | int64_t mediaUs = 0; | 
| Wei Jia | 1225b93 | 2019-01-04 12:44:38 -0800 | [diff] [blame] | 1426 | int64_t nowUs = ALooper::GetNowUs(); | 
|  | 1427 | status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs); | 
|  | 1428 | if (result == OK) { | 
|  | 1429 | if (mNextVideoTimeMediaUs > mediaUs) { | 
|  | 1430 | mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs); | 
|  | 1431 | } | 
|  | 1432 | } else { | 
|  | 1433 | mMediaClock->updateAnchor( | 
|  | 1434 | mNextVideoTimeMediaUs, nowUs, mNextVideoTimeMediaUs + 100000); | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1435 | } | 
|  | 1436 | } | 
|  | 1437 | } | 
|  | 1438 | } | 
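// --- Editor's sketch (illustration, not part of NuPlayer2Renderer.cpp) ---
// The audio-EOS handoff above, reduced to a decision: if the media clock is
// still ticking, merely raise its max media time to the last queued video
// time; if it has no usable anchor, re-anchor it at that video time (the real
// code also grants the usual 100 ms of slack). Names are illustrative.
#include <cstdint>

enum class ClockFixupSketch { kExtendMaxTime, kReAnchor, kNone };

static ClockFixupSketch onAudioEosSketch(
        bool clockHasMediaTime, int64_t clockMediaUs, int64_t nextVideoTimeMediaUs) {
    if (!clockHasMediaTime) {
        return ClockFixupSketch::kReAnchor;
    }
    return (nextVideoTimeMediaUs > clockMediaUs)
            ? ClockFixupSketch::kExtendMaxTime
            : ClockFixupSketch::kNone;
}
// --- end sketch ---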
|  | 1439 |  | 
|  | 1440 | void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) { | 
|  | 1441 | sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this); | 
|  | 1442 | msg->setInt32("reason", reason); | 
|  | 1443 | msg->post(); | 
|  | 1444 | } | 
|  | 1445 |  | 
|  | 1446 | void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) { | 
|  | 1447 | int32_t audio; | 
|  | 1448 | CHECK(msg->findInt32("audio", &audio)); | 
|  | 1449 |  | 
|  | 1450 | if (dropBufferIfStale(audio, msg)) { | 
|  | 1451 | return; | 
|  | 1452 | } | 
|  | 1453 |  | 
|  | 1454 | if (audio) { | 
|  | 1455 | mHasAudio = true; | 
|  | 1456 | } else { | 
|  | 1457 | mHasVideo = true; | 
|  | 1458 | } | 
|  | 1459 |  | 
|  | 1460 | if (mHasVideo) { | 
|  | 1461 | if (mVideoScheduler == NULL) { | 
| Dichen Zhang | c37b190 | 2018-12-18 11:36:13 -0800 | [diff] [blame] | 1462 | mVideoScheduler = new VideoFrameScheduler2(); | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1463 | mVideoScheduler->init(); | 
|  | 1464 | } | 
|  | 1465 | } | 
|  | 1466 |  | 
|  | 1467 | sp<RefBase> obj; | 
|  | 1468 | CHECK(msg->findObject("buffer", &obj)); | 
|  | 1469 | sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get()); | 
|  | 1470 |  | 
|  | 1471 | sp<AMessage> notifyConsumed; | 
|  | 1472 | CHECK(msg->findMessage("notifyConsumed", &notifyConsumed)); | 
|  | 1473 |  | 
|  | 1474 | QueueEntry entry; | 
|  | 1475 | entry.mBuffer = buffer; | 
|  | 1476 | entry.mNotifyConsumed = notifyConsumed; | 
|  | 1477 | entry.mOffset = 0; | 
|  | 1478 | entry.mFinalResult = OK; | 
|  | 1479 | entry.mBufferOrdinal = ++mTotalBuffersQueued; | 
|  | 1480 |  | 
|  | 1481 | if (audio) { | 
|  | 1482 | Mutex::Autolock autoLock(mLock); | 
|  | 1483 | mAudioQueue.push_back(entry); | 
|  | 1484 | postDrainAudioQueue_l(); | 
|  | 1485 | } else { | 
|  | 1486 | mVideoQueue.push_back(entry); | 
|  | 1487 | postDrainVideoQueue(); | 
|  | 1488 | } | 
|  | 1489 |  | 
|  | 1490 | Mutex::Autolock autoLock(mLock); | 
|  | 1491 | if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) { | 
|  | 1492 | return; | 
|  | 1493 | } | 
|  | 1494 |  | 
|  | 1495 | sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer; | 
|  | 1496 | sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer; | 
|  | 1497 |  | 
|  | 1498 | if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) { | 
|  | 1499 | // EOS signalled on either queue. | 
|  | 1500 | syncQueuesDone_l(); | 
|  | 1501 | return; | 
|  | 1502 | } | 
|  | 1503 |  | 
|  | 1504 | int64_t firstAudioTimeUs; | 
|  | 1505 | int64_t firstVideoTimeUs; | 
|  | 1506 | CHECK(firstAudioBuffer->meta() | 
|  | 1507 | ->findInt64("timeUs", &firstAudioTimeUs)); | 
|  | 1508 | CHECK(firstVideoBuffer->meta() | 
|  | 1509 | ->findInt64("timeUs", &firstVideoTimeUs)); | 
|  | 1510 |  | 
|  | 1511 | int64_t diff = firstVideoTimeUs - firstAudioTimeUs; | 
|  | 1512 |  | 
|  | 1513 | ALOGV("queueDiff = %.2f secs", diff / 1E6); | 
|  | 1514 |  | 
| Chih-Hung Hsieh | d42529d | 2018-12-11 13:53:10 -0800 | [diff] [blame] | 1515 | if (diff > 100000LL) { | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1516 | // Audio data starts more than 0.1 secs before video. | 
|  | 1517 | // Drop some audio. | 
|  | 1518 |  | 
|  | 1519 | (*mAudioQueue.begin()).mNotifyConsumed->post(); | 
|  | 1520 | mAudioQueue.erase(mAudioQueue.begin()); | 
|  | 1521 | return; | 
|  | 1522 | } | 
|  | 1523 |  | 
|  | 1524 | syncQueuesDone_l(); | 
|  | 1525 | } | 
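// --- Editor's sketch (illustration, not part of NuPlayer2Renderer.cpp) ---
// The alignment rule applied while mSyncQueues is set: audio that starts more
// than 0.1 s before the first video frame is consumed without being played,
// one buffer at a time, until the two queue heads are close enough.
#include <cstdint>

static bool shouldDropLeadingAudioSketch(int64_t firstAudioTimeUs,
                                         int64_t firstVideoTimeUs) {
    return (firstVideoTimeUs - firstAudioTimeUs) > 100000LL;  // > 0.1 s gap
}
// --- end sketch ---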
|  | 1526 |  | 
|  | 1527 | void NuPlayer2::Renderer::syncQueuesDone_l() { | 
|  | 1528 | if (!mSyncQueues) { | 
|  | 1529 | return; | 
|  | 1530 | } | 
|  | 1531 |  | 
|  | 1532 | mSyncQueues = false; | 
|  | 1533 |  | 
|  | 1534 | if (!mAudioQueue.empty()) { | 
|  | 1535 | postDrainAudioQueue_l(); | 
|  | 1536 | } | 
|  | 1537 |  | 
|  | 1538 | if (!mVideoQueue.empty()) { | 
|  | 1539 | mLock.unlock(); | 
|  | 1540 | postDrainVideoQueue(); | 
|  | 1541 | mLock.lock(); | 
|  | 1542 | } | 
|  | 1543 | } | 
|  | 1544 |  | 
|  | 1545 | void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) { | 
|  | 1546 | int32_t audio; | 
|  | 1547 | CHECK(msg->findInt32("audio", &audio)); | 
|  | 1548 |  | 
|  | 1549 | if (dropBufferIfStale(audio, msg)) { | 
|  | 1550 | return; | 
|  | 1551 | } | 
|  | 1552 |  | 
|  | 1553 | int32_t finalResult; | 
|  | 1554 | CHECK(msg->findInt32("finalResult", &finalResult)); | 
|  | 1555 |  | 
|  | 1556 | QueueEntry entry; | 
|  | 1557 | entry.mOffset = 0; | 
|  | 1558 | entry.mFinalResult = finalResult; | 
|  | 1559 |  | 
|  | 1560 | if (audio) { | 
|  | 1561 | Mutex::Autolock autoLock(mLock); | 
|  | 1562 | if (mAudioQueue.empty() && mSyncQueues) { | 
|  | 1563 | syncQueuesDone_l(); | 
|  | 1564 | } | 
|  | 1565 | mAudioQueue.push_back(entry); | 
|  | 1566 | postDrainAudioQueue_l(); | 
|  | 1567 | } else { | 
|  | 1568 | if (mVideoQueue.empty() && getSyncQueues()) { | 
|  | 1569 | Mutex::Autolock autoLock(mLock); | 
|  | 1570 | syncQueuesDone_l(); | 
|  | 1571 | } | 
|  | 1572 | mVideoQueue.push_back(entry); | 
|  | 1573 | postDrainVideoQueue(); | 
|  | 1574 | } | 
|  | 1575 | } | 
|  | 1576 |  | 
|  | 1577 | void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) { | 
|  | 1578 | int32_t audio, notifyComplete; | 
|  | 1579 | CHECK(msg->findInt32("audio", &audio)); | 
|  | 1580 |  | 
|  | 1581 | { | 
|  | 1582 | Mutex::Autolock autoLock(mLock); | 
|  | 1583 | if (audio) { | 
|  | 1584 | notifyComplete = mNotifyCompleteAudio; | 
|  | 1585 | mNotifyCompleteAudio = false; | 
|  | 1586 | mLastAudioMediaTimeUs = -1; | 
|  | 1587 | } else { | 
|  | 1588 | notifyComplete = mNotifyCompleteVideo; | 
|  | 1589 | mNotifyCompleteVideo = false; | 
| Wei Jia | d1864f9 | 2018-10-19 12:34:56 -0700 | [diff] [blame] | 1590 | mVideoRenderingStarted = false; | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1591 | } | 
|  | 1592 |  | 
|  | 1593 | // If we're currently syncing the queues, i.e. dropping audio while | 
|  | 1594 | // aligning the first audio/video buffer times and only one of the | 
|  | 1595 | // two queues has data, we may starve that queue by not requesting | 
|  | 1596 | // more buffers from the decoder. If the other source then encounters | 
|  | 1597 | // a discontinuity that leads to flushing, we'll never find the | 
|  | 1598 | // corresponding discontinuity on the other queue. | 
|  | 1599 | // Therefore we'll stop syncing the queues if at least one of them | 
|  | 1600 | // is flushed. | 
|  | 1601 | syncQueuesDone_l(); | 
|  | 1602 | } | 
|  | 1603 | clearAnchorTime(); | 
|  | 1604 |  | 
|  | 1605 | ALOGV("flushing %s", audio ? "audio" : "video"); | 
|  | 1606 | if (audio) { | 
|  | 1607 | { | 
|  | 1608 | Mutex::Autolock autoLock(mLock); | 
|  | 1609 | flushQueue(&mAudioQueue); | 
|  | 1610 |  | 
|  | 1611 | ++mAudioDrainGeneration; | 
|  | 1612 | ++mAudioEOSGeneration; | 
|  | 1613 | prepareForMediaRenderingStart_l(); | 
|  | 1614 |  | 
|  | 1615 | // the frame count will be reset after flush. | 
|  | 1616 | clearAudioFirstAnchorTime_l(); | 
|  | 1617 | } | 
|  | 1618 |  | 
|  | 1619 | mDrainAudioQueuePending = false; | 
|  | 1620 |  | 
|  | 1621 | if (offloadingAudio()) { | 
|  | 1622 | mAudioSink->pause(); | 
|  | 1623 | mAudioSink->flush(); | 
|  | 1624 | if (!mPaused) { | 
|  | 1625 | mAudioSink->start(); | 
|  | 1626 | } | 
|  | 1627 | } else { | 
|  | 1628 | mAudioSink->pause(); | 
|  | 1629 | mAudioSink->flush(); | 
|  | 1630 | // Call stop() to signal to the AudioSink to completely fill the | 
|  | 1631 | // internal buffer before resuming playback. | 
|  | 1632 | // FIXME: this is ignored after flush(). | 
|  | 1633 | mAudioSink->stop(); | 
|  | 1634 | if (mPaused) { | 
|  | 1635 | // Race condition: if renderer is paused and audio sink is stopped, | 
|  | 1636 | // we need to make sure that the audio track buffer fully drains | 
|  | 1637 | // before delivering data. | 
|  | 1638 | // FIXME: remove this if we can detect if stop() is complete. | 
|  | 1639 | const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms) | 
|  | 1640 | mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs; | 
|  | 1641 | } else { | 
|  | 1642 | mAudioSink->start(); | 
|  | 1643 | } | 
|  | 1644 | mNumFramesWritten = 0; | 
|  | 1645 | } | 
|  | 1646 | mNextAudioClockUpdateTimeUs = -1; | 
|  | 1647 | } else { | 
|  | 1648 | flushQueue(&mVideoQueue); | 
|  | 1649 |  | 
|  | 1650 | mDrainVideoQueuePending = false; | 
|  | 1651 |  | 
|  | 1652 | if (mVideoScheduler != NULL) { | 
|  | 1653 | mVideoScheduler->restart(); | 
|  | 1654 | } | 
|  | 1655 |  | 
|  | 1656 | Mutex::Autolock autoLock(mLock); | 
|  | 1657 | ++mVideoDrainGeneration; | 
|  | 1658 | prepareForMediaRenderingStart_l(); | 
|  | 1659 | } | 
|  | 1660 |  | 
|  | 1661 | mVideoSampleReceived = false; | 
|  | 1662 |  | 
|  | 1663 | if (notifyComplete) { | 
|  | 1664 | notifyFlushComplete(audio); | 
|  | 1665 | } | 
|  | 1666 | } | 
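// --- Editor's sketch (illustration, not part of NuPlayer2Renderer.cpp) ---
// The settling window set in the paused-flush path above, in numbers: two
// 50 ms mixer cycles give a 100 ms window after flush during which a paused
// renderer must not write to the stopped AudioSink. A later drain would then
// be gated roughly like this (mPauseDrainAudioAllowedUs semantics as set above):
#include <cstdint>

static bool pausedAudioDrainAllowedSketch(int64_t nowUs,
                                          int64_t pauseDrainAllowedUs) {
    return nowUs >= pauseDrainAllowedUs;  // stop() settling window has passed
}
// --- end sketch ---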
|  | 1667 |  | 
|  | 1668 | void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) { | 
|  | 1669 | while (!queue->empty()) { | 
|  | 1670 | QueueEntry *entry = &*queue->begin(); | 
|  | 1671 |  | 
|  | 1672 | if (entry->mBuffer != NULL) { | 
|  | 1673 | entry->mNotifyConsumed->post(); | 
|  | 1674 | } else if (entry->mNotifyConsumed != nullptr) { | 
|  | 1675 | // Does the audio sink need to be opened now? | 
|  | 1676 | onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed); | 
|  | 1677 | } | 
|  | 1678 |  | 
|  | 1679 | queue->erase(queue->begin()); | 
|  | 1680 | entry = NULL; | 
|  | 1681 | } | 
|  | 1682 | } | 
|  | 1683 |  | 
|  | 1684 | void NuPlayer2::Renderer::notifyFlushComplete(bool audio) { | 
|  | 1685 | sp<AMessage> notify = mNotify->dup(); | 
|  | 1686 | notify->setInt32("what", kWhatFlushComplete); | 
|  | 1687 | notify->setInt32("audio", static_cast<int32_t>(audio)); | 
|  | 1688 | notify->post(); | 
|  | 1689 | } | 
|  | 1690 |  | 
|  | 1691 | bool NuPlayer2::Renderer::dropBufferIfStale( | 
|  | 1692 | bool audio, const sp<AMessage> &msg) { | 
|  | 1693 | int32_t queueGeneration; | 
|  | 1694 | CHECK(msg->findInt32("queueGeneration", &queueGeneration)); | 
|  | 1695 |  | 
|  | 1696 | if (queueGeneration == getQueueGeneration(audio)) { | 
|  | 1697 | return false; | 
|  | 1698 | } | 
|  | 1699 |  | 
|  | 1700 | sp<AMessage> notifyConsumed; | 
|  | 1701 | if (msg->findMessage("notifyConsumed", &notifyConsumed)) { | 
|  | 1702 | notifyConsumed->post(); | 
|  | 1703 | } | 
|  | 1704 |  | 
|  | 1705 | return true; | 
|  | 1706 | } | 
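// --- Editor's sketch (illustration, not part of NuPlayer2Renderer.cpp) ---
// The generation-counter pattern behind dropBufferIfStale() (and the audio/
// video drain generations): every flush bumps a counter, every message is
// stamped with the counter at post time, and a mismatch on receipt marks the
// message as stale.
#include <cstdint>

struct GenerationGuardSketch {
    int32_t generation = 0;
    void invalidate() { ++generation; }  // e.g. on flush
    bool isStale(int32_t stamped) const { return stamped != generation; }
};
// --- end sketch ---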
|  | 1707 |  | 
|  | 1708 | void NuPlayer2::Renderer::onAudioSinkChanged() { | 
|  | 1709 | if (offloadingAudio()) { | 
|  | 1710 | return; | 
|  | 1711 | } | 
|  | 1712 | CHECK(!mDrainAudioQueuePending); | 
|  | 1713 | mNumFramesWritten = 0; | 
|  | 1714 | mAnchorNumFramesWritten = -1; | 
|  | 1715 | uint32_t written; | 
|  | 1716 | if (mAudioSink->getFramesWritten(&written) == OK) { | 
|  | 1717 | mNumFramesWritten = written; | 
|  | 1718 | } | 
|  | 1719 | } | 
|  | 1720 |  | 
|  | 1721 | void NuPlayer2::Renderer::onDisableOffloadAudio() { | 
|  | 1722 | Mutex::Autolock autoLock(mLock); | 
|  | 1723 | mFlags &= ~FLAG_OFFLOAD_AUDIO; | 
|  | 1724 | ++mAudioDrainGeneration; | 
|  | 1725 | if (mAudioRenderingStartGeneration != -1) { | 
|  | 1726 | prepareForMediaRenderingStart_l(); | 
|  | 1727 | } | 
|  | 1728 | } | 
|  | 1729 |  | 
|  | 1730 | void NuPlayer2::Renderer::onEnableOffloadAudio() { | 
|  | 1731 | Mutex::Autolock autoLock(mLock); | 
|  | 1732 | mFlags |= FLAG_OFFLOAD_AUDIO; | 
|  | 1733 | ++mAudioDrainGeneration; | 
|  | 1734 | if (mAudioRenderingStartGeneration != -1) { | 
|  | 1735 | prepareForMediaRenderingStart_l(); | 
|  | 1736 | } | 
|  | 1737 | } | 
|  | 1738 |  | 
|  | 1739 | void NuPlayer2::Renderer::onPause() { | 
|  | 1740 | if (mPaused) { | 
|  | 1741 | return; | 
|  | 1742 | } | 
|  | 1743 |  | 
|  | 1744 | { | 
|  | 1745 | Mutex::Autolock autoLock(mLock); | 
|  | 1746 | // we do not increment the audio drain generation, so that the audio buffer keeps filling during pause. | 
|  | 1747 | ++mVideoDrainGeneration; | 
|  | 1748 | prepareForMediaRenderingStart_l(); | 
|  | 1749 | mPaused = true; | 
|  | 1750 | mMediaClock->setPlaybackRate(0.0); | 
|  | 1751 | } | 
|  | 1752 |  | 
|  | 1753 | mDrainAudioQueuePending = false; | 
|  | 1754 | mDrainVideoQueuePending = false; | 
|  | 1755 |  | 
|  | 1756 | // Note: audio data may not have been decoded, and the AudioSink may not be opened. | 
|  | 1757 | mAudioSink->pause(); | 
|  | 1758 | startAudioOffloadPauseTimeout(); | 
|  | 1759 |  | 
|  | 1760 | ALOGV("now paused audio queue has %zu entries, video has %zu entries", | 
|  | 1761 | mAudioQueue.size(), mVideoQueue.size()); | 
|  | 1762 | } | 
|  | 1763 |  | 
|  | 1764 | void NuPlayer2::Renderer::onResume() { | 
|  | 1765 | if (!mPaused) { | 
|  | 1766 | return; | 
|  | 1767 | } | 
|  | 1768 |  | 
|  | 1769 | // Note: audio data may not have been decoded, and the AudioSink may not be opened. | 
|  | 1770 | cancelAudioOffloadPauseTimeout(); | 
|  | 1771 | if (mAudioSink->ready()) { | 
|  | 1772 | status_t err = mAudioSink->start(); | 
|  | 1773 | if (err != OK) { | 
|  | 1774 | ALOGE("cannot start AudioSink err %d", err); | 
|  | 1775 | notifyAudioTearDown(kDueToError); | 
|  | 1776 | } | 
|  | 1777 | } | 
|  | 1778 |  | 
|  | 1779 | { | 
|  | 1780 | Mutex::Autolock autoLock(mLock); | 
|  | 1781 | mPaused = false; | 
|  | 1782 | // rendering started message may have been delayed if we were paused. | 
|  | 1783 | if (mRenderingDataDelivered) { | 
|  | 1784 | notifyIfMediaRenderingStarted_l(); | 
|  | 1785 | } | 
|  | 1786 | // configure audiosink as we did not do it when pausing | 
|  | 1787 | if (mAudioSink != NULL && mAudioSink->ready()) { | 
|  | 1788 | mAudioSink->setPlaybackRate(mPlaybackSettings); | 
|  | 1789 | } | 
|  | 1790 |  | 
| Wei Jia | 700a7c2 | 2018-09-14 18:04:35 -0700 | [diff] [blame] | 1791 | mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed); | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1792 |  | 
|  | 1793 | if (!mAudioQueue.empty()) { | 
|  | 1794 | postDrainAudioQueue_l(); | 
|  | 1795 | } | 
|  | 1796 | } | 
|  | 1797 |  | 
|  | 1798 | if (!mVideoQueue.empty()) { | 
|  | 1799 | postDrainVideoQueue(); | 
|  | 1800 | } | 
|  | 1801 | } | 
|  | 1802 |  | 
|  | 1803 | void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) { | 
|  | 1804 | if (mVideoScheduler == NULL) { | 
| Dichen Zhang | c37b190 | 2018-12-18 11:36:13 -0800 | [diff] [blame] | 1805 | mVideoScheduler = new VideoFrameScheduler2(); | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1806 | } | 
|  | 1807 | mVideoScheduler->init(fps); | 
|  | 1808 | } | 
|  | 1809 |  | 
|  | 1810 | int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) { | 
|  | 1811 | Mutex::Autolock autoLock(mLock); | 
|  | 1812 | return (audio ? mAudioQueueGeneration : mVideoQueueGeneration); | 
|  | 1813 | } | 
|  | 1814 |  | 
|  | 1815 | int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) { | 
|  | 1816 | Mutex::Autolock autoLock(mLock); | 
|  | 1817 | return (audio ? mAudioDrainGeneration : mVideoDrainGeneration); | 
|  | 1818 | } | 
|  | 1819 |  | 
|  | 1820 | bool NuPlayer2::Renderer::getSyncQueues() { | 
|  | 1821 | Mutex::Autolock autoLock(mLock); | 
|  | 1822 | return mSyncQueues; | 
|  | 1823 | } | 
|  | 1824 |  | 
|  | 1825 | void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) { | 
|  | 1826 | if (mAudioTornDown) { | 
|  | 1827 | return; | 
|  | 1828 | } | 
|  | 1829 | mAudioTornDown = true; | 
|  | 1830 |  | 
|  | 1831 | int64_t currentPositionUs; | 
|  | 1832 | sp<AMessage> notify = mNotify->dup(); | 
|  | 1833 | if (getCurrentPosition(&currentPositionUs) == OK) { | 
|  | 1834 | notify->setInt64("positionUs", currentPositionUs); | 
|  | 1835 | } | 
|  | 1836 |  | 
|  | 1837 | mAudioSink->stop(); | 
|  | 1838 | mAudioSink->flush(); | 
|  | 1839 |  | 
|  | 1840 | notify->setInt32("what", kWhatAudioTearDown); | 
|  | 1841 | notify->setInt32("reason", reason); | 
|  | 1842 | notify->post(); | 
|  | 1843 | } | 
|  | 1844 |  | 
|  | 1845 | void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() { | 
|  | 1846 | if (offloadingAudio()) { | 
|  | 1847 | mWakeLock->acquire(); | 
|  | 1848 | sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this); | 
|  | 1849 | msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration); | 
|  | 1850 | msg->post(kOffloadPauseMaxUs); | 
|  | 1851 | } | 
|  | 1852 | } | 
|  | 1853 |  | 
|  | 1854 | void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() { | 
|  | 1855 | // We may have called startAudioOffloadPauseTimeout() without | 
|  | 1856 | // the AudioSink open and with offloadingAudio enabled. | 
|  | 1857 | // | 
|  | 1858 | // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless | 
|  | 1859 | // we always release the wakelock and increment the pause timeout generation. | 
|  | 1860 | // | 
|  | 1861 | // Note: The acquired wakelock prevents the device from suspending | 
|  | 1862 | // immediately after offload pause (in case a resume happens shortly thereafter). | 
|  | 1863 | mWakeLock->release(true); | 
|  | 1864 | ++mAudioOffloadPauseTimeoutGeneration; | 
|  | 1865 | } | 
|  | 1866 |  | 
|  | 1867 | status_t NuPlayer2::Renderer::onOpenAudioSink( | 
|  | 1868 | const sp<AMessage> &format, | 
|  | 1869 | bool offloadOnly, | 
|  | 1870 | bool hasVideo, | 
|  | 1871 | uint32_t flags, | 
|  | 1872 | bool isStreaming) { | 
|  | 1873 | ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)", | 
|  | 1874 | offloadOnly, offloadingAudio()); | 
| Dichen Zhang | f872691 | 2018-10-17 13:31:26 -0700 | [diff] [blame] | 1875 |  | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1876 | bool audioSinkChanged = false; | 
|  | 1877 |  | 
|  | 1878 | int32_t numChannels; | 
|  | 1879 | CHECK(format->findInt32("channel-count", &numChannels)); | 
|  | 1880 |  | 
|  | 1881 | int32_t channelMask; | 
|  | 1882 | if (!format->findInt32("channel-mask", &channelMask)) { | 
|  | 1883 | // signal to the AudioSink to derive the mask from count. | 
|  | 1884 | channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER; | 
|  | 1885 | } | 
|  | 1886 |  | 
|  | 1887 | int32_t sampleRate; | 
|  | 1888 | CHECK(format->findInt32("sample-rate", &sampleRate)); | 
|  | 1889 |  | 
| Wei Jia | 896b4d6 | 2019-01-04 15:26:48 -0800 | [diff] [blame] | 1890 | // read pcm encoding from MediaCodec output format, if available | 
|  | 1891 | int32_t pcmEncoding; | 
|  | 1892 | audio_format_t audioFormat = | 
|  | 1893 | format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ? | 
|  | 1894 | audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT; | 
|  | 1895 |  | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1896 | if (offloadingAudio()) { | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1897 | AString mime; | 
|  | 1898 | CHECK(format->findString("mime", &mime)); | 
|  | 1899 | status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str()); | 
|  | 1900 |  | 
|  | 1901 | if (err != OK) { | 
|  | 1902 | ALOGE("Couldn't map mime \"%s\" to a valid " | 
|  | 1903 | "audio_format", mime.c_str()); | 
|  | 1904 | onDisableOffloadAudio(); | 
|  | 1905 | } else { | 
|  | 1906 | ALOGV("Mime \"%s\" mapped to audio_format 0x%x", | 
|  | 1907 | mime.c_str(), audioFormat); | 
|  | 1908 |  | 
|  | 1909 | int avgBitRate = -1; | 
|  | 1910 | format->findInt32("bitrate", &avgBitRate); | 
|  | 1911 |  | 
|  | 1912 | int32_t aacProfile = -1; | 
|  | 1913 | if (audioFormat == AUDIO_FORMAT_AAC | 
|  | 1914 | && format->findInt32("aac-profile", &aacProfile)) { | 
|  | 1915 | // Redefine AAC format as per aac profile | 
|  | 1916 | mapAACProfileToAudioFormat( | 
|  | 1917 | audioFormat, | 
|  | 1918 | aacProfile); | 
|  | 1919 | } | 
|  | 1920 |  | 
|  | 1921 | audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER; | 
|  | 1922 | offloadInfo.duration_us = -1; | 
|  | 1923 | format->findInt64( | 
|  | 1924 | "durationUs", &offloadInfo.duration_us); | 
|  | 1925 | offloadInfo.sample_rate = sampleRate; | 
|  | 1926 | offloadInfo.channel_mask = channelMask; | 
|  | 1927 | offloadInfo.format = audioFormat; | 
|  | 1928 | offloadInfo.stream_type = AUDIO_STREAM_MUSIC; | 
|  | 1929 | offloadInfo.bit_rate = avgBitRate; | 
|  | 1930 | offloadInfo.has_video = hasVideo; | 
|  | 1931 | offloadInfo.is_streaming = isStreaming; | 
|  | 1932 |  | 
|  | 1933 | if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) { | 
|  | 1934 | ALOGV("openAudioSink: no change in offload mode"); | 
|  | 1935 | // no change from previous configuration, everything ok. | 
|  | 1936 | return OK; | 
|  | 1937 | } | 
|  | 1938 | mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; | 
|  | 1939 |  | 
|  | 1940 | ALOGV("openAudioSink: try to open AudioSink in offload mode"); | 
|  | 1941 | uint32_t offloadFlags = flags; | 
|  | 1942 | offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; | 
|  | 1943 | offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER; | 
|  | 1944 | audioSinkChanged = true; | 
|  | 1945 | mAudioSink->close(); | 
|  | 1946 |  | 
|  | 1947 | err = mAudioSink->open( | 
|  | 1948 | sampleRate, | 
|  | 1949 | numChannels, | 
|  | 1950 | (audio_channel_mask_t)channelMask, | 
|  | 1951 | audioFormat, | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1952 | &NuPlayer2::Renderer::AudioSinkCallback, | 
|  | 1953 | this, | 
|  | 1954 | (audio_output_flags_t)offloadFlags, | 
|  | 1955 | &offloadInfo); | 
|  | 1956 |  | 
|  | 1957 | if (err == OK) { | 
|  | 1958 | err = mAudioSink->setPlaybackRate(mPlaybackSettings); | 
|  | 1959 | } | 
|  | 1960 |  | 
|  | 1961 | if (err == OK) { | 
|  | 1962 | // If the playback is offloaded to h/w, we pass | 
|  | 1963 | // the HAL some metadata. | 
|  | 1964 | // We don't want to do this for PCM because it | 
|  | 1965 | // will be going through the AudioFlinger mixer | 
|  | 1966 | // before reaching the hardware. | 
|  | 1967 | // TODO | 
|  | 1968 | mCurrentOffloadInfo = offloadInfo; | 
|  | 1969 | if (!mPaused) { // for preview mode, don't start if paused | 
|  | 1970 | err = mAudioSink->start(); | 
|  | 1971 | } | 
|  | 1972 | ALOGV_IF(err == OK, "openAudioSink: offload succeeded"); | 
|  | 1973 | } | 
|  | 1974 | if (err != OK) { | 
|  | 1975 | // Clean up, fall back to non offload mode. | 
|  | 1976 | mAudioSink->close(); | 
|  | 1977 | onDisableOffloadAudio(); | 
|  | 1978 | mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; | 
|  | 1979 | ALOGV("openAudioSink: offload failed"); | 
|  | 1980 | if (offloadOnly) { | 
|  | 1981 | notifyAudioTearDown(kForceNonOffload); | 
|  | 1982 | } | 
|  | 1983 | } else { | 
|  | 1984 | mUseAudioCallback = true;  // offload mode transfers data through callback | 
|  | 1985 | ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message. | 
|  | 1986 | } | 
|  | 1987 | } | 
|  | 1988 | } | 
|  | 1989 | if (!offloadOnly && !offloadingAudio()) { | 
|  | 1990 | ALOGV("openAudioSink: open AudioSink in NON-offload mode"); | 
|  | 1991 | uint32_t pcmFlags = flags; | 
|  | 1992 | pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; | 
|  | 1993 |  | 
|  | 1994 | const PcmInfo info = { | 
|  | 1995 | (audio_channel_mask_t)channelMask, | 
|  | 1996 | (audio_output_flags_t)pcmFlags, | 
| Wei Jia | 896b4d6 | 2019-01-04 15:26:48 -0800 | [diff] [blame] | 1997 | audioFormat, | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 1998 | numChannels, | 
|  | 1999 | sampleRate | 
|  | 2000 | }; | 
|  | 2001 | if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) { | 
|  | 2002 | ALOGV("openAudioSink: no change in pcm mode"); | 
|  | 2003 | // no change from previous configuration, everything ok. | 
|  | 2004 | return OK; | 
|  | 2005 | } | 
|  | 2006 |  | 
|  | 2007 | audioSinkChanged = true; | 
|  | 2008 | mAudioSink->close(); | 
|  | 2009 | mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; | 
|  | 2010 | // Note: It is possible to set up the callback, but not use it to send audio data. | 
|  | 2011 | // This requires a fix in AudioSink to explicitly specify the transfer mode. | 
|  | 2012 | mUseAudioCallback = getUseAudioCallbackSetting(); | 
|  | 2013 | if (mUseAudioCallback) { | 
|  | 2014 | ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message. | 
|  | 2015 | } | 
|  | 2016 |  | 
|  | 2017 | // Compute the desired buffer size. | 
|  | 2018 | // For callback mode, the amount of time before wakeup is about half the buffer size. | 
|  | 2019 | const uint32_t frameCount = | 
|  | 2020 | (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000; | 
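        // Worked example (editor's note): at 48 kHz with a 500 ms sink
        // setting, frameCount = 48000 * 500 / 1000 = 24000 frames.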
|  | 2021 |  | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 2022 | // We should always be able to set our playback settings if the sink is closed. | 
|  | 2023 | LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK, | 
|  | 2024 | "onOpenAudioSink: can't set playback rate on closed sink"); | 
|  | 2025 | status_t err = mAudioSink->open( | 
|  | 2026 | sampleRate, | 
|  | 2027 | numChannels, | 
|  | 2028 | (audio_channel_mask_t)channelMask, | 
| Wei Jia | 896b4d6 | 2019-01-04 15:26:48 -0800 | [diff] [blame] | 2029 | audioFormat, | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 2030 | mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL, | 
|  | 2031 | mUseAudioCallback ? this : NULL, | 
|  | 2032 | (audio_output_flags_t)pcmFlags, | 
|  | 2033 | NULL, | 
| Wei Jia | 53692fa | 2017-12-11 10:33:46 -0800 | [diff] [blame] | 2034 | frameCount); | 
|  | 2035 | if (err != OK) { | 
|  | 2036 | ALOGW("openAudioSink: non offloaded open failed status: %d", err); | 
|  | 2037 | mAudioSink->close(); | 
|  | 2038 | mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; | 
|  | 2039 | return err; | 
|  | 2040 | } | 
|  | 2041 | mCurrentPcmInfo = info; | 
|  | 2042 | if (!mPaused) { // for preview mode, don't start if paused | 
|  | 2043 | mAudioSink->start(); | 
|  | 2044 | } | 
|  | 2045 | } | 
|  | 2046 | if (audioSinkChanged) { | 
|  | 2047 | onAudioSinkChanged(); | 
|  | 2048 | } | 
|  | 2049 | mAudioTornDown = false; | 
|  | 2050 | return OK; | 
|  | 2051 | } | 
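// --- Editor's sketch (illustration, not part of NuPlayer2Renderer.cpp) ---
// onOpenAudioSink()'s control flow, condensed: try offload when enabled; on
// failure close the sink, disable offload, and tear down only if the caller
// demanded offload; otherwise reopen in PCM mode. Names are illustrative.
enum class SinkModeSketch { kOffload, kPcm, kNone };

static SinkModeSketch chooseSinkModeSketch(bool offloading, bool offloadOpenOk,
                                           bool offloadOnly, bool pcmOpenOk) {
    if (offloading && offloadOpenOk) {
        return SinkModeSketch::kOffload;     // offload succeeded
    }
    if (offloadOnly) {
        return SinkModeSketch::kNone;        // notifyAudioTearDown(kForceNonOffload)
    }
    return pcmOpenOk ? SinkModeSketch::kPcm  // non-offload (PCM) path
                     : SinkModeSketch::kNone;
}
// --- end sketch ---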
|  | 2052 |  | 
|  | 2053 | void NuPlayer2::Renderer::onCloseAudioSink() { | 
|  | 2054 | mAudioSink->close(); | 
|  | 2055 | mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; | 
|  | 2056 | mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; | 
|  | 2057 | } | 
|  | 2058 |  | 
|  | 2059 | void NuPlayer2::Renderer::onChangeAudioFormat( | 
|  | 2060 | const sp<AMessage> &meta, const sp<AMessage> ¬ify) { | 
|  | 2061 | sp<AMessage> format; | 
|  | 2062 | CHECK(meta->findMessage("format", &format)); | 
|  | 2063 |  | 
|  | 2064 | int32_t offloadOnly; | 
|  | 2065 | CHECK(meta->findInt32("offload-only", &offloadOnly)); | 
|  | 2066 |  | 
|  | 2067 | int32_t hasVideo; | 
|  | 2068 | CHECK(meta->findInt32("has-video", &hasVideo)); | 
|  | 2069 |  | 
|  | 2070 | uint32_t flags; | 
|  | 2071 | CHECK(meta->findInt32("flags", (int32_t *)&flags)); | 
|  | 2072 |  | 
|  | 2073 | uint32_t isStreaming; | 
|  | 2074 | CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming)); | 
|  | 2075 |  | 
|  | 2076 | status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming); | 
|  | 2077 |  | 
|  | 2078 | if (err != OK) { | 
|  | 2079 | notify->setInt32("err", err); | 
|  | 2080 | } | 
|  | 2081 | notify->post(); | 
|  | 2082 | } | 
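// --- Editor's sketch (illustration, not part of NuPlayer2Renderer.cpp) ---
// What a sender of the format-change entry consumed by onChangeAudioFormat()
// might assemble, using only AMessage setters whose getters appear in this
// file. The actual NuPlayer2 call site is not shown here; field values are
// placeholders.
static sp<AMessage> makeChangeAudioFormatMetaSketch(const sp<AMessage> &format) {
    sp<AMessage> meta = new AMessage;
    meta->setMessage("format", format);  // the new audio format
    meta->setInt32("offload-only", 0);
    meta->setInt32("has-video", 1);
    meta->setInt32("flags", 0);          // audio_output_flags_t bits
    meta->setInt32("isStreaming", 0);
    return meta;
}
// --- end sketch ---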
|  | 2083 |  | 
|  | 2084 | }  // namespace android |