| /* |
| * Copyright (C) 2010 The Android Open Source Project |
| * |
| * Licensed under the Apache License, Version 2.0 (the "License"); |
| * you may not use this file except in compliance with the License. |
| * You may obtain a copy of the License at |
| * |
| * http://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| */ |
| |
| //#define LOG_NDEBUG 0 |
| #define LOG_TAG "NuPlayerRenderer" |
| #include <utils/Log.h> |
| |
| #include "NuPlayerRenderer.h" |
| |
| #include <cutils/properties.h> |
| |
| #include <media/stagefright/foundation/ABuffer.h> |
| #include <media/stagefright/foundation/ADebug.h> |
| #include <media/stagefright/foundation/AMessage.h> |
| #include <media/stagefright/MediaErrors.h> |
| #include <media/stagefright/MetaData.h> |
| |
| #include <VideoFrameScheduler.h> |
| |
| #include <inttypes.h> |
| |
| namespace android { |
| |
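| // NuPlayer::Renderer paces decoded output against the clock: it keeps one |
| // queue of buffers per stream, drains audio into the AudioSink (pushed from |
| // the looper in non-offload mode, pulled via the sink callback in offload |
| // mode), and answers each video buffer with a consumed notification that |
| // carries a render/drop decision and a target render time. Media timestamps |
| // are mapped to real time through the anchor pair |
| // mAnchorTimeMediaUs/mAnchorTimeRealUs, normally derived from the audio |
| // stream when one is present. |
| |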
| // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink |
| // is closed to allow the audio DSP to power down. |
| static const int64_t kOffloadPauseMaxUs = 60000000ll; |
| |
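| // Throttles progress reporting: notifyPosition() posts at most one position |
| // update per this interval. |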
| // static |
| const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll; |
| |
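| // Read from the persist.sys.media.avsync system property (see |
| // readProperties()). When set, video buffers are posted a full two vsyncs |
| // ahead of their render time instead of a fraction of a vsync; see |
| // postDrainVideoQueue(). |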
| static bool sFrameAccurateAVsync = false; |
| |
| static void readProperties() { |
| char value[PROPERTY_VALUE_MAX]; |
| if (property_get("persist.sys.media.avsync", value, NULL)) { |
| sFrameAccurateAVsync = |
| !strcmp("1", value) || !strcasecmp("true", value); |
| } |
| } |
| |
| NuPlayer::Renderer::Renderer( |
| const sp<MediaPlayerBase::AudioSink> &sink, |
| const sp<AMessage> ¬ify, |
| uint32_t flags) |
| : mAudioSink(sink), |
| mNotify(notify), |
| mFlags(flags), |
| mNumFramesWritten(0), |
| mDrainAudioQueuePending(false), |
| mDrainVideoQueuePending(false), |
| mAudioQueueGeneration(0), |
| mVideoQueueGeneration(0), |
| mFirstAnchorTimeMediaUs(-1), |
| mAnchorTimeMediaUs(-1), |
| mAnchorTimeRealUs(-1), |
| mFlushingAudio(false), |
| mFlushingVideo(false), |
| mHasAudio(false), |
| mHasVideo(false), |
| mSyncQueues(false), |
| mPaused(false), |
| mVideoSampleReceived(false), |
| mVideoRenderingStarted(false), |
| mVideoRenderingStartGeneration(0), |
| mAudioRenderingStartGeneration(0), |
| mLastPositionUpdateUs(-1ll), |
| mVideoLateByUs(0ll), |
| mAudioOffloadPauseTimeoutGeneration(0), |
| mAudioOffloadTornDown(false) { |
| readProperties(); |
| } |
| |
| NuPlayer::Renderer::~Renderer() { |
| if (offloadingAudio()) { |
| mAudioSink->stop(); |
| mAudioSink->flush(); |
| mAudioSink->close(); |
| } |
| } |
| |
| void NuPlayer::Renderer::queueBuffer( |
| bool audio, |
| const sp<ABuffer> &buffer, |
| const sp<AMessage> ¬ifyConsumed) { |
| sp<AMessage> msg = new AMessage(kWhatQueueBuffer, id()); |
| msg->setInt32("audio", static_cast<int32_t>(audio)); |
| msg->setBuffer("buffer", buffer); |
| msg->setMessage("notifyConsumed", notifyConsumed); |
| msg->post(); |
| } |
| |
| void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) { |
| CHECK_NE(finalResult, (status_t)OK); |
| |
| sp<AMessage> msg = new AMessage(kWhatQueueEOS, id()); |
| msg->setInt32("audio", static_cast<int32_t>(audio)); |
| msg->setInt32("finalResult", finalResult); |
| msg->post(); |
| } |
| |
| void NuPlayer::Renderer::flush(bool audio) { |
| { |
| Mutex::Autolock autoLock(mFlushLock); |
| if (audio) { |
| if (mFlushingAudio) { |
| return; |
| } |
| mFlushingAudio = true; |
| } else { |
| if (mFlushingVideo) { |
| return; |
| } |
| mFlushingVideo = true; |
| } |
| } |
| |
| sp<AMessage> msg = new AMessage(kWhatFlush, id()); |
| msg->setInt32("audio", static_cast<int32_t>(audio)); |
| msg->post(); |
| } |
| |
| void NuPlayer::Renderer::signalTimeDiscontinuity() { |
| Mutex::Autolock autoLock(mLock); |
| // CHECK(mAudioQueue.empty()); |
| // CHECK(mVideoQueue.empty()); |
| mFirstAnchorTimeMediaUs = -1; |
| mAnchorTimeMediaUs = -1; |
| mAnchorTimeRealUs = -1; |
| mSyncQueues = false; |
| } |
| |
| void NuPlayer::Renderer::signalAudioSinkChanged() { |
| (new AMessage(kWhatAudioSinkChanged, id()))->post(); |
| } |
| |
| void NuPlayer::Renderer::signalDisableOffloadAudio() { |
| (new AMessage(kWhatDisableOffloadAudio, id()))->post(); |
| } |
| |
| void NuPlayer::Renderer::pause() { |
| (new AMessage(kWhatPause, id()))->post(); |
| } |
| |
| void NuPlayer::Renderer::resume() { |
| (new AMessage(kWhatResume, id()))->post(); |
| } |
| |
| void NuPlayer::Renderer::setVideoFrameRate(float fps) { |
| sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, id()); |
| msg->setFloat("frame-rate", fps); |
| msg->post(); |
| } |
| |
| void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) { |
| switch (msg->what()) { |
| case kWhatStopAudioSink: |
| { |
| mAudioSink->stop(); |
| break; |
| } |
| |
| case kWhatDrainAudioQueue: |
| { |
| int32_t generation; |
| CHECK(msg->findInt32("generation", &generation)); |
| if (generation != mAudioQueueGeneration) { |
| break; |
| } |
| |
| mDrainAudioQueuePending = false; |
| |
| if (onDrainAudioQueue()) { |
| uint32_t numFramesPlayed; |
| CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), |
| (status_t)OK); |
| |
| uint32_t numFramesPendingPlayout = |
| mNumFramesWritten - numFramesPlayed; |
| |
| // This is how long the audio sink will have data to |
| // play back. |
| int64_t delayUs = |
| mAudioSink->msecsPerFrame() |
| * numFramesPendingPlayout * 1000ll; |
| |
|                 // Let's give it more data after about half that time |
|                 // has elapsed. |
|                 // kWhatDrainAudioQueue is used only in non-offload mode, |
|                 // where the offload callback (the other contender for |
|                 // mLock) never runs, so it is safe to call the _l variant |
|                 // without acquiring mLock here. |
|                 postDrainAudioQueue_l(delayUs / 2); |
| } |
| break; |
| } |
| |
| case kWhatDrainVideoQueue: |
| { |
| int32_t generation; |
| CHECK(msg->findInt32("generation", &generation)); |
| if (generation != mVideoQueueGeneration) { |
| break; |
| } |
| |
| mDrainVideoQueuePending = false; |
| |
| onDrainVideoQueue(); |
| |
| postDrainVideoQueue(); |
| break; |
| } |
| |
| case kWhatQueueBuffer: |
| { |
| onQueueBuffer(msg); |
| break; |
| } |
| |
| case kWhatQueueEOS: |
| { |
| onQueueEOS(msg); |
| break; |
| } |
| |
| case kWhatFlush: |
| { |
| onFlush(msg); |
| break; |
| } |
| |
| case kWhatAudioSinkChanged: |
| { |
| onAudioSinkChanged(); |
| break; |
| } |
| |
| case kWhatDisableOffloadAudio: |
| { |
| onDisableOffloadAudio(); |
| break; |
| } |
| |
| case kWhatPause: |
| { |
| onPause(); |
| break; |
| } |
| |
| case kWhatResume: |
| { |
| onResume(); |
| break; |
| } |
| |
| case kWhatSetVideoFrameRate: |
| { |
| float fps; |
| CHECK(msg->findFloat("frame-rate", &fps)); |
| onSetVideoFrameRate(fps); |
| break; |
| } |
| |
| case kWhatAudioOffloadTearDown: |
| { |
| onAudioOffloadTearDown(); |
| break; |
| } |
| |
| case kWhatAudioOffloadPauseTimeout: |
| { |
| int32_t generation; |
| CHECK(msg->findInt32("generation", &generation)); |
| if (generation != mAudioOffloadPauseTimeoutGeneration) { |
| break; |
| } |
| onAudioOffloadTearDown(); |
| break; |
| } |
| |
| default: |
| TRESPASS(); |
| break; |
| } |
| } |
| |
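| // Drain messages carry the queue generation current at post time. Flushes, |
| // pauses and offload changes bump the generation, so any drain message still |
| // in flight is recognized as stale in onMessageReceived() and dropped. |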
| void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) { |
| if (mDrainAudioQueuePending || mSyncQueues || mPaused |
| || offloadingAudio()) { |
| return; |
| } |
| |
| if (mAudioQueue.empty()) { |
| return; |
| } |
| |
| mDrainAudioQueuePending = true; |
| sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, id()); |
| msg->setInt32("generation", mAudioQueueGeneration); |
| msg->post(delayUs); |
| } |
| |
| void NuPlayer::Renderer::prepareForMediaRenderingStart() { |
| mAudioRenderingStartGeneration = mAudioQueueGeneration; |
| mVideoRenderingStartGeneration = mVideoQueueGeneration; |
| } |
| |
| void NuPlayer::Renderer::notifyIfMediaRenderingStarted() { |
| if (mVideoRenderingStartGeneration == mVideoQueueGeneration && |
| mAudioRenderingStartGeneration == mAudioQueueGeneration) { |
| mVideoRenderingStartGeneration = -1; |
| mAudioRenderingStartGeneration = -1; |
| |
| sp<AMessage> notify = mNotify->dup(); |
| notify->setInt32("what", kWhatMediaRenderingStart); |
| notify->post(); |
| } |
| } |
| |
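| // Entry point for the offload path: the AudioSink calls back on its own |
| // thread to pull more data (CB_EVENT_FILL_BUFFER), to report that the stream |
| // has ended, or to request that the offloaded track be torn down. |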
| // static |
| size_t NuPlayer::Renderer::AudioSinkCallback( |
| MediaPlayerBase::AudioSink * /* audioSink */, |
| void *buffer, |
| size_t size, |
| void *cookie, |
| MediaPlayerBase::AudioSink::cb_event_t event) { |
| NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie; |
| |
| switch (event) { |
|         case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER: |
|         { |
|             return me->fillAudioBuffer(buffer, size); |
|         } |
| |
| case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END: |
| { |
| me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM); |
| break; |
| } |
| |
| case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN: |
| { |
| me->notifyAudioOffloadTearDown(); |
| break; |
| } |
| } |
| |
| return 0; |
| } |
| |
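| // Offload-mode fill, running on the AudioSink callback thread; mLock guards |
| // the audio queue shared with the looper thread. The media-time anchor is |
| // advanced by the played-out duration rather than by the buffer's own |
| // timestamp, since in offload mode data is consumed well ahead of being |
| // heard. |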
| size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) { |
| Mutex::Autolock autoLock(mLock); |
| |
| if (!offloadingAudio() || mPaused) { |
| return 0; |
| } |
| |
| bool hasEOS = false; |
| |
| size_t sizeCopied = 0; |
| bool firstEntry = true; |
| while (sizeCopied < size && !mAudioQueue.empty()) { |
| QueueEntry *entry = &*mAudioQueue.begin(); |
| |
| if (entry->mBuffer == NULL) { // EOS |
| hasEOS = true; |
| mAudioQueue.erase(mAudioQueue.begin()); |
| entry = NULL; |
| break; |
| } |
| |
| if (firstEntry && entry->mOffset == 0) { |
| firstEntry = false; |
| int64_t mediaTimeUs; |
| CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); |
| ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6); |
| if (mFirstAnchorTimeMediaUs == -1) { |
| mFirstAnchorTimeMediaUs = mediaTimeUs; |
| } |
| |
| int64_t nowUs = ALooper::GetNowUs(); |
| mAnchorTimeMediaUs = |
| mFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs); |
| mAnchorTimeRealUs = nowUs; |
| |
| notifyPosition(); |
| } |
| |
| size_t copy = entry->mBuffer->size() - entry->mOffset; |
| size_t sizeRemaining = size - sizeCopied; |
| if (copy > sizeRemaining) { |
| copy = sizeRemaining; |
| } |
| |
| memcpy((char *)buffer + sizeCopied, |
| entry->mBuffer->data() + entry->mOffset, |
| copy); |
| |
| entry->mOffset += copy; |
| if (entry->mOffset == entry->mBuffer->size()) { |
| entry->mNotifyConsumed->post(); |
| mAudioQueue.erase(mAudioQueue.begin()); |
| entry = NULL; |
| } |
| sizeCopied += copy; |
| notifyIfMediaRenderingStarted(); |
| } |
| |
| if (hasEOS) { |
| (new AMessage(kWhatStopAudioSink, id()))->post(); |
| } |
| |
| return sizeCopied; |
| } |
| |
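| // Non-offload drain: writes as much queued audio as currently fits in the |
| // sink, where the free space is the sink's total frame count minus the |
| // frames written but not yet played. For example, with 44.1 kHz 16-bit |
| // stereo PCM, frameSize() is 4 bytes and msecsPerFrame() is about 0.0227, |
| // so a 4096-frame sink buffer holds roughly 93 ms of audio. |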
| bool NuPlayer::Renderer::onDrainAudioQueue() { |
| uint32_t numFramesPlayed; |
| if (mAudioSink->getPosition(&numFramesPlayed) != OK) { |
| return false; |
| } |
| |
| ssize_t numFramesAvailableToWrite = |
| mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed); |
| |
| #if 0 |
| if (numFramesAvailableToWrite == mAudioSink->frameCount()) { |
| ALOGI("audio sink underrun"); |
| } else { |
| ALOGV("audio queue has %d frames left to play", |
| mAudioSink->frameCount() - numFramesAvailableToWrite); |
| } |
| #endif |
| |
| size_t numBytesAvailableToWrite = |
| numFramesAvailableToWrite * mAudioSink->frameSize(); |
| |
| while (numBytesAvailableToWrite > 0 && !mAudioQueue.empty()) { |
| QueueEntry *entry = &*mAudioQueue.begin(); |
| |
| if (entry->mBuffer == NULL) { |
| // EOS |
| int64_t postEOSDelayUs = 0; |
| if (mAudioSink->needsTrailingPadding()) { |
| postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); |
| } |
| notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); |
| |
| mAudioQueue.erase(mAudioQueue.begin()); |
| entry = NULL; |
| return false; |
| } |
| |
| if (entry->mOffset == 0) { |
| int64_t mediaTimeUs; |
| CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); |
| ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6); |
| if (mFirstAnchorTimeMediaUs == -1) { |
| mFirstAnchorTimeMediaUs = mediaTimeUs; |
| } |
| mAnchorTimeMediaUs = mediaTimeUs; |
| |
| int64_t nowUs = ALooper::GetNowUs(); |
| mAnchorTimeRealUs = nowUs + getPendingAudioPlayoutDurationUs(nowUs); |
| |
| notifyPosition(); |
| } |
| |
| size_t copy = entry->mBuffer->size() - entry->mOffset; |
| if (copy > numBytesAvailableToWrite) { |
| copy = numBytesAvailableToWrite; |
| } |
| |
| ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset, copy); |
| if (written < 0) { |
| // An error in AudioSink write is fatal here. |
| LOG_ALWAYS_FATAL("AudioSink write error(%zd) when writing %zu bytes", written, copy); |
| } |
| |
| entry->mOffset += written; |
| if (entry->mOffset == entry->mBuffer->size()) { |
| entry->mNotifyConsumed->post(); |
| mAudioQueue.erase(mAudioQueue.begin()); |
| |
| entry = NULL; |
| } |
| |
| numBytesAvailableToWrite -= written; |
| size_t copiedFrames = written / mAudioSink->frameSize(); |
| mNumFramesWritten += copiedFrames; |
| |
| notifyIfMediaRenderingStarted(); |
| |
| if (written != (ssize_t)copy) { |
| // A short count was received from AudioSink::write() |
| // |
| // AudioSink write should block until exactly the number of bytes are delivered. |
| // But it may return with a short count (without an error) when: |
| // |
| // 1) Size to be copied is not a multiple of the frame size. We consider this fatal. |
| // 2) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded. |
| |
| // (Case 1) |
| // Must be a multiple of the frame size. If it is not a multiple of a frame size, it |
| // needs to fail, as we should not carry over fractional frames between calls. |
| CHECK_EQ(copy % mAudioSink->frameSize(), 0); |
| |
| // (Case 2) |
| // Return early to the caller. |
| // Beware of calling immediately again as this may busy-loop if you are not careful. |
| ALOGW("AudioSink write short frame count %zd < %zu", written, copy); |
| break; |
| } |
| } |
| return !mAudioQueue.empty(); |
| } |
| |
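| // Duration of audio written to the sink but not yet played out, i.e. |
| //   (mNumFramesWritten * usPerFrame) - getPlayedOutAudioDurationUs(nowUs) |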
| int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) { |
| int64_t writtenAudioDurationUs = |
| mNumFramesWritten * 1000LL * mAudioSink->msecsPerFrame(); |
| return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs); |
| } |
| |
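| // Schedules the next video drain. The head buffer's media timestamp is |
| // mapped to a render time via the anchors, |
| //   realTimeUs = (mediaTimeUs - mAnchorTimeMediaUs) + mAnchorTimeRealUs, |
| // snapped to a vsync by the VideoFrameScheduler, and the drain message is |
| // posted slightly ahead of that time so the frame can reach the display. |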
| void NuPlayer::Renderer::postDrainVideoQueue() { |
| if (mDrainVideoQueuePending |
| || mSyncQueues |
| || (mPaused && mVideoSampleReceived)) { |
| return; |
| } |
| |
| if (mVideoQueue.empty()) { |
| return; |
| } |
| |
| QueueEntry &entry = *mVideoQueue.begin(); |
| |
| sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, id()); |
| msg->setInt32("generation", mVideoQueueGeneration); |
| |
| if (entry.mBuffer == NULL) { |
| // EOS doesn't carry a timestamp. |
| msg->post(); |
| mDrainVideoQueuePending = true; |
| return; |
| } |
| |
| int64_t delayUs; |
| int64_t nowUs = ALooper::GetNowUs(); |
| int64_t realTimeUs; |
| if (mFlags & FLAG_REAL_TIME) { |
| int64_t mediaTimeUs; |
| CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); |
| realTimeUs = mediaTimeUs; |
| } else { |
| int64_t mediaTimeUs; |
| CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); |
| |
| if (mFirstAnchorTimeMediaUs == -1 && !mHasAudio) { |
| mFirstAnchorTimeMediaUs = mediaTimeUs; |
| } |
| if (mAnchorTimeMediaUs < 0) { |
| if (!mHasAudio) { |
| mAnchorTimeMediaUs = mediaTimeUs; |
| mAnchorTimeRealUs = nowUs; |
| notifyPosition(); |
| } |
| realTimeUs = nowUs; |
| } else { |
| realTimeUs = |
| (mediaTimeUs - mAnchorTimeMediaUs) + mAnchorTimeRealUs; |
| } |
| } |
| |
| realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000; |
| int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000); |
| |
| delayUs = realTimeUs - nowUs; |
| |
| ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs); |
|     // Post two display refreshes before rendering is due. |
|     // FIXME currently this increases power consumption, so unless |
|     // frame-accurate AV sync is requested, post closer to the required |
|     // render time; the shift below cuts the two-vsync lead to 1/8 vsync. |
|     if (!sFrameAccurateAVsync) { |
|         twoVsyncsUs >>= 4; |
|     } |
| msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0); |
| |
| mDrainVideoQueuePending = true; |
| } |
| |
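| // Releases the head of the video queue. A frame running more than 40 ms |
| // behind the clock is considered too late and is dropped rather than |
| // rendered; the decision and the target render time are handed back to the |
| // decoder through the notifyConsumed message ("render", "timestampNs"). |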
| void NuPlayer::Renderer::onDrainVideoQueue() { |
| if (mVideoQueue.empty()) { |
| return; |
| } |
| |
| QueueEntry *entry = &*mVideoQueue.begin(); |
| |
| if (entry->mBuffer == NULL) { |
| // EOS |
| |
| notifyEOS(false /* audio */, entry->mFinalResult); |
| |
| mVideoQueue.erase(mVideoQueue.begin()); |
| entry = NULL; |
| |
| mVideoLateByUs = 0ll; |
| return; |
| } |
| |
| int64_t realTimeUs; |
| if (mFlags & FLAG_REAL_TIME) { |
| CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs)); |
| } else { |
| int64_t mediaTimeUs; |
| CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); |
| |
| realTimeUs = mediaTimeUs - mAnchorTimeMediaUs + mAnchorTimeRealUs; |
| } |
| |
| bool tooLate = false; |
| |
| if (!mPaused) { |
| mVideoLateByUs = ALooper::GetNowUs() - realTimeUs; |
| tooLate = (mVideoLateByUs > 40000); |
| |
| if (tooLate) { |
|             ALOGV("video late by %" PRId64 " us (%.2f secs)", |
|                  mVideoLateByUs, mVideoLateByUs / 1E6); |
| } else { |
| ALOGV("rendering video at media time %.2f secs", |
| (mFlags & FLAG_REAL_TIME ? realTimeUs : |
| (realTimeUs + mAnchorTimeMediaUs - mAnchorTimeRealUs)) / 1E6); |
| } |
| } else { |
| mVideoLateByUs = 0ll; |
| } |
| |
| entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll); |
| entry->mNotifyConsumed->setInt32("render", !tooLate); |
| entry->mNotifyConsumed->post(); |
| mVideoQueue.erase(mVideoQueue.begin()); |
| entry = NULL; |
| |
| mVideoSampleReceived = true; |
| |
| if (!mPaused) { |
| if (!mVideoRenderingStarted) { |
| mVideoRenderingStarted = true; |
| notifyVideoRenderingStart(); |
| } |
| notifyIfMediaRenderingStarted(); |
| } |
| } |
| |
| void NuPlayer::Renderer::notifyVideoRenderingStart() { |
| sp<AMessage> notify = mNotify->dup(); |
| notify->setInt32("what", kWhatVideoRenderingStart); |
| notify->post(); |
| } |
| |
| void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) { |
| sp<AMessage> notify = mNotify->dup(); |
| notify->setInt32("what", kWhatEOS); |
| notify->setInt32("audio", static_cast<int32_t>(audio)); |
| notify->setInt32("finalResult", finalResult); |
| notify->post(delayUs); |
| } |
| |
| void NuPlayer::Renderer::notifyAudioOffloadTearDown() { |
| (new AMessage(kWhatAudioOffloadTearDown, id()))->post(); |
| } |
| |
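| // While mSyncQueues is set, start-up is gated until both queues hold a |
| // buffer so that the first audio and video samples can be lined up: audio |
| // starting more than 100 ms before the first video frame is dropped, and |
| // syncQueuesDone_l() then kicks off both drains. |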
| void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) { |
| int32_t audio; |
| CHECK(msg->findInt32("audio", &audio)); |
| |
| if (audio) { |
| mHasAudio = true; |
| } else { |
| mHasVideo = true; |
| if (mVideoScheduler == NULL) { |
| mVideoScheduler = new VideoFrameScheduler(); |
| mVideoScheduler->init(); |
| } |
| } |
| |
| if (dropBufferWhileFlushing(audio, msg)) { |
| return; |
| } |
| |
| sp<ABuffer> buffer; |
| CHECK(msg->findBuffer("buffer", &buffer)); |
| |
| sp<AMessage> notifyConsumed; |
| CHECK(msg->findMessage("notifyConsumed", ¬ifyConsumed)); |
| |
| QueueEntry entry; |
| entry.mBuffer = buffer; |
| entry.mNotifyConsumed = notifyConsumed; |
| entry.mOffset = 0; |
| entry.mFinalResult = OK; |
| |
| if (audio) { |
| Mutex::Autolock autoLock(mLock); |
| mAudioQueue.push_back(entry); |
| postDrainAudioQueue_l(); |
| } else { |
| mVideoQueue.push_back(entry); |
| postDrainVideoQueue(); |
| } |
| |
| Mutex::Autolock autoLock(mLock); |
| if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) { |
| return; |
| } |
| |
| sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer; |
| sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer; |
| |
| if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) { |
| // EOS signalled on either queue. |
| syncQueuesDone_l(); |
| return; |
| } |
| |
| int64_t firstAudioTimeUs; |
| int64_t firstVideoTimeUs; |
| CHECK(firstAudioBuffer->meta() |
| ->findInt64("timeUs", &firstAudioTimeUs)); |
| CHECK(firstVideoBuffer->meta() |
| ->findInt64("timeUs", &firstVideoTimeUs)); |
| |
| int64_t diff = firstVideoTimeUs - firstAudioTimeUs; |
| |
| ALOGV("queueDiff = %.2f secs", diff / 1E6); |
| |
| if (diff > 100000ll) { |
|         // Audio data starts more than 0.1 secs before video. |
| // Drop some audio. |
| |
| (*mAudioQueue.begin()).mNotifyConsumed->post(); |
| mAudioQueue.erase(mAudioQueue.begin()); |
| return; |
| } |
| |
| syncQueuesDone_l(); |
| } |
| |
| void NuPlayer::Renderer::syncQueuesDone_l() { |
| if (!mSyncQueues) { |
| return; |
| } |
| |
| mSyncQueues = false; |
| |
| if (!mAudioQueue.empty()) { |
| postDrainAudioQueue_l(); |
| } |
| |
| if (!mVideoQueue.empty()) { |
| postDrainVideoQueue(); |
| } |
| } |
| |
| void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) { |
| int32_t audio; |
| CHECK(msg->findInt32("audio", &audio)); |
| |
| if (dropBufferWhileFlushing(audio, msg)) { |
| return; |
| } |
| |
| int32_t finalResult; |
| CHECK(msg->findInt32("finalResult", &finalResult)); |
| |
| QueueEntry entry; |
| entry.mOffset = 0; |
| entry.mFinalResult = finalResult; |
| |
| if (audio) { |
| Mutex::Autolock autoLock(mLock); |
| if (mAudioQueue.empty() && mSyncQueues) { |
| syncQueuesDone_l(); |
| } |
| mAudioQueue.push_back(entry); |
| postDrainAudioQueue_l(); |
|     } else { |
|         { |
|             // Hold mLock while checking mSyncQueues, mirroring the audio |
|             // branch above. |
|             Mutex::Autolock autoLock(mLock); |
|             if (mVideoQueue.empty() && mSyncQueues) { |
|                 syncQueuesDone_l(); |
|             } |
|         } |
| mVideoQueue.push_back(entry); |
| postDrainVideoQueue(); |
| } |
| } |
| |
| void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) { |
| int32_t audio; |
| CHECK(msg->findInt32("audio", &audio)); |
| |
| { |
| Mutex::Autolock autoLock(mFlushLock); |
| if (audio) { |
| mFlushingAudio = false; |
| } else { |
| mFlushingVideo = false; |
| } |
| } |
| |
| // If we're currently syncing the queues, i.e. dropping audio while |
| // aligning the first audio/video buffer times and only one of the |
| // two queues has data, we may starve that queue by not requesting |
| // more buffers from the decoder. If the other source then encounters |
| // a discontinuity that leads to flushing, we'll never find the |
| // corresponding discontinuity on the other queue. |
| // Therefore we'll stop syncing the queues if at least one of them |
| // is flushed. |
| { |
| Mutex::Autolock autoLock(mLock); |
| syncQueuesDone_l(); |
| } |
| |
| ALOGV("flushing %s", audio ? "audio" : "video"); |
| if (audio) { |
| { |
| Mutex::Autolock autoLock(mLock); |
| flushQueue(&mAudioQueue); |
| |
| ++mAudioQueueGeneration; |
| prepareForMediaRenderingStart(); |
| |
| if (offloadingAudio()) { |
| mFirstAnchorTimeMediaUs = -1; |
| } |
| } |
| |
| mDrainAudioQueuePending = false; |
| |
| if (offloadingAudio()) { |
| mAudioSink->pause(); |
| mAudioSink->flush(); |
| mAudioSink->start(); |
| } |
| } else { |
| flushQueue(&mVideoQueue); |
| |
| mDrainVideoQueuePending = false; |
| ++mVideoQueueGeneration; |
| |
| if (mVideoScheduler != NULL) { |
| mVideoScheduler->restart(); |
| } |
| |
| prepareForMediaRenderingStart(); |
| } |
| |
| mVideoSampleReceived = false; |
| notifyFlushComplete(audio); |
| } |
| |
| void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) { |
| while (!queue->empty()) { |
| QueueEntry *entry = &*queue->begin(); |
| |
| if (entry->mBuffer != NULL) { |
| entry->mNotifyConsumed->post(); |
| } |
| |
| queue->erase(queue->begin()); |
| entry = NULL; |
| } |
| } |
| |
| void NuPlayer::Renderer::notifyFlushComplete(bool audio) { |
| sp<AMessage> notify = mNotify->dup(); |
| notify->setInt32("what", kWhatFlushComplete); |
| notify->setInt32("audio", static_cast<int32_t>(audio)); |
| notify->post(); |
| } |
| |
| bool NuPlayer::Renderer::dropBufferWhileFlushing( |
| bool audio, const sp<AMessage> &msg) { |
| bool flushing = false; |
| |
| { |
| Mutex::Autolock autoLock(mFlushLock); |
| if (audio) { |
| flushing = mFlushingAudio; |
| } else { |
| flushing = mFlushingVideo; |
| } |
| } |
| |
| if (!flushing) { |
| return false; |
| } |
| |
| sp<AMessage> notifyConsumed; |
| if (msg->findMessage("notifyConsumed", ¬ifyConsumed)) { |
| notifyConsumed->post(); |
| } |
| |
| return true; |
| } |
| |
| void NuPlayer::Renderer::onAudioSinkChanged() { |
| if (offloadingAudio()) { |
| return; |
| } |
| CHECK(!mDrainAudioQueuePending); |
| mNumFramesWritten = 0; |
| uint32_t written; |
| if (mAudioSink->getFramesWritten(&written) == OK) { |
| mNumFramesWritten = written; |
| } |
| } |
| |
| void NuPlayer::Renderer::onDisableOffloadAudio() { |
| Mutex::Autolock autoLock(mLock); |
| mFlags &= ~FLAG_OFFLOAD_AUDIO; |
| ++mAudioQueueGeneration; |
| } |
| |
| void NuPlayer::Renderer::notifyPosition() { |
| // notifyPosition() must be called only after setting mAnchorTimeRealUs |
| // and mAnchorTimeMediaUs, and must not be paused as it extrapolates position. |
| //CHECK_GE(mAnchorTimeRealUs, 0); |
| //CHECK_GE(mAnchorTimeMediaUs, 0); |
| //CHECK(!mPaused || !mHasAudio); // video-only does display in paused mode. |
| |
| int64_t nowUs = ALooper::GetNowUs(); |
| |
| if (mLastPositionUpdateUs >= 0 |
| && nowUs < mLastPositionUpdateUs + kMinPositionUpdateDelayUs) { |
| return; |
| } |
| mLastPositionUpdateUs = nowUs; |
| |
| int64_t positionUs = (nowUs - mAnchorTimeRealUs) + mAnchorTimeMediaUs; |
| |
| //ALOGD("notifyPosition: positionUs(%lld) nowUs(%lld) mAnchorTimeRealUs(%lld)" |
| // " mAnchorTimeMediaUs(%lld) mFirstAnchorTimeMediaUs(%lld)", |
| // (long long)positionUs, (long long)nowUs, (long long)mAnchorTimeRealUs, |
| // (long long)mAnchorTimeMediaUs, (long long)mFirstAnchorTimeMediaUs); |
| |
|     // Due to adding the latency to mAnchorTimeRealUs in onDrainAudioQueue(), |
|     // positionUs may be less than the first media time. Clamp it here to |
|     // prevent potential retrograde motion of the position bar when starting |
|     // up after a seek. |
| if (positionUs < mFirstAnchorTimeMediaUs) { |
| positionUs = mFirstAnchorTimeMediaUs; |
| } |
| sp<AMessage> notify = mNotify->dup(); |
| notify->setInt32("what", kWhatPosition); |
| notify->setInt64("positionUs", positionUs); |
| notify->setInt64("videoLateByUs", mVideoLateByUs); |
| notify->post(); |
| } |
| |
| void NuPlayer::Renderer::onPause() { |
| if (mPaused) { |
| ALOGW("Renderer::onPause() called while already paused!"); |
| return; |
| } |
| { |
| Mutex::Autolock autoLock(mLock); |
| ++mAudioQueueGeneration; |
| ++mVideoQueueGeneration; |
| prepareForMediaRenderingStart(); |
| mPaused = true; |
| } |
| |
| mDrainAudioQueuePending = false; |
| mDrainVideoQueuePending = false; |
| |
| if (mHasAudio) { |
| mAudioSink->pause(); |
| startAudioOffloadPauseTimeout(); |
| } |
| |
|     ALOGV("now paused audio queue has %zu entries, video has %zu entries", |
|           mAudioQueue.size(), mVideoQueue.size()); |
| } |
| |
| void NuPlayer::Renderer::onResume() { |
| readProperties(); |
| |
| if (!mPaused) { |
| return; |
| } |
| |
| if (mHasAudio) { |
| cancelAudioOffloadPauseTimeout(); |
| mAudioSink->start(); |
| } |
| |
| Mutex::Autolock autoLock(mLock); |
| mPaused = false; |
| |
| if (!mAudioQueue.empty()) { |
| postDrainAudioQueue_l(); |
| } |
| |
| if (!mVideoQueue.empty()) { |
| postDrainVideoQueue(); |
| } |
| } |
| |
| void NuPlayer::Renderer::onSetVideoFrameRate(float fps) { |
| if (mVideoScheduler == NULL) { |
| mVideoScheduler = new VideoFrameScheduler(); |
| } |
| mVideoScheduler->init(fps); |
| } |
| |
| // TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs() |
| // as it acquires locks and may query the audio driver. |
| // |
| // Some calls are not needed since notifyPosition() doesn't always deliver a message. |
| // Some calls could conceivably retrieve extrapolated data instead of |
| // accessing getTimestamp() or getPosition() every time a data buffer with |
| // a media time is received. |
| // |
| int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) { |
| uint32_t numFramesPlayed; |
| int64_t numFramesPlayedAt; |
| AudioTimestamp ts; |
| static const int64_t kStaleTimestamp100ms = 100000; |
| |
| status_t res = mAudioSink->getTimestamp(ts); |
| if (res == OK) { // case 1: mixing audio tracks and offloaded tracks. |
| numFramesPlayed = ts.mPosition; |
| numFramesPlayedAt = |
| ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000; |
| const int64_t timestampAge = nowUs - numFramesPlayedAt; |
| if (timestampAge > kStaleTimestamp100ms) { |
|             // FIXME (audio framework): |
|             // getTimestamp returns a timestamp which may come from audio mixing threads. |
|             // After pausing, the MixerThread may go idle, so the mTime estimate may |
|             // become stale. Assuming the MixerThread runs every 20ms and the FastMixer |
|             // every 5ms, the max latency should be about 25ms with an average around |
|             // 12ms (to be verified). For safety we use 100ms. |
| ALOGW("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)", |
| (long long)nowUs, (long long)numFramesPlayedAt); |
| numFramesPlayedAt = nowUs - kStaleTimestamp100ms; |
| } |
| //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt); |
| } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track |
| numFramesPlayed = 0; |
| numFramesPlayedAt = nowUs; |
| //ALOGD("getTimestamp: WOULD_BLOCK %d %lld", |
| // numFramesPlayed, (long long)numFramesPlayedAt); |
| } else { // case 3: transitory at new track or audio fast tracks. |
| res = mAudioSink->getPosition(&numFramesPlayed); |
| CHECK_EQ(res, (status_t)OK); |
| numFramesPlayedAt = nowUs; |
| numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */ |
| //ALOGD("getPosition: %d %lld", numFramesPlayed, numFramesPlayedAt); |
| } |
| |
| // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours. |
| //CHECK_EQ(numFramesPlayed & (1 << 31), 0); // can't be negative until 12.4 hrs, test |
| int64_t durationUs = (int32_t)numFramesPlayed * 1000LL * mAudioSink->msecsPerFrame() |
| + nowUs - numFramesPlayedAt; |
|     if (durationUs < 0) { |
|         // Occurs when the numFramesPlayed position is very small and either: |
|         // (1) in case 1, nowUs was sampled before getTimestamp() was called, |
|         //     so numFramesPlayedAt exceeds nowUs by more than the duration of |
|         //     numFramesPlayed, or |
|         // (2) in case 3, the latency allowance added to numFramesPlayedAt |
|         //     exceeds the duration of numFramesPlayed. |
|         // |
|         // Both of these are transitory conditions. |
| ALOGW("getPlayedOutAudioDurationUs: negative timestamp %lld set to zero", (long long)durationUs); |
| durationUs = 0; |
| } |
| ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)", |
| (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt); |
| return durationUs; |
| } |
| |
| void NuPlayer::Renderer::onAudioOffloadTearDown() { |
| if (mAudioOffloadTornDown) { |
| return; |
| } |
| mAudioOffloadTornDown = true; |
| |
| int64_t firstAudioTimeUs; |
| { |
| Mutex::Autolock autoLock(mLock); |
| firstAudioTimeUs = mFirstAnchorTimeMediaUs; |
| } |
| |
| int64_t currentPositionUs = |
| firstAudioTimeUs + getPlayedOutAudioDurationUs(ALooper::GetNowUs()); |
| |
| mAudioSink->stop(); |
| mAudioSink->flush(); |
| |
| sp<AMessage> notify = mNotify->dup(); |
| notify->setInt32("what", kWhatAudioOffloadTearDown); |
| notify->setInt64("positionUs", currentPositionUs); |
| notify->post(); |
| } |
| |
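| // While paused in offload mode, the open AudioSink keeps the audio DSP |
| // powered. After kOffloadPauseMaxUs (60 s) of pause the renderer tears the |
| // sink down (see onAudioOffloadTearDown()); resuming cancels the timeout by |
| // bumping the generation counter. |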
| void NuPlayer::Renderer::startAudioOffloadPauseTimeout() { |
| if (offloadingAudio()) { |
| sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, id()); |
| msg->setInt32("generation", mAudioOffloadPauseTimeoutGeneration); |
| msg->post(kOffloadPauseMaxUs); |
| } |
| } |
| |
| void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() { |
| if (offloadingAudio()) { |
| ++mAudioOffloadPauseTimeoutGeneration; |
| } |
| } |
| |
| } // namespace android |
| |