Merge "Add support for deep audio buffers"
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index 9c61113..d46ce36 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -28,6 +28,7 @@
struct ABuffer;
struct AMessage;
+struct DataSource;
struct MediaBuffer;
struct MediaExtractor;
struct MediaSource;
@@ -60,6 +61,8 @@
status_t getSampleTime(int64_t *sampleTimeUs);
status_t getSampleMeta(sp<MetaData> *sampleMeta);
+ bool getCachedDuration(int64_t *durationUs, bool *eos) const;
+
protected:
virtual ~NuMediaExtractor();
@@ -78,13 +81,21 @@
uint32_t mTrackFlags; // bitmask of "TrackFlags"
};
+ sp<DataSource> mDataSource;
+
sp<MediaExtractor> mImpl;
+ bool mIsWidevineExtractor;
Vector<TrackInfo> mSelectedTracks;
+ int64_t mTotalBitrate; // in bits/sec
+ int64_t mDurationUs;
ssize_t fetchTrackSamples(int64_t seekTimeUs = -1ll);
void releaseTrackSamples();
+ bool getTotalBitrate(int64_t *bitRate) const;
+ void updateDurationAndBitrate();
+
DISALLOW_EVIL_CONSTRUCTORS(NuMediaExtractor);
};
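Annotation (not part of the patch): the new getCachedDuration() accessor above is what a caller would poll to drive buffering UI. A minimal sketch of such a caller, assuming NuMediaExtractor.h and Android logging are available; the helper name and log message are illustrative only:

    // Hypothetical helper (not in this patch): report how much content is
    // buffered ahead of the current read position.
    static void logCachedDuration(const sp<NuMediaExtractor> &extractor) {
        int64_t cachedDurationUs;
        bool eos;
        // Returns false for local files and other non-caching data sources.
        if (extractor->getCachedDuration(&cachedDurationUs, &eos)) {
            ALOGV("approx. %lld us cached ahead of position, eos=%d",
                  (long long)cachedDurationUs, eos);
        }
    }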
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 0957426..f1075b1 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -501,12 +501,12 @@
return mCacheOffset + mCache->totalSize();
}
-size_t NuCachedSource2::approxDataRemaining(status_t *finalStatus) {
+size_t NuCachedSource2::approxDataRemaining(status_t *finalStatus) const {
Mutex::Autolock autoLock(mLock);
return approxDataRemaining_l(finalStatus);
}
-size_t NuCachedSource2::approxDataRemaining_l(status_t *finalStatus) {
+size_t NuCachedSource2::approxDataRemaining_l(status_t *finalStatus) const {
*finalStatus = mFinalStatus;
if (mFinalStatus != OK && mNumRetriesLeft > 0) {
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 00bb74f..123e510 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -21,6 +21,7 @@
#include <media/stagefright/NuMediaExtractor.h>
#include "include/ESDS.h"
+#include "include/NuCachedSource2.h"
#include "include/WVMExtractor.h"
#include <media/stagefright/foundation/ABuffer.h>
@@ -38,7 +39,10 @@
namespace android {
-NuMediaExtractor::NuMediaExtractor() {
+NuMediaExtractor::NuMediaExtractor()
+ : mIsWidevineExtractor(false),
+ mTotalBitrate(-1ll),
+ mDurationUs(-1ll) {
}
NuMediaExtractor::~NuMediaExtractor() {
@@ -66,6 +70,7 @@
return -ENOENT;
}
+ mIsWidevineExtractor = false;
if (!strncasecmp("widevine://", path, 11)) {
String8 mimeType;
float confidence;
@@ -82,6 +87,7 @@
extractor->setAdaptiveStreamingMode(true);
mImpl = extractor;
+ mIsWidevineExtractor = true;
} else {
mImpl = MediaExtractor::Create(dataSource);
}
@@ -90,6 +96,10 @@
return ERROR_UNSUPPORTED;
}
+ mDataSource = dataSource;
+
+ updateDurationAndBitrate();
+
return OK;
}
@@ -111,9 +121,39 @@
return ERROR_UNSUPPORTED;
}
+ mDataSource = fileSource;
+
+ updateDurationAndBitrate();
+
return OK;
}
+void NuMediaExtractor::updateDurationAndBitrate() {
+ mTotalBitrate = 0ll;
+ mDurationUs = -1ll;
+
+ for (size_t i = 0; i < mImpl->countTracks(); ++i) {
+ sp<MetaData> meta = mImpl->getTrackMetaData(i);
+
+ int32_t bitrate;
+ if (!meta->findInt32(kKeyBitRate, &bitrate)) {
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+ ALOGV("track of type '%s' does not publish bitrate", mime);
+
+ mTotalBitrate = -1ll;
+ } else if (mTotalBitrate >= 0ll) {
+ mTotalBitrate += bitrate;
+ }
+
+ int64_t durationUs;
+ if (meta->findInt64(kKeyDuration, &durationUs)
+ && durationUs > mDurationUs) {
+ mDurationUs = durationUs;
+ }
+ }
+}
+
size_t NuMediaExtractor::countTracks() const {
return mImpl == NULL ? 0 : mImpl->countTracks();
}
@@ -508,4 +548,48 @@
return OK;
}
+bool NuMediaExtractor::getTotalBitrate(int64_t *bitrate) const {
+ if (mTotalBitrate >= 0) {
+ *bitrate = mTotalBitrate;
+ return true;
+ }
+
+ off64_t size;
+ if (mDurationUs >= 0 && mDataSource->getSize(&size) == OK) {
+ *bitrate = size * 8000000ll / mDurationUs; // in bits/sec
+ return true;
+ }
+
+ return false;
+}
+
+// Returns true iff cached duration is available/applicable.
+bool NuMediaExtractor::getCachedDuration(
+ int64_t *durationUs, bool *eos) const {
+ int64_t bitrate;
+ if (mIsWidevineExtractor) {
+ sp<WVMExtractor> wvmExtractor =
+ static_cast<WVMExtractor *>(mImpl.get());
+
+ status_t finalStatus;
+ *durationUs = wvmExtractor->getCachedDurationUs(&finalStatus);
+ *eos = (finalStatus != OK);
+ return true;
+ } else if ((mDataSource->flags() & DataSource::kIsCachingDataSource)
+ && getTotalBitrate(&bitrate)) {
+ sp<NuCachedSource2> cachedSource =
+ static_cast<NuCachedSource2 *>(mDataSource.get());
+
+ status_t finalStatus;
+ size_t cachedDataRemaining =
+ cachedSource->approxDataRemaining(&finalStatus);
+
+ *durationUs = cachedDataRemaining * 8000000ll / bitrate;
+ *eos = (finalStatus != OK);
+ return true;
+ }
+
+ return false;
+}
+
} // namespace android
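Annotation (not part of the patch): the two estimates added above reduce to simple arithmetic. When no track publishes a bitrate, the overall bitrate is derived from file size and duration, and the cached duration is then the buffered byte count divided by that bitrate. A standalone sketch of the same math; the 8000000 factor converts bytes per microsecond into bits per second:

    #include <stdint.h>

    // size in bytes, duration in microseconds -> bits per second.
    static int64_t estimateBitrate(int64_t sizeBytes, int64_t durationUs) {
        return sizeBytes * 8000000ll / durationUs;
    }

    // bytes still buffered, bitrate in bits/sec -> microseconds of playback.
    static int64_t estimateCachedDurationUs(int64_t cachedBytes, int64_t bitrate) {
        return cachedBytes * 8000000ll / bitrate;
    }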
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h
index c27a29b..5db4b4b 100644
--- a/media/libstagefright/include/NuCachedSource2.h
+++ b/media/libstagefright/include/NuCachedSource2.h
@@ -49,7 +49,7 @@
////////////////////////////////////////////////////////////////////////////
size_t cachedSize();
- size_t approxDataRemaining(status_t *finalStatus);
+ size_t approxDataRemaining(status_t *finalStatus) const;
void resumeFetchingIfNecessary();
@@ -94,7 +94,7 @@
sp<ALooper> mLooper;
Mutex mSerializer;
- Mutex mLock;
+ mutable Mutex mLock;
Condition mCondition;
PageCache *mCache;
@@ -123,7 +123,7 @@
ssize_t readInternal(off64_t offset, void *data, size_t size);
status_t seekInternal_l(off64_t offset);
- size_t approxDataRemaining_l(status_t *finalStatus);
+ size_t approxDataRemaining_l(status_t *finalStatus) const;
void restartPrefetcherIfNecessary_l(
bool ignoreLowWaterThreshold = false, bool force = false);
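Annotation (not part of the patch): the header changes above follow the standard pattern for const-qualifying a getter that still needs to lock: the mutex is declared mutable so it can be acquired inside const member functions. A generic sketch of the idiom; class and member names are illustrative, not from this patch:

    #include <utils/threads.h>  // android::Mutex

    struct CachedState {
        CachedState() : mRemaining(0) {}

        // const accessor that still takes the lock; legal because mLock is mutable.
        size_t approxRemaining() const {
            android::Mutex::Autolock autoLock(mLock);
            return mRemaining;
        }

    private:
        mutable android::Mutex mLock;
        size_t mRemaining;
    };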
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 99dcf45..bce30d7 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -3994,20 +3994,38 @@
size_t trimEnd;
for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
- int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
- / mCblk->frameSize;
int64_t bufEnd;
- if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
- &bufEnd)) {
- ALOGE("Failed to convert frame count of %lld to media time duration"
- " (scale factor %d/%u) in %s", frameCount,
- mMediaTimeToSampleTransform.a_to_b_numer,
- mMediaTimeToSampleTransform.a_to_b_denom,
- __PRETTY_FUNCTION__);
- break;
+ if ((trimEnd + 1) < mTimedBufferQueue.size()) {
+ // We have a next buffer. Just use its PTS as the PTS of the frame
+ // following the last frame in this buffer. If the stream is sparse
+ // (i.e., there are deliberate gaps left in the stream which should be
+ // filled with silence by the TimedAudioTrack), then this can result
+ // in one extra buffer being left un-trimmed when it could have
+ // been. In general, this is not typical, and we would rather
+ // optimize away the TS calculation below for the more common case
+ // where PTSes are contiguous.
+ bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
+ } else {
+ // We have no next buffer. Compute the PTS of the frame following
+ // the last frame in this buffer by computing the duration of
+ // this buffer in media time units and adding it to the PTS of the
+ // buffer.
+ int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
+ / mCblk->frameSize;
+
+ if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
+ &bufEnd)) {
+ ALOGE("Failed to convert frame count of %lld to media time"
+ " duration" " (scale factor %d/%u) in %s",
+ frameCount,
+ mMediaTimeToSampleTransform.a_to_b_numer,
+ mMediaTimeToSampleTransform.a_to_b_denom,
+ __PRETTY_FUNCTION__);
+ break;
+ }
+ bufEnd += mTimedBufferQueue[trimEnd].pts();
}
- bufEnd += mTimedBufferQueue[trimEnd].pts();
if (bufEnd > mediaTimeNow)
break;
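Annotation (not part of the patch): the trim-point logic above boils down to this: the end PTS of a queued buffer is the next buffer's PTS when one exists, otherwise its own PTS plus its duration converted from frames to media time. A simplified standalone sketch of that decision; the types and the microseconds-as-media-time assumption are illustrative, the real code goes through the track's LinearTransform:

    #include <stddef.h>
    #include <stdint.h>

    struct QueuedBuffer { int64_t pts; int64_t frameCount; };

    static int64_t bufferEndPts(const QueuedBuffer *queue, size_t count,
                                size_t index, uint32_t sampleRate) {
        if (index + 1 < count) {
            // Contiguous stream: the next buffer starts where this one ends.
            return queue[index + 1].pts;
        }
        // Last buffer: convert its length in frames to media time and add it.
        int64_t durationUs = queue[index].frameCount * 1000000ll / sampleRate;
        return queue[index].pts + durationUs;
    }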
@@ -4111,6 +4129,7 @@
if (pts == AudioBufferProvider::kInvalidPTS) {
buffer->raw = 0;
buffer->frameCount = 0;
+ mTimedAudioOutputOnTime = false;
return INVALID_OPERATION;
}
@@ -4203,14 +4222,14 @@
// the current output position is within this threshold, then we will
// concatenate the next input samples to the previous output
const int64_t kSampleContinuityThreshold =
- (static_cast<int64_t>(sampleRate()) << 32) / 10;
+ (static_cast<int64_t>(sampleRate()) << 32) / 250;
// if this is the first buffer of audio that we're emitting from this track
// then it should be almost exactly on time.
const int64_t kSampleStartupThreshold = 1LL << 32;
if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
- (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
+ (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
// the next input is close enough to being on time, so concatenate it
// with the last output
timedYieldSamples_l(buffer);
@@ -4218,7 +4237,13 @@
ALOGVV("*** on time: head.pos=%d frameCount=%u",
head.position(), buffer->frameCount);
return NO_ERROR;
- } else if (sampleDelta > 0) {
+ }
+
+ // Looks like our output is not on time. Reset our on-time status.
+ // The next time we mix samples from our input queue, they should be
+ // within the StartupThreshold.
+ mTimedAudioOutputOnTime = false;
+ if (sampleDelta > 0) {
// the gap between the current output position and the proper start of
// the next input sample is too big, so fill it with silence
uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
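Annotation (not part of the patch): the thresholds in this last hunk are in Q32 fixed point, so the low 32 bits of a value are a fractional frame. (sampleRate << 32) / 250 is therefore 1/250 s (4 ms) worth of frames, tightened from the previous 1/10 s, and (sampleDelta + 0x80000000) >> 32 rounds a Q32 delta back to whole frames. A small sketch of that arithmetic at an assumed 48 kHz output rate:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
        const uint32_t sampleRate = 48000;  // assumed output rate

        // 1/250 s expressed as frames in Q32 fixed point.
        int64_t continuityThreshold =
                (static_cast<int64_t>(sampleRate) << 32) / 250;

        // Round a Q32 frame delta to the nearest whole frame.
        int64_t sampleDelta = continuityThreshold;
        uint32_t frames = (uint32_t)((sampleDelta + 0x80000000ll) >> 32);

        printf("threshold=%lld (Q32) -> %u frames (~4 ms at %u Hz)\n",
               (long long)continuityThreshold, frames, sampleRate);
        return 0;
    }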