Merge "No newline or space at end of ALOG format string"
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 4c30e04..e541c18 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -335,7 +335,7 @@
status_t applyRotation();
status_t waitForBufferFilled_l();
- int64_t retrieveDecodingTimeUs(bool isCodecSpecific);
+ int64_t getDecodingTimeUs();
status_t parseAVCCodecSpecificData(
const void *data, size_t size,
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 8073af8..aacfbdd 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -1609,7 +1609,7 @@
mSeekTimeUs,
mSeeking == SEEK_VIDEO_ONLY
? MediaSource::ReadOptions::SEEK_NEXT_SYNC
- : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+ : MediaSource::ReadOptions::SEEK_CLOSEST);
}
for (;;) {
status_t err = mVideoSource->read(&mVideoBuffer, &options);
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 068660b..7ebbe1d 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -23,10 +23,10 @@
#include <pthread.h>
#include <sys/prctl.h>
+#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaDebug.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaSource.h>
@@ -70,6 +70,10 @@
status_t dump(int fd, const Vector<String16>& args) const;
private:
+ enum {
+ kMaxCttsOffsetTimeUs = 1000000LL, // 1 second
+ };
+
MPEG4Writer *mOwner;
sp<MetaData> mMeta;
sp<MediaSource> mSource;
@@ -137,11 +141,12 @@
: sampleCount(count), sampleDuration(timescaledDur) {}
uint32_t sampleCount;
- int32_t sampleDuration; // time scale based
+ uint32_t sampleDuration; // time scale based
};
- bool mHasNegativeCttsDeltaDuration;
size_t mNumCttsTableEntries;
List<CttsTableEntry> mCttsTableEntries;
+ int64_t mMinCttsOffsetTimeUs;
+ int64_t mMaxCttsOffsetTimeUs;
// Sequence parameter set or picture parameter set
struct AVCParamSet {
@@ -172,6 +177,8 @@
// Update the audio track's drift information.
void updateDriftTime(const sp<MetaData>& meta);
+ int32_t getStartTimeOffsetScaledTime() const;
+
static void *ThreadWrapper(void *me);
status_t threadEntry();
@@ -471,7 +478,7 @@
!param->findInt32(kKeyTimeScale, &mTimeScale)) {
mTimeScale = 1000;
}
- CHECK(mTimeScale > 0);
+ CHECK_GT(mTimeScale, 0);
ALOGV("movie time scale: %d", mTimeScale);
mStreamableFile = true;
@@ -490,7 +497,7 @@
}
mEstimatedMoovBoxSize = estimateMoovBoxSize(bitRate);
}
- CHECK(mEstimatedMoovBoxSize >= 8);
+ CHECK_GE(mEstimatedMoovBoxSize, 8);
lseek64(mFd, mFreeBoxOffset, SEEK_SET);
writeInt32(mEstimatedMoovBoxSize);
write("free", 4);
@@ -684,7 +691,7 @@
mWriteMoovBoxToMemory = false;
if (mStreamableFile) {
- CHECK(mMoovBoxBufferOffset + 8 <= mEstimatedMoovBoxSize);
+ CHECK_LE(mMoovBoxBufferOffset + 8, mEstimatedMoovBoxSize);
// Moov box
lseek64(mFd, mFreeBoxOffset, SEEK_SET);
@@ -856,7 +863,7 @@
mOffset += length + 4;
} else {
- CHECK(length < 65536);
+ CHECK_LT(length, 65536);
uint8_t x = length >> 8;
::write(mFd, &x, 1);
@@ -1085,7 +1092,7 @@
void MPEG4Writer::setStartTimestampUs(int64_t timeUs) {
ALOGI("setStartTimestampUs: %lld", timeUs);
- CHECK(timeUs >= 0);
+ CHECK_GE(timeUs, 0ll);
Mutex::Autolock autoLock(mLock);
if (mStartTimestampUs < 0 || mStartTimestampUs > timeUs) {
mStartTimestampUs = timeUs;
@@ -1186,9 +1193,6 @@
if (mIsAudio) {
return;
}
- if (duration < 0 && !mHasNegativeCttsDeltaDuration) {
- mHasNegativeCttsDeltaDuration = true;
- }
CttsTableEntry cttsEntry(sampleCount, duration);
mCttsTableEntries.push_back(cttsEntry);
++mNumCttsTableEntries;
@@ -1218,7 +1222,7 @@
mTimeScale = timeScale;
}
- CHECK(mTimeScale > 0);
+ CHECK_GT(mTimeScale, 0);
}
void MPEG4Writer::Track::getCodecSpecificDataFromInputFormatIfPossible() {
@@ -1299,7 +1303,7 @@
}
}
- CHECK("Received a chunk for a unknown track" == 0);
+ CHECK(!"Received a chunk for a unknown track");
}
void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
@@ -1509,7 +1513,6 @@
mMdatSizeBytes = 0;
mMaxChunkDurationUs = 0;
- mHasNegativeCttsDeltaDuration = false;
pthread_create(&mThread, &attr, ThreadWrapper, this);
pthread_attr_destroy(&attr);
@@ -1833,29 +1836,18 @@
int32_t nChunks = 0;
int32_t nZeroLengthFrames = 0;
int64_t lastTimestampUs = 0; // Previous sample time stamp
- int64_t lastCttsTimeUs = 0; // Previous sample time stamp
int64_t lastDurationUs = 0; // Between the previous two samples
int64_t currDurationTicks = 0; // Timescale based ticks
int64_t lastDurationTicks = 0; // Timescale based ticks
int32_t sampleCount = 1; // Sample count in the current stts table entry
- int64_t currCttsDurTicks = 0; // Timescale based ticks
- int64_t lastCttsDurTicks = 0; // Timescale based ticks
- int32_t cttsSampleCount = 1; // Sample count in the current ctts table entry
- uint32_t previousSampleSize = 0; // Size of the previous sample
+ uint32_t previousSampleSize = 0; // Size of the previous sample
int64_t previousPausedDurationUs = 0;
int64_t timestampUs = 0;
- int64_t cttsDeltaTimeUs = 0;
- bool hasBFrames = false;
+ int64_t cttsOffsetTimeUs = 0;
+ int64_t currCttsOffsetTimeTicks = 0; // Timescale based ticks
+ int64_t lastCttsOffsetTimeTicks = -1; // Timescale based ticks
+ int32_t cttsSampleCount = 0; // Sample count in the current ctts table entry
-#if 1
- // XXX: Samsung's video encoder's output buffer timestamp
- // is not correct. see bug 4724339
- char value[PROPERTY_VALUE_MAX];
- if (property_get("rw.media.record.hasb", value, NULL) &&
- (!strcasecmp(value, "true") || !strcasecmp(value, "1"))) {
- hasBFrames = true;
- }
-#endif
if (mIsAudio) {
prctl(PR_SET_NAME, (unsigned long)"AudioTrackEncoding", 0, 0, 0);
} else {
@@ -1897,7 +1889,7 @@
(const uint8_t *)buffer->data()
+ buffer->range_offset(),
buffer->range_length());
- CHECK_EQ(OK, err);
+ CHECK_EQ((status_t)OK, err);
} else if (mIsMPEG4) {
mCodecSpecificDataSize = buffer->range_length();
mCodecSpecificData = malloc(mCodecSpecificDataSize);
@@ -1963,32 +1955,64 @@
if (mResumed) {
int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs;
- CHECK(durExcludingEarlierPausesUs >= 0);
+ CHECK_GE(durExcludingEarlierPausesUs, 0ll);
int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs;
- CHECK(pausedDurationUs >= lastDurationUs);
+ CHECK_GE(pausedDurationUs, lastDurationUs);
previousPausedDurationUs += pausedDurationUs - lastDurationUs;
mResumed = false;
}
timestampUs -= previousPausedDurationUs;
- CHECK(timestampUs >= 0);
- if (!mIsAudio && hasBFrames) {
+ CHECK_GE(timestampUs, 0ll);
+ if (!mIsAudio) {
/*
* Composition time: timestampUs
* Decoding time: decodingTimeUs
- * Composition time delta = composition time - decoding time
- *
- * We save picture decoding time stamp delta in stts table entries,
- * and composition time delta duration in ctts table entries.
+ * Composition time offset = composition time - decoding time
*/
int64_t decodingTimeUs;
CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs));
decodingTimeUs -= previousPausedDurationUs;
- int64_t timeUs = decodingTimeUs;
- cttsDeltaTimeUs = timestampUs - decodingTimeUs;
+ cttsOffsetTimeUs =
+ timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs;
+ CHECK_GE(cttsOffsetTimeUs, 0ll);
timestampUs = decodingTimeUs;
- ALOGV("decoding time: %lld and ctts delta time: %lld",
- timestampUs, cttsDeltaTimeUs);
+ ALOGV("decoding time: %lld and ctts offset time: %lld",
+ timestampUs, cttsOffsetTimeUs);
+
+ // Update ctts box table if necessary
+ currCttsOffsetTimeTicks =
+ (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL;
+ CHECK_LE(currCttsOffsetTimeTicks, 0x0FFFFFFFFLL);
+ if (mNumSamples == 0) {
+ // Force the first ctts table entry to have one single entry
+ // so that we can do adjustment for the initial track start
+ // time offset easily in writeCttsBox().
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ addOneCttsTableEntry(1, currCttsOffsetTimeTicks);
+ cttsSampleCount = 0; // No sample in ctts box is pending
+ } else {
+ if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) {
+ addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks;
+ cttsSampleCount = 1; // One sample in ctts box is pending
+ } else {
+ ++cttsSampleCount;
+ }
+ }
+
+ // Update ctts time offset range
+ if (mNumSamples == 0) {
+ mMinCttsOffsetTimeUs = currCttsOffsetTimeTicks;
+ mMaxCttsOffsetTimeUs = currCttsOffsetTimeTicks;
+ } else {
+ if (currCttsOffsetTimeTicks > mMaxCttsOffsetTimeUs) {
+ mMaxCttsOffsetTimeUs = currCttsOffsetTimeTicks;
+ } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTimeUs) {
+ mMinCttsOffsetTimeUs = currCttsOffsetTimeTicks;
+ }
+ }
+
}
if (mIsRealTimeRecording) {
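The hunk above is the heart of the ctts change: each composition offset is biased by kMaxCttsOffsetTimeUs so that version-0 ctts values never go negative, converted to timescale ticks with round-half-up, and run-length encoded, with the first sample forced into its own entry so writeCttsBox() can later fold the track start-time offset into it. A minimal standalone sketch of that bookkeeping follows; the names are illustrative (the real code uses addOneCttsTableEntry() and member state on MPEG4Writer::Track), and the input is assumed to be (dts, pts) pairs in microseconds:

    #include <cstdint>
    #include <utility>
    #include <vector>

    static const int64_t kMaxCttsOffsetTimeUs = 1000000LL;  // 1 second bias

    struct CttsEntry {
        uint32_t sampleCount;
        uint32_t sampleOffsetTicks;
    };

    // Run-length encode composition-time offsets, mirroring the logic above.
    std::vector<CttsEntry> buildCttsEntries(
            const std::vector<std::pair<int64_t, int64_t> > &dtsPtsUs,
            int32_t timeScale) {
        std::vector<CttsEntry> entries;
        int64_t lastTicks = -1;
        uint32_t pendingCount = 0;   // samples not yet flushed into an entry

        for (size_t i = 0; i < dtsPtsUs.size(); ++i) {
            // pts + 1s - dts must be >= 0 (the writer CHECKs this).
            int64_t offsetUs =
                    dtsPtsUs[i].second + kMaxCttsOffsetTimeUs - dtsPtsUs[i].first;
            // Round half up when converting microseconds to timescale ticks.
            int64_t ticks = (offsetUs * timeScale + 500000LL) / 1000000LL;

            if (i == 0) {
                // First sample always gets its own single-sample entry.
                entries.push_back(CttsEntry{1, (uint32_t)ticks});
                lastTicks = ticks;
                pendingCount = 0;
            } else if (ticks != lastTicks) {
                if (pendingCount > 0) {
                    entries.push_back(CttsEntry{pendingCount, (uint32_t)lastTicks});
                }
                lastTicks = ticks;
                pendingCount = 1;
            } else {
                ++pendingCount;
            }
        }
        if (pendingCount > 0) {   // flush the last pending run, as at EOS
            entries.push_back(CttsEntry{pendingCount, (uint32_t)lastTicks});
        }
        return entries;
    }

The final flush corresponds to the end-of-stream handling added further down in this file.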
@@ -1997,7 +2021,7 @@
}
}
- CHECK(timestampUs >= 0);
+ CHECK_GE(timestampUs, 0ll);
ALOGV("%s media time stamp: %lld and previous paused duration %lld",
mIsAudio? "Audio": "Video", timestampUs, previousPausedDurationUs);
if (timestampUs > mTrackDurationUs) {
@@ -2012,6 +2036,7 @@
currDurationTicks =
((timestampUs * mTimeScale + 500000LL) / 1000000LL -
(lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
+ CHECK_GE(currDurationTicks, 0ll);
mSampleSizes.push_back(sampleSize);
++mNumSamples;
@@ -2020,25 +2045,12 @@
// Force the first sample to have its own stts entry so that
// we can adjust its value later to maintain the A/V sync.
if (mNumSamples == 3 || currDurationTicks != lastDurationTicks) {
- ALOGV("%s lastDurationUs: %lld us, currDurationTicks: %lld us",
- mIsAudio? "Audio": "Video", lastDurationUs, currDurationTicks);
addOneSttsTableEntry(sampleCount, lastDurationTicks);
sampleCount = 1;
} else {
++sampleCount;
}
- if (!mIsAudio) {
- currCttsDurTicks =
- ((cttsDeltaTimeUs * mTimeScale + 500000LL) / 1000000LL -
- (lastCttsTimeUs * mTimeScale + 500000LL) / 1000000LL);
- if (currCttsDurTicks != lastCttsDurTicks) {
- addOneCttsTableEntry(cttsSampleCount, lastCttsDurTicks);
- cttsSampleCount = 1;
- } else {
- ++cttsSampleCount;
- }
- }
}
if (mSamplesHaveSameSize) {
if (mNumSamples >= 2 && previousSampleSize != sampleSize) {
@@ -2052,11 +2064,6 @@
lastDurationTicks = currDurationTicks;
lastTimestampUs = timestampUs;
- if (!mIsAudio) {
- lastCttsDurTicks = currCttsDurTicks;
- lastCttsTimeUs = cttsDeltaTimeUs;
- }
-
if (isSync != 0) {
addOneStssTableEntry(mNumSamples);
}
@@ -2125,10 +2132,8 @@
if (mNumSamples == 1) {
lastDurationUs = 0; // A single sample's duration
lastDurationTicks = 0;
- lastCttsDurTicks = 0;
} else {
++sampleCount; // Count for the last sample
- ++cttsSampleCount;
}
if (mNumSamples <= 2) {
@@ -2140,7 +2145,14 @@
addOneSttsTableEntry(sampleCount, lastDurationTicks);
}
- addOneCttsTableEntry(cttsSampleCount, lastCttsDurTicks);
+ // The last ctts table entry may not have been written yet, and this
+ // is to make sure that the pending entry gets written out.
+ if (currCttsOffsetTimeTicks == lastCttsOffsetTimeTicks) {
+ if (cttsSampleCount > 0) {
+ addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks);
+ }
+ }
+
mTrackDurationUs += lastDurationUs;
mReachedEOS = true;
@@ -2406,7 +2418,7 @@
mOwner->writeInt16(0x18); // depth
mOwner->writeInt16(-1); // predefined
- CHECK(23 + mCodecSpecificDataSize < 128);
+ CHECK_LT(23 + mCodecSpecificDataSize, 128);
if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
writeMp4vEsdsBox();
@@ -2465,10 +2477,10 @@
void MPEG4Writer::Track::writeMp4aEsdsBox() {
mOwner->beginBox("esds");
CHECK(mCodecSpecificData);
- CHECK(mCodecSpecificDataSize > 0);
+ CHECK_GT(mCodecSpecificDataSize, 0);
// Make sure all sizes encode to a single byte.
- CHECK(mCodecSpecificDataSize + 23 < 128);
+ CHECK_LT(mCodecSpecificDataSize + 23, 128);
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt8(0x03); // ES_DescrTag
@@ -2502,7 +2514,7 @@
void MPEG4Writer::Track::writeMp4vEsdsBox() {
CHECK(mCodecSpecificData);
- CHECK(mCodecSpecificDataSize > 0);
+ CHECK_GT(mCodecSpecificDataSize, 0);
mOwner->beginBox("esds");
mOwner->writeInt32(0); // version=0, flags=0
@@ -2662,7 +2674,7 @@
void MPEG4Writer::Track::writeAvccBox() {
CHECK(mCodecSpecificData);
- CHECK(mCodecSpecificDataSize >= 5);
+ CHECK_GE(mCodecSpecificDataSize, 5);
// Patch avcc's lengthSize field to match the number
// of bytes we use to indicate the size of a nal unit.
@@ -2690,23 +2702,26 @@
mOwner->endBox(); // pasp
}
+int32_t MPEG4Writer::Track::getStartTimeOffsetScaledTime() const {
+ int64_t trackStartTimeOffsetUs = 0;
+ int64_t moovStartTimeUs = mOwner->getStartTimestampUs();
+ if (mStartTimestampUs != moovStartTimeUs) {
+ CHECK_GT(mStartTimestampUs, moovStartTimeUs);
+ trackStartTimeOffsetUs = mStartTimestampUs - moovStartTimeUs;
+ }
+ return (trackStartTimeOffsetUs * mTimeScale + 500000LL) / 1000000LL;
+}
+
void MPEG4Writer::Track::writeSttsBox() {
mOwner->beginBox("stts");
mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt32(mNumSttsTableEntries);
// Compensate for small start time difference from different media tracks
- int64_t trackStartTimeOffsetUs = 0;
- int64_t moovStartTimeUs = mOwner->getStartTimestampUs();
- if (mStartTimestampUs != moovStartTimeUs) {
- CHECK(mStartTimestampUs > moovStartTimeUs);
- trackStartTimeOffsetUs = mStartTimestampUs - moovStartTimeUs;
- }
List<SttsTableEntry>::iterator it = mSttsTableEntries.begin();
CHECK(it != mSttsTableEntries.end() && it->sampleCount == 1);
mOwner->writeInt32(it->sampleCount);
- int32_t dur = (trackStartTimeOffsetUs * mTimeScale + 500000LL) / 1000000LL;
- mOwner->writeInt32(dur + it->sampleDuration);
+ mOwner->writeInt32(getStartTimeOffsetScaledTime() + it->sampleDuration);
int64_t totalCount = 1;
while (++it != mSttsTableEntries.end()) {
@@ -2714,7 +2729,7 @@
mOwner->writeInt32(it->sampleDuration);
totalCount += it->sampleCount;
}
- CHECK(totalCount == mNumSamples);
+ CHECK_EQ(totalCount, mNumSamples);
mOwner->endBox(); // stts
}
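A quick worked example of getStartTimeOffsetScaledTime(), which now feeds both the first stts entry above and, after this patch, the first ctts entry: assuming an illustrative video timescale of 90000 and a track that starts 33366 us after the movie start time, the padding is (33366 * 90000 + 500000) / 1000000 = 3003 ticks, added on top of the first sample's own duration or offset.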
@@ -2723,6 +2738,11 @@
return;
}
+ // There are no B frames at all
+ if (mMinCttsOffsetTimeUs == mMaxCttsOffsetTimeUs) {
+ return;
+ }
+
// Do not write ctts box when there is no need to have it.
if ((mNumCttsTableEntries == 1 &&
mCttsTableEntries.begin()->sampleDuration == 0) ||
@@ -2730,24 +2750,29 @@
return;
}
- ALOGV("ctts box has %d entries", mNumCttsTableEntries);
+ ALOGD("ctts box has %d entries with range [%lld, %lld]",
+ mNumCttsTableEntries, mMinCttsOffsetTimeUs, mMaxCttsOffsetTimeUs);
mOwner->beginBox("ctts");
- if (mHasNegativeCttsDeltaDuration) {
- mOwner->writeInt32(0x00010000); // version=1, flags=0
- } else {
- mOwner->writeInt32(0); // version=0, flags=0
- }
+ // Version 1 allows the use of negative offset time values, but
+ // we are sticking to version 0 for now.
+ mOwner->writeInt32(0); // version=0, flags=0
mOwner->writeInt32(mNumCttsTableEntries);
- int64_t totalCount = 0;
- for (List<CttsTableEntry>::iterator it = mCttsTableEntries.begin();
- it != mCttsTableEntries.end(); ++it) {
+ // Compensate for small start time difference from different media tracks
+ List<CttsTableEntry>::iterator it = mCttsTableEntries.begin();
+ CHECK(it != mCttsTableEntries.end() && it->sampleCount == 1);
+ mOwner->writeInt32(it->sampleCount);
+ mOwner->writeInt32(getStartTimeOffsetScaledTime() +
+ it->sampleDuration - mMinCttsOffsetTimeUs);
+
+ int64_t totalCount = 1;
+ while (++it != mCttsTableEntries.end()) {
mOwner->writeInt32(it->sampleCount);
- mOwner->writeInt32(it->sampleDuration);
+ mOwner->writeInt32(it->sampleDuration - mMinCttsOffsetTimeUs);
totalCount += it->sampleCount;
}
- CHECK(totalCount == mNumSamples);
+ CHECK_EQ(totalCount, mNumSamples);
mOwner->endBox(); // ctts
}
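For clarity, a sketch of the resulting version-0 ctts payload (reusing the CttsEntry struct from the earlier sketch; the writer calls are illustrative stand-ins for MPEG4Writer's beginBox()/writeInt32()/endBox()): every offset is shifted down by the smallest offset seen, which cancels the kMaxCttsOffsetTimeUs bias, the first entry additionally absorbs the track start-time offset, and all values must remain non-negative because version 0 stores them unsigned. When the minimum and maximum offsets are equal (no B frames), the box is skipped entirely, as the new early return above shows.

    #include <cstdint>
    #include <vector>

    void writeCttsPayloadV0(
            std::vector<uint32_t> &out,              // collected 32-bit words
            const std::vector<CttsEntry> &entries,   // from buildCttsEntries()
            uint32_t startTimeOffsetTicks,           // getStartTimeOffsetScaledTime()
            uint32_t minOffsetTicks) {               // smallest offset seen
        out.push_back(0);                            // version=0, flags=0
        out.push_back((uint32_t)entries.size());     // entry count

        for (size_t i = 0; i < entries.size(); ++i) {
            out.push_back(entries[i].sampleCount);
            uint32_t offset = entries[i].sampleOffsetTicks - minOffsetTicks;
            if (i == 0) {
                offset += startTimeOffsetTicks;      // compensate A/V start skew
            }
            out.push_back(offset);                   // unsigned in version 0
        }
    }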
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 381320b..470f750 100755
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -2187,7 +2187,7 @@
}
}
-int64_t OMXCodec::retrieveDecodingTimeUs(bool isCodecSpecific) {
+int64_t OMXCodec::getDecodingTimeUs() {
CHECK(mIsEncoder && mIsVideo);
if (mDecodingTimeList.empty()) {
@@ -2199,12 +2199,7 @@
List<int64_t>::iterator it = mDecodingTimeList.begin();
int64_t timeUs = *it;
-
- // If the output buffer is codec specific configuration,
- // do not remove the decoding time from the list.
- if (!isCodecSpecific) {
- mDecodingTimeList.erase(it);
- }
+ mDecodingTimeList.erase(it);
return timeUs;
}
@@ -2384,7 +2379,7 @@
}
if (mIsEncoder && mIsVideo) {
- int64_t decodingTimeUs = retrieveDecodingTimeUs(isCodecSpecific);
+ int64_t decodingTimeUs = isCodecSpecific? 0: getDecodingTimeUs();
buffer->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
}
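The rename plus the call-site change above reduce the encoder's timestamp handling to a plain FIFO of input (decoding) times: output buffers arrive in decoding order, each real frame pops exactly one queued time, and codec-config buffers (SPS/PPS and the like) are tagged with 0 without consuming an entry. A minimal sketch of that pattern with illustrative names (the real queue is OMXCodec::mDecodingTimeList, and the real code handles an empty list separately):

    #include <cstdint>
    #include <list>

    struct DecodingTimeQueue {
        std::list<int64_t> pending;   // input times, in decoding (queue) order

        void onInputBuffer(int64_t timeUs) {     // one push per queued frame
            pending.push_back(timeUs);
        }

        // One pop per output buffer that carries a real frame; codec-config
        // output carries no frame and must not consume a queued timestamp.
        int64_t onOutputBuffer(bool isCodecSpecific) {
            if (isCodecSpecific) {
                return 0;
            }
            int64_t timeUs = pending.front();    // assumed non-empty here
            pending.pop_front();
            return timeUs;
        }
    };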
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index a1644d2..a0db719 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -93,7 +93,10 @@
void advance();
void reset();
- void seek(int64_t seekTimeUs, bool seekToKeyFrame);
+
+ void seek(
+ int64_t seekTimeUs, bool seekToKeyFrame,
+ int64_t *actualFrameTimeUs);
const mkvparser::Block *block() const;
int64_t blockTimeUs() const;
@@ -303,22 +306,52 @@
} while (!eos() && block()->GetTrackNumber() != mTrackNum);
}
-void BlockIterator::seek(int64_t seekTimeUs, bool seekToKeyFrame) {
+void BlockIterator::seek(
+ int64_t seekTimeUs, bool seekToKeyFrame,
+ int64_t *actualFrameTimeUs) {
Mutex::Autolock autoLock(mExtractor->mLock);
- mCluster = mExtractor->mSegment->FindCluster(seekTimeUs * 1000ll);
+ *actualFrameTimeUs = -1ll;
+
+ int64_t seekTimeNs = seekTimeUs * 1000ll;
+
+ mCluster = mExtractor->mSegment->FindCluster(seekTimeNs);
mBlockEntry = NULL;
mBlockEntryIndex = 0;
- do {
- advance_l();
- }
- while (!eos() && block()->GetTrackNumber() != mTrackNum);
+ long prevKeyFrameBlockEntryIndex = -1;
- if (seekToKeyFrame) {
- while (!eos() && !mBlockEntry->GetBlock()->IsKey()) {
- advance_l();
+ for (;;) {
+ advance_l();
+
+ if (eos()) {
+ break;
}
+
+ if (block()->GetTrackNumber() != mTrackNum) {
+ continue;
+ }
+
+ if (block()->IsKey()) {
+ prevKeyFrameBlockEntryIndex = mBlockEntryIndex - 1;
+ }
+
+ int64_t timeNs = block()->GetTime(mCluster);
+
+ if (timeNs >= seekTimeNs) {
+ *actualFrameTimeUs = (timeNs + 500ll) / 1000ll;
+ break;
+ }
+ }
+
+ if (eos()) {
+ return;
+ }
+
+ if (seekToKeyFrame && !block()->IsKey()) {
+ CHECK_GE(prevKeyFrameBlockEntryIndex, 0);
+ mBlockEntryIndex = prevKeyFrameBlockEntryIndex;
+ advance_l();
}
}
@@ -397,6 +430,8 @@
MediaBuffer **out, const ReadOptions *options) {
*out = NULL;
+ int64_t targetSampleTimeUs = -1ll;
+
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
if (options && options->getSeekTo(&seekTimeUs, &mode)
@@ -406,10 +441,14 @@
// Apparently keyframe indication in audio tracks is unreliable,
// fortunately in all our currently supported audio encodings every
// frame is effectively a keyframe.
- mBlockIter.seek(seekTimeUs, !mIsAudio);
+ int64_t actualFrameTimeUs;
+ mBlockIter.seek(seekTimeUs, !mIsAudio, &actualFrameTimeUs);
+
+ if (mode == ReadOptions::SEEK_CLOSEST) {
+ targetSampleTimeUs = actualFrameTimeUs;
+ }
}
-again:
while (mPendingFrames.empty()) {
status_t err = readBlock();
@@ -424,6 +463,11 @@
mPendingFrames.erase(mPendingFrames.begin());
if (mType != AVC) {
+ if (targetSampleTimeUs >= 0ll) {
+ frame->meta_data()->setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
*out = frame;
return OK;
@@ -506,6 +550,11 @@
frame->release();
frame = NULL;
+ if (targetSampleTimeUs >= 0ll) {
+ buffer->meta_data()->setInt64(
+ kKeyTargetTime, targetSampleTimeUs);
+ }
+
*out = buffer;
return OK;
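Taken together with the AwesomePlayer change earlier in this patch (SEEK_CLOSEST instead of SEEK_CLOSEST_SYNC), the contract is: the extractor rewinds to the preceding keyframe, records the time of the first block at or after the requested position, and tags the outgoing buffer with kKeyTargetTime; the decoder downstream is expected to decode but discard output frames whose timestamps fall before that target. A hedged sketch of a caller inspecting that contract (illustrative helper, not part of the patch):

    #include <media/stagefright/MediaBuffer.h>
    #include <media/stagefright/MediaSource.h>
    #include <media/stagefright/MetaData.h>

    // Read the first buffer after a SEEK_CLOSEST seek on a track such as
    // the Matroska source above and look at the target-time tag.
    void probeSeekClosest(const android::sp<android::MediaSource> &track,
                          int64_t seekTimeUs) {
        using namespace android;

        MediaSource::ReadOptions options;
        options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_CLOSEST);

        MediaBuffer *buffer = NULL;
        if (track->read(&buffer, &options) != OK) {
            return;
        }

        int64_t timeUs = 0, targetUs = -1;
        buffer->meta_data()->findInt64(kKeyTime, &timeUs);   // keyframe time
        if (buffer->meta_data()->findInt64(kKeyTargetTime, &targetUs)) {
            // timeUs may be earlier than targetUs: the frames in between
            // still have to be decoded, but their decoded output is dropped
            // until targetUs is reached.
        }
        buffer->release();
    }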
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index d5d1b6c..d4f8d5c 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -748,7 +748,7 @@
{
status_t result;
- ALOGV("setParameters(): io %d, keyvalue %s, tid %d, calling tid %d",
+ ALOGV("setParameters(): io %d, keyvalue %s, tid %d, calling pid %d",
ioHandle, keyValuePairs.string(), gettid(), IPCThreadState::self()->getCallingPid());
// check calling permissions
if (!settingsAllowed()) {
@@ -821,7 +821,7 @@
String8 AudioFlinger::getParameters(audio_io_handle_t ioHandle, const String8& keys) const
{
-// ALOGV("getParameters() io %d, keys %s, tid %d, calling tid %d",
+// ALOGV("getParameters() io %d, keys %s, tid %d, calling pid %d",
// ioHandle, keys.string(), gettid(), IPCThreadState::self()->getCallingPid());
if (ioHandle == 0) {
@@ -1524,12 +1524,11 @@
// all tracks in same audio session must share the same routing strategy otherwise
// conflicts will happen when tracks are moved from one output to another by audio policy
// manager
- uint32_t strategy =
- AudioSystem::getStrategyForStream((audio_stream_type_t)streamType);
+ uint32_t strategy = AudioSystem::getStrategyForStream(streamType);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> t = mTracks[i];
if (t != 0) {
- uint32_t actual = AudioSystem::getStrategyForStream((audio_stream_type_t)t->type());
+ uint32_t actual = AudioSystem::getStrategyForStream(t->streamType());
if (sessionId == t->sessionId() && strategy != actual) {
ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
strategy, actual);
@@ -1551,7 +1550,7 @@
if (chain != 0) {
ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
- chain->setStrategy(AudioSystem::getStrategyForStream((audio_stream_type_t)track->type()));
+ chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
chain->incTrackCnt();
}
@@ -1781,7 +1780,7 @@
sp<Track> track = mTracks[i];
if (sessionId == track->sessionId() &&
!(track->mCblk->flags & CBLK_INVALID_MSK)) {
- return AudioSystem::getStrategyForStream((audio_stream_type_t) track->type());
+ return AudioSystem::getStrategyForStream(track->streamType());
}
}
return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
@@ -2162,7 +2161,7 @@
// compute volume for this track
uint32_t vl, vr, va;
if (track->isMuted() || track->isPausing() ||
- mStreamTypes[track->type()].mute) {
+ mStreamTypes[track->streamType()].mute) {
vl = vr = va = 0;
if (track->isPausing()) {
track->setPaused();
@@ -2170,7 +2169,7 @@
} else {
// read original volumes with volume control
- float typeVolume = mStreamTypes[track->type()].volume;
+ float typeVolume = mStreamTypes[track->streamType()].volume;
float v = masterVolume * typeVolume;
uint32_t vlr = cblk->getVolumeLR();
vl = vlr & 0xFFFF;
@@ -2331,7 +2330,7 @@
size_t size = mTracks.size();
for (size_t i = 0; i < size; i++) {
sp<Track> t = mTracks[i];
- if (t->type() == streamType) {
+ if (t->streamType() == streamType) {
android_atomic_or(CBLK_INVALID_ON, &t->mCblk->flags);
t->mCblk->cv.signal();
}
@@ -2704,13 +2703,13 @@
// compute volume for this track
float left, right;
if (track->isMuted() || mMasterMute || track->isPausing() ||
- mStreamTypes[track->type()].mute) {
+ mStreamTypes[track->streamType()].mute) {
left = right = 0;
if (track->isPausing()) {
track->setPaused();
}
} else {
- float typeVolume = mStreamTypes[track->type()].volume;
+ float typeVolume = mStreamTypes[track->streamType()].volume;
float v = mMasterVolume * typeVolume;
uint32_t vlr = cblk->getVolumeLR();
float v_clamped = v * (vlr & 0xFFFF);
@@ -3391,7 +3390,7 @@
mName = playbackThread->getTrackName_l();
mMainBuffer = playbackThread->mixBuffer();
}
- ALOGV("Track constructor name %d, calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+ ALOGV("Track constructor name %d, calling pid %d", mName, IPCThreadState::self()->getCallingPid());
if (mName < 0) {
ALOGE("no more track names available");
}
@@ -3428,9 +3427,7 @@
if (thread != 0) {
if (!isOutputTrack()) {
if (mState == ACTIVE || mState == RESUMING) {
- AudioSystem::stopOutput(thread->id(),
- (audio_stream_type_t)mStreamType,
- mSessionId);
+ AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
// to track the speaker usage
addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
@@ -3523,7 +3520,7 @@
status_t AudioFlinger::PlaybackThread::Track::start()
{
status_t status = NO_ERROR;
- ALOGV("start(%d), calling thread %d session %d",
+ ALOGV("start(%d), calling pid %d session %d",
mName, IPCThreadState::self()->getCallingPid(), mSessionId);
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
@@ -3541,9 +3538,7 @@
if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
thread->mLock.unlock();
- status = AudioSystem::startOutput(thread->id(),
- (audio_stream_type_t)mStreamType,
- mSessionId);
+ status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
thread->mLock.lock();
// to track the speaker usage
@@ -3565,7 +3560,7 @@
void AudioFlinger::PlaybackThread::Track::stop()
{
- ALOGV("stop(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+ ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
Mutex::Autolock _l(thread->mLock);
@@ -3581,9 +3576,7 @@
}
if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
thread->mLock.unlock();
- AudioSystem::stopOutput(thread->id(),
- (audio_stream_type_t)mStreamType,
- mSessionId);
+ AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
thread->mLock.lock();
// to track the speaker usage
@@ -3594,7 +3587,7 @@
void AudioFlinger::PlaybackThread::Track::pause()
{
- ALOGV("pause(%d), calling thread %d", mName, IPCThreadState::self()->getCallingPid());
+ ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
Mutex::Autolock _l(thread->mLock);
@@ -3603,9 +3596,7 @@
ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
if (!isOutputTrack()) {
thread->mLock.unlock();
- AudioSystem::stopOutput(thread->id(),
- (audio_stream_type_t)mStreamType,
- mSessionId);
+ AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
thread->mLock.lock();
// to track the speaker usage
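The type() to streamType() rename above is what lets these call sites index mStreamTypes and call AudioSystem::getStrategyForStream() with a properly typed audio_stream_type_t instead of casting an int each time. Schematically (illustrative types, not AudioFlinger's real members):

    #include <system/audio.h>

    // Per-stream mixer state, in the spirit of AudioFlinger::stream_type_t.
    struct StreamState {
        float volume;
        bool mute;
        StreamState() : volume(1.0f), mute(false) {}
    };

    struct TrackLike {
        audio_stream_type_t mStreamType;
        audio_stream_type_t streamType() const { return mStreamType; }
    };

    // Effective gain for one track: a muted stream collapses to zero,
    // otherwise the per-stream volume scales the master volume
    // (per-track cblk gains omitted for brevity).
    float effectiveVolume(const StreamState (&streams)[AUDIO_STREAM_CNT],
                          const TrackLike &track, float masterVolume) {
        const StreamState &s = streams[track.streamType()];
        return s.mute ? 0.0f : masterVolume * s.volume;
    }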
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 97103c4..955648f 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -564,6 +564,18 @@
KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > > mSuspendedSessions;
};
+ struct stream_type_t {
+ stream_type_t()
+ : volume(1.0f),
+ mute(false),
+ valid(true)
+ {
+ }
+ float volume;
+ bool mute;
+ bool valid;
+ };
+
// --- PlaybackThread ---
class PlaybackThread : public ThreadBase {
public:
@@ -600,7 +612,7 @@
return mName;
}
- audio_stream_type_t type() const {
+ audio_stream_type_t streamType() const {
return mStreamType;
}
status_t attachAuxEffect(int EffectId);
@@ -758,18 +770,6 @@
void setStreamValid(audio_stream_type_t streamType, bool valid);
- struct stream_type_t {
- stream_type_t()
- : volume(1.0f),
- mute(false),
- valid(true)
- {
- }
- float volume;
- bool mute;
- bool valid;
- };
-
protected:
int16_t* mMixBuffer;
int mSuspended;
@@ -1412,7 +1412,7 @@
DefaultKeyedVector< audio_io_handle_t, sp<PlaybackThread> > mPlaybackThreads;
- PlaybackThread::stream_type_t mStreamTypes[AUDIO_STREAM_CNT];
+ stream_type_t mStreamTypes[AUDIO_STREAM_CNT];
// both are protected by mLock
float mMasterVolume;
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 10efd85..af464b2 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -542,7 +542,7 @@
}
void AudioPolicyService::binderDied(const wp<IBinder>& who) {
- ALOGW("binderDied() %p, tid %d, calling tid %d", who.unsafe_get(), gettid(),
+ ALOGW("binderDied() %p, tid %d, calling pid %d", who.unsafe_get(), gettid(),
IPCThreadState::self()->getCallingPid());
}