Implement server-side playback timestamps with 64-bit accuracy
Provide server timestamps if the HAL doesn't provide them.
Provide monotonic to boottime translation.
Integrate record and playback timestamps.
Bug: 17472992
Bug: 22871200
Bug: 26400089
Bug: 26682703
Change-Id: If1974f94232fcce7ba0bbcdf63d9e54ed51918ff
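The monotonic-to-boottime translation mentioned above boils down to caching the offset between CLOCK_BOOTTIME and CLOCK_MONOTONIC and storing it in the timestamp's timebase-offset slot. A minimal, self-contained sketch of that computation (the helper name is illustrative; in the diff below the offset actually comes from gBoottime.getBoottimeOffset()):

    #include <time.h>
    #include <stdint.h>

    // Returns (CLOCK_BOOTTIME - CLOCK_MONOTONIC) in nanoseconds. Adding this
    // offset to a monotonic timestamp yields the boottime equivalent, which
    // keeps advancing across device suspend.
    static int64_t boottimeOffsetNs() {
        struct timespec mono, boot;
        clock_gettime(CLOCK_MONOTONIC, &mono);
        clock_gettime(CLOCK_BOOTTIME, &boot);
        const int64_t monoNs = int64_t(mono.tv_sec) * 1000000000LL + mono.tv_nsec;
        const int64_t bootNs = int64_t(boot.tv_sec) * 1000000000LL + boot.tv_nsec;
        return bootNs - monoNs;
    }
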
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 1446d19..d31b8d3 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -141,6 +141,10 @@
FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
const size_t frameCount = current->mFrameCount;
+ // update boottime offset, in case it has changed
+ mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
+ mBoottimeOffset.load();
+
// handle state change here, but since we want to diff the state,
// we're prepared for previous == &sInitial the first time through
unsigned previousTrackMask;
@@ -341,21 +345,23 @@
currentTrackMask &= ~(1 << i);
const FastTrack* fastTrack = &current->mFastTracks[i];
- // Refresh the per-track timestamp
- if (mTimestampStatus == NO_ERROR) {
- uint32_t trackFramesWrittenButNotPresented =
- mNativeFramesWrittenButNotPresented;
- uint32_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased();
- // Can't provide an AudioTimestamp before first frame presented,
- // or during the brief 32-bit wraparound window
- if (trackFramesWritten >= trackFramesWrittenButNotPresented) {
- AudioTimestamp perTrackTimestamp;
- perTrackTimestamp.mPosition =
- trackFramesWritten - trackFramesWrittenButNotPresented;
- perTrackTimestamp.mTime = mTimestamp.mTime;
- fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp);
- }
+ const int64_t trackFramesWrittenButNotPresented =
+ mNativeFramesWrittenButNotPresented;
+ const int64_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased();
+ ExtendedTimestamp perTrackTimestamp(mTimestamp);
+
+ // Can't provide an ExtendedTimestamp before first frame presented.
+ // Also, timestamp may not go to very last frame on stop().
+ if (trackFramesWritten >= trackFramesWrittenButNotPresented &&
+ perTrackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0) {
+ perTrackTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ trackFramesWritten - trackFramesWrittenButNotPresented;
+ } else {
+ perTrackTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+ perTrackTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
}
+ perTrackTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = trackFramesWritten;
+ fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp);
int name = mFastTrackNames[i];
ALOG_ASSERT(name >= 0);
@@ -449,17 +455,34 @@
mAttemptedWrite = true;
// FIXME count # of writes blocked excessively, CPU usage, etc. for dump
- mTimestampStatus = mOutputSink->getTimestamp(mTimestamp);
- if (mTimestampStatus == NO_ERROR) {
- uint32_t totalNativeFramesPresented = mTimestamp.mPosition;
+ ExtendedTimestamp timestamp; // local
+ status_t status = mOutputSink->getTimestamp(timestamp);
+ if (status == NO_ERROR) {
+ const int64_t totalNativeFramesPresented =
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
mNativeFramesWrittenButNotPresented =
mTotalNativeFramesWritten - totalNativeFramesPresented;
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
} else {
// HAL reported that more frames were presented than were written
- mTimestampStatus = INVALID_OPERATION;
+ mNativeFramesWrittenButNotPresented = 0;
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = 0;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = -1;
+ status = INVALID_OPERATION;
}
}
+ if (status == NO_ERROR) {
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+ } else {
+ // fetch server time if we can't get timestamp
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] =
+ systemTime(SYSTEM_TIME_MONOTONIC);
+ }
}
}
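
The kernel-location bookkeeping in the FastMixer hunks above reduces to cumulative 64-bit frame counters that never wrap. A condensed sketch of the same arithmetic, using illustrative names rather than the actual FastMixer members:

    #include <cstdint>

    // Derive a track's kernel-side (presented) position from cumulative counters.
    // Returns -1 when no valid position can be reported yet, mirroring the
    // "mTimeNs = -1" convention used above.
    static int64_t perTrackKernelPosition(int64_t sinkFramesWritten,
                                          int64_t sinkFramesPresented,
                                          int64_t trackFramesWritten) {
        if (sinkFramesPresented > sinkFramesWritten) {
            return -1;  // HAL reported more presented than written: invalid
        }
        const int64_t writtenButNotPresented = sinkFramesWritten - sinkFramesPresented;
        if (trackFramesWritten < writtenButNotPresented) {
            return -1;  // none of this track's frames has been presented yet
        }
        return trackFramesWritten - writtenButNotPresented;
    }
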
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index e38878e..3cc7c9f 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -38,7 +38,9 @@
FastMixerStateQueue* sq();
virtual void setMasterMono(bool mono) { mMasterMono.store(mono); /* memory_order_seq_cst */ }
-
+ virtual void setBoottimeOffset(int64_t boottimeOffset) {
+ mBoottimeOffset.store(boottimeOffset); /* memory_order_seq_cst */
+ }
private:
FastMixerStateQueue mSQ;
@@ -79,14 +81,15 @@
unsigned mSampleRate;
int mFastTracksGen;
FastMixerDumpState mDummyFastMixerDumpState;
- uint32_t mTotalNativeFramesWritten; // copied to dumpState->mFramesWritten
+ int64_t mTotalNativeFramesWritten; // copied to dumpState->mFramesWritten
// next 2 fields are valid only when timestampStatus == NO_ERROR
- AudioTimestamp mTimestamp;
- uint32_t mNativeFramesWrittenButNotPresented;
+ ExtendedTimestamp mTimestamp;
+ int64_t mNativeFramesWrittenButNotPresented;
// accessed without lock between multiple threads.
std::atomic_bool mMasterMono;
+ std::atomic_int_fast64_t mBoottimeOffset;
}; // class FastMixer
} // namespace android
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index fe3cc53..fa61af2 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -88,8 +88,8 @@
// ExtendedAudioBufferProvider interface
virtual size_t framesReady() const;
- virtual size_t framesReleased() const;
- virtual void onTimestamp(const AudioTimestamp &timestamp);
+ virtual int64_t framesReleased() const;
+ virtual void onTimestamp(const ExtendedTimestamp &timestamp);
bool isPausing() const { return mState == PAUSING; }
bool isPaused() const { return mState == PAUSED; }
@@ -101,15 +101,15 @@
void flushAck();
bool isResumePending();
void resumeAck();
- void updateTrackFrameInfo(uint32_t trackFramesReleased, uint32_t sinkFramesWritten,
- AudioTimestamp *timeStamp = NULL);
+ void updateTrackFrameInfo(int64_t trackFramesReleased, int64_t sinkFramesWritten,
+ const ExtendedTimestamp &timeStamp);
sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
// framesWritten is cumulative, never reset, and is shared by all tracks
// audioHalFrames is derived from output latency
// FIXME parameters not needed, could get them from the thread
- bool presentationComplete(size_t framesWritten, size_t audioHalFrames);
+ bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
public:
void triggerEvents(AudioSystem::sync_event_t type);
@@ -142,9 +142,9 @@
// zero means not monitoring
// access these three variables only when holding thread lock.
- LinearMap<uint32_t> mFrameMap; // track frame to server frame mapping
- bool mSinkTimestampValid; // valid cached timestamp
- AudioTimestamp mSinkTimestamp;
+ LinearMap<int64_t> mFrameMap; // track frame to server frame mapping
+
+ ExtendedTimestamp mSinkTimestamp;
private:
// The following fields are only for fast tracks, and should be in a subclass
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 4807400..e0d8f75 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1034,6 +1034,8 @@
mNotifiedBatteryStart = true;
}
gBoottime.acquire(mWakeLockToken);
+ mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
+ gBoottime.getBoottimeOffset();
}
void AudioFlinger::ThreadBase::releaseWakeLock()
@@ -2370,13 +2372,14 @@
if (initCheck() != NO_ERROR) {
return INVALID_OPERATION;
}
- size_t framesWritten = mBytesWritten / mFrameSize;
+ int64_t framesWritten = mBytesWritten / mFrameSize;
*halFrames = framesWritten;
if (isSuspended()) {
// return an estimation of rendered frames when the output is suspended
size_t latencyFrames = (latency_l() * mSampleRate) / 1000;
- *dspFrames = framesWritten >= latencyFrames ? framesWritten - latencyFrames : 0;
+ *dspFrames = (uint32_t)
+ (framesWritten >= (int64_t)latencyFrames ? framesWritten - latencyFrames : 0);
return NO_ERROR;
} else {
status_t status;
@@ -2860,42 +2863,31 @@
// and associate with the sink frames written out. We need
// this to convert the sink timestamp to the track timestamp.
if (mNormalSink != 0) {
- bool updateTracks = true;
- bool cacheTimestamp = false;
- AudioTimestamp timeStamp;
- // FIXME: Use a 64 bit mNormalSink->framesWritten() counter.
- // At this time, we must always use cached timestamps even when
- // going through mPipeSink (which is non-blocking). The reason is that
- // the track may be removed from the active list for many hours and
- // the mNormalSink->framesWritten() will wrap making the linear
- // mapping fail.
- //
- // (Also mAudioTrackServerProxy->framesReleased() needs to be
- // updated to 64 bits for 64 bit frame position.)
- //
- if (true /* see comment above, should be: mNormalSink == mOutputSink */) {
- // If we use a hardware device, we must cache the sink timestamp now.
- // hardware devices can block timestamp access during data writes.
- if (mNormalSink->getTimestamp(timeStamp) == NO_ERROR) {
- cacheTimestamp = true;
- } else {
- updateTracks = false;
- }
- }
- if (updateTracks) {
- // sinkFramesWritten for non-offloaded tracks are contiguous
- // even after standby() is called. This is useful for the track frame
- // to sink frame mapping.
- const uint32_t sinkFramesWritten = mNormalSink->framesWritten();
- const size_t size = mActiveTracks.size();
- for (size_t i = 0; i < size; ++i) {
- sp<Track> t = mActiveTracks[i].promote();
- if (t != 0 && !t->isFastTrack()) {
- t->updateTrackFrameInfo(
- t->mAudioTrackServerProxy->framesReleased(),
- sinkFramesWritten,
- cacheTimestamp ? &timeStamp : NULL);
- }
+ // We always fetch the timestamp here because often the downstream
+ // sink will block while writing.
+ ExtendedTimestamp timestamp; // use private copy to fetch
+ (void) mNormalSink->getTimestamp(timestamp);
+ // copy over kernel info
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
+ timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
+ timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
+
+ // sinkFramesWritten for non-offloaded tracks are contiguous
+ // even after standby() is called. This is useful for the track frame
+ // to sink frame mapping.
+ const int64_t sinkFramesWritten = mNormalSink->framesWritten();
+ mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = sinkFramesWritten;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
+
+ const size_t size = mActiveTracks.size();
+ for (size_t i = 0; i < size; ++i) {
+ sp<Track> t = mActiveTracks[i].promote();
+ if (t != 0 && !t->isFastTrack()) {
+ t->updateTrackFrameInfo(
+ t->mAudioTrackServerProxy->framesReleased(),
+ sinkFramesWritten,
+ mTimestamp);
}
}
}
@@ -3209,7 +3201,12 @@
status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
{
if (mNormalSink != 0) {
- return mNormalSink->getTimestamp(timestamp);
+ ExtendedTimestamp ets;
+ status_t status = mNormalSink->getTimestamp(ets);
+ if (status == NO_ERROR) {
+ status = ets.getBestTimestamp(&timestamp);
+ }
+ return status;
}
if ((mType == OFFLOAD || mType == DIRECT)
&& mOutput != NULL && mOutput->stream->get_presentation_position) {
@@ -3925,7 +3922,7 @@
{
size_t audioHALFrames =
(mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
- size_t framesWritten = mBytesWritten / mFrameSize;
+ int64_t framesWritten = mBytesWritten / mFrameSize;
if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
// track stays in active list until presentation is complete
break;
@@ -4255,7 +4252,7 @@
// TODO: use actual buffer filling status instead of latency when available from
// audio HAL
size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
- size_t framesWritten = mBytesWritten / mFrameSize;
+ int64_t framesWritten = mBytesWritten / mFrameSize;
if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
if (track->isStopped()) {
track->reset();
@@ -4796,7 +4793,7 @@
audioHALFrames = 0;
}
- size_t framesWritten = mBytesWritten / mFrameSize;
+ int64_t framesWritten = mBytesWritten / mFrameSize;
if (mStandby || !last ||
track->presentationComplete(framesWritten, audioHALFrames)) {
if (track->isStopping_2()) {
@@ -5343,7 +5340,7 @@
track->mState = TrackBase::STOPPED;
size_t audioHALFrames =
(mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
- size_t framesWritten =
+ int64_t framesWritten =
mBytesWritten / mOutput->getFrameSize();
track->presentationComplete(framesWritten, audioHALFrames);
track->reset();
@@ -5789,9 +5786,6 @@
}
}
- mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
- gBoottime.getBoottimeOffset();
-
// used to request a deferred sleep, to be executed later while mutex is unlocked
uint32_t sleepUs = 0;
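
getTimestamp_l() above folds the per-location ExtendedTimestamp arrays back into a legacy AudioTimestamp via getBestTimestamp(). The selection is roughly the following; this is an illustration of the convention used in this change (a location is valid when its mTimeNs entry is positive), not the framework implementation:

    #include <cstdint>

    // Illustrative location indices: server-side estimate and kernel/HAL-
    // reported presentation position.
    enum Location { SERVER = 0, KERNEL = 1, LOCATION_COUNT = 2 };

    // Prefer the location closest to presentation (KERNEL) and fall back to
    // SERVER when the HAL did not provide a timestamp.
    static bool pickBestLocation(const int64_t timeNs[LOCATION_COUNT],
                                 const int64_t position[LOCATION_COUNT],
                                 int64_t *outTimeNs, int64_t *outPosition) {
        const int order[] = { KERNEL, SERVER };
        for (int loc : order) {
            if (timeNs[loc] > 0) {
                *outTimeNs = timeNs[loc];
                *outPosition = position[loc];
                return true;
            }
        }
        return false;  // nothing valid yet
    }
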
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 7c92c1c..507f197 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -381,7 +381,7 @@
};
void acquireWakeLock(int uid = -1);
- void acquireWakeLock_l(int uid = -1);
+ virtual void acquireWakeLock_l(int uid = -1);
void releaseWakeLock();
void releaseWakeLock_l();
void updateWakeLockUids(const SortedVector<int> &uids);
@@ -460,6 +460,7 @@
sp<NBLog::Writer> mNBLogWriter;
bool mSystemReady;
bool mNotifiedBatteryStart;
+ ExtendedTimestamp mTimestamp;
};
// --- PlaybackThread ---
@@ -691,9 +692,7 @@
// 'volatile' means accessed via atomic operations and no lock.
volatile int32_t mSuspended;
- // FIXME overflows every 6+ hours at 44.1 kHz stereo 16-bit samples
- // mFramesWritten would be better, or 64-bit even better
- size_t mBytesWritten;
+ int64_t mBytesWritten;
private:
// mMasterMute is in both PlaybackThread and in AudioFlinger. When a
// PlaybackThread needs to find out if master-muted, it checks it's local
@@ -868,6 +867,14 @@
virtual uint32_t suspendSleepTimeUs() const;
virtual void cacheParameters_l();
+ virtual void acquireWakeLock_l(int uid = -1) {
+ PlaybackThread::acquireWakeLock_l(uid);
+ if (hasFastMixer()) {
+ mFastMixer->setBoottimeOffset(
+ mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME]);
+ }
+ }
+
// threadLoop snippets
virtual ssize_t threadLoop_write();
virtual void threadLoop_standby();
@@ -1311,8 +1318,6 @@
// rolling index that is never cleared
int32_t mRsmpInRear; // last filled frame + 1
- ExtendedTimestamp mTimestamp;
-
// For dumpsys
const sp<NBAIO_Sink> mTeeSink;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 536581c..a67693f 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -362,7 +362,6 @@
mAuxEffectId(0), mHasVolumeController(false),
mPresentationCompleteFrames(0),
mFrameMap(16 /* sink-frame-to-track-frame map memory */),
- mSinkTimestampValid(false),
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
@@ -591,23 +590,18 @@
return mAudioTrackServerProxy->framesReady();
}
-size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
+int64_t AudioFlinger::PlaybackThread::Track::framesReleased() const
{
return mAudioTrackServerProxy->framesReleased();
}
-void AudioFlinger::PlaybackThread::Track::onTimestamp(const AudioTimestamp &timestamp)
+void AudioFlinger::PlaybackThread::Track::onTimestamp(const ExtendedTimestamp &timestamp)
{
// This call comes from a FastTrack and should be kept lockless.
// The server side frames are already translated to client frames.
+ mAudioTrackServerProxy->setTimestamp(timestamp);
- ExtendedTimestamp ets;
- ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
- timestamp.mTime.tv_sec * 1000000000LL + timestamp.mTime.tv_nsec;
- ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = timestamp.mPosition;
-
- // Caution, this doesn't set the timebase for BOOTTIME properly, but is ignored right now.
- mAudioTrackServerProxy->setTimestamp(ets);
+ // We do not set drained here, as FastTrack timestamp may not go to very last frame.
}
// Don't call for fast tracks; the framesReady() could result in priority inversion
@@ -872,9 +866,8 @@
status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
- // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
- if (isFastTrack()) {
- return INVALID_OPERATION;
+ if (!isOffloaded() && !isDirect()) {
+ return INVALID_OPERATION; // normal tracks handled through SSQ
}
sp<ThreadBase> thread = mThread.promote();
if (thread == 0) {
@@ -883,33 +876,7 @@
Mutex::Autolock _l(thread->mLock);
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-
- if (isOffloaded() || isDirect()) {
- return playbackThread->getTimestamp_l(timestamp);
- }
-
- if (!mFrameMap.hasData()) {
- // WOULD_BLOCK is consistent with AudioTrack::getTimestamp() in the
- // FLUSHED and STOPPED state. We should only return INVALID_OPERATION
- // when this method is not permitted due to configuration or device.
- return WOULD_BLOCK;
- }
- status_t result = OK;
- if (!mSinkTimestampValid) { // if no sink position, try to fetch again
- result = playbackThread->getTimestamp_l(mSinkTimestamp);
- }
-
- if (result == OK) {
- // Lookup the track frame corresponding to the sink frame position.
- timestamp.mPosition = mFrameMap.findX(mSinkTimestamp.mPosition);
- timestamp.mTime = mSinkTimestamp.mTime;
- // ALOGD("track (server-side) timestamp: mPosition(%u) mTime(%llu)",
- // timestamp.mPosition, TIME_TO_NANOS(timestamp.mTime));
- }
- // (Possible) FIXME: mSinkTimestamp is updated only when the track is on
- // the Thread active list. If the track is no longer on the thread active
- // list should we use current time?
- return result;
+ return playbackThread->getTimestamp_l(timestamp);
}
status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
@@ -972,9 +939,12 @@
mAuxBuffer = buffer;
}
-bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
- size_t audioHalFrames)
+bool AudioFlinger::PlaybackThread::Track::presentationComplete(
+ int64_t framesWritten, size_t audioHalFrames)
{
+ // TODO: improve this based on FrameMap if it exists, to ensure full drain.
+ // This assists in proper timestamp computation as well as wakelock management.
+
// a track is considered presented when the total number of frames written to audio HAL
// corresponds to the number of frames written when presentationComplete() is called for the
// first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
@@ -982,15 +952,17 @@
// to detect when all frames have been played. In this case framesWritten isn't
// useful because it doesn't always reflect whether there is data in the h/w
// buffers, particularly if a track has been paused and resumed during draining
- ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
- mPresentationCompleteFrames, framesWritten);
+ ALOGV("presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
+ (long long)mPresentationCompleteFrames, (long long)framesWritten);
if (mPresentationCompleteFrames == 0) {
mPresentationCompleteFrames = framesWritten + audioHalFrames;
- ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
- mPresentationCompleteFrames, audioHalFrames);
+ ALOGV("presentationComplete() reset: mPresentationCompleteFrames %lld audioHalFrames %zu",
+ (long long)mPresentationCompleteFrames, audioHalFrames);
}
- if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
+ if ((!isOffloaded() && !isDirect() && !isFastTrack()
+ && framesWritten >= mPresentationCompleteFrames
+ && mAudioTrackServerProxy->isDrained()) || isOffloaded()) {
triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
mAudioTrackServerProxy->setStreamEndDone();
return true;
@@ -1101,14 +1073,34 @@
//To be called with thread lock held
void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
- uint32_t trackFramesReleased, uint32_t sinkFramesWritten, AudioTimestamp *timeStamp) {
+ int64_t trackFramesReleased, int64_t sinkFramesWritten,
+ const ExtendedTimestamp &timeStamp) {
+ // update frame map
mFrameMap.push(trackFramesReleased, sinkFramesWritten);
- if (timeStamp == NULL) {
- mSinkTimestampValid = false;
- } else {
- mSinkTimestampValid = true;
- mSinkTimestamp = *timeStamp;
+
+ // adjust server times and set drained state.
+ //
+ // Our timestamps are only updated when the track is on the Thread active list.
+ // We need to ensure that tracks are not removed before full drain.
+ ExtendedTimestamp local = timeStamp;
+ bool checked = false;
+ for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
+ i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
+ // Lookup the track frame corresponding to the sink frame position.
+ if (local.mTimeNs[i] > 0) {
+ local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
+ // check drain state from the latest stage in the pipeline.
+ if (!checked) {
+ mAudioTrackServerProxy->setDrained(
+ local.mPosition[i] >= mAudioTrackServerProxy->framesReleased());
+ checked = true;
+ }
+ }
}
+ if (!checked) { // no server info, assume drained.
+ mAudioTrackServerProxy->setDrained(true);
+ }
+ mServerProxy->setTimestamp(local);
}
// ----------------------------------------------------------------------------
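
updateTrackFrameInfo() is where sink-frame positions are translated back into track-frame positions through the LinearMap, and where the drained state is decided. A simplified stand-in for that translation, keeping only two (trackFrames, sinkFrames) samples where the real LinearMap keeps a history (names and structure are illustrative):

    #include <cstdint>

    // One (track position, sink position) sample, captured each mixer pass.
    struct FrameSample { int64_t trackFrames; int64_t sinkFrames; };

    // Interpolate the track position that corresponds to a sink position,
    // analogous to what LinearMap<int64_t>::findX() does over its samples.
    static int64_t sinkToTrackFrames(const FrameSample &older, const FrameSample &newer,
                                     int64_t sinkPosition) {
        if (newer.sinkFrames == older.sinkFrames) {
            return newer.trackFrames;  // no sink progress between samples
        }
        return older.trackFrames + (newer.trackFrames - older.trackFrames)
                * (sinkPosition - older.sinkFrames)
                / (newer.sinkFrames - older.sinkFrames);
    }

The drained decision in the hunk above then compares the translated position from the latest valid location against framesReleased(): once the sink has consumed everything the track released, setDrained(true) is reported.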