aaudio: fix timestamps and underruns
Start the client only after valid timing data arrives from the server.
That can take a while because of long cold-start times.
The client is then synced to the current position of the service,
so it starts cleanly with no underruns.
Bug: 63918065
Test: test_timestamps.cpp
Change-Id: I5d01eb844e4b14cd5477d56ea1dd9e309abc1c52
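
Reviewer note (not part of the patch): a minimal standalone sketch of the
catch-up step described above, using a hypothetical EndpointSketch struct in
place of mAudioEndpoint. When the MMAP pointer has already advanced during a
slow cold start, the client counter is snapped to the device counter and the
difference is folded into a frames offset so positions reported to the app
stay consistent; this mirrors the playback-side
AudioStreamInternalPlay::advanceClientToMatchServerPosition() below.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for the shared counters held by the audio endpoint.
    struct EndpointSketch {
        int64_t readCounter = 0;    // playback: frames consumed by the device (MMAP pointer)
        int64_t writeCounter = 0;   // playback: frames written by the client
    };

    // Snap the client write position to the device read position and fold the
    // difference into an offset that is later applied to app-visible positions.
    static void advanceClientToMatchServer(EndpointSketch &ep,
                                           int64_t &framesOffsetFromService) {
        int64_t offset = ep.writeCounter - ep.readCounter;
        framesOffsetFromService += offset;   // keeps reported positions consistent
        ep.writeCounter = ep.readCounter;    // cannot move the hardware pointer,
                                             // so move the client counter instead
    }

    int main() {
        EndpointSketch ep;
        int64_t framesOffsetFromService = 0;
        ep.readCounter = 480;                // device already consumed one burst
        ep.writeCounter = 0;                 // client has not written yet (cold start)
        advanceClientToMatchServer(ep, framesOffsetFromService);
        printf("writeCounter = %lld, offset = %lld\n",
               (long long) ep.writeCounter,
               (long long) framesOffsetFromService);
        // Prints: writeCounter = 480, offset = -480
        return 0;
    }
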
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 7f2e495..bbbd439 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -55,7 +55,7 @@
// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4
-#define LOG_TIMESTAMPS 0
+#define LOG_TIMESTAMPS 0
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
: AudioStream()
@@ -66,9 +66,9 @@
, mStreamVolume(1.0f)
, mInService(inService)
, mServiceInterface(serviceInterface)
+ , mAtomicTimestamp()
, mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
, mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
- , mAtomicTimestamp()
{
ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
mWakeupDelayNanos, mMinimumSleepNanos);
@@ -250,25 +250,45 @@
}
}
+/*
+ * It normally takes about 20-30 msec to start a stream on the server.
+ * But the first time can take as much as 200-300 msec. The HW
+ * starts right away so by the time the client gets a chance to write into
+ * the buffer, it is already in a deep underflow state. That can cause the
+ * XRunCount to be non-zero, which could lead an app to tune its latency higher.
+ * To avoid this problem, we set a request for the processing code to start the
+ * client stream at the same position as the server stream.
+ * The processing code will then save the current offset
+ * between client and server and apply that to any position given to the app.
+ */
aaudio_result_t AudioStreamInternal::requestStart()
{
int64_t startTime;
- ALOGD("AudioStreamInternal()::requestStart()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::requestStart() mServiceStreamHandle invalid");
+ ALOGE("requestStart() mServiceStreamHandle invalid");
return AAUDIO_ERROR_INVALID_STATE;
}
if (isActive()) {
- ALOGE("AudioStreamInternal::requestStart() already active");
+ ALOGE("requestStart() already active");
return AAUDIO_ERROR_INVALID_STATE;
}
- aaudio_stream_state_t originalState = getState();
+ aaudio_stream_state_t originalState = getState();
+ if (originalState == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ ALOGE("requestStart() but DISCONNECTED");
+ return AAUDIO_ERROR_DISCONNECTED;
+ }
setState(AAUDIO_STREAM_STATE_STARTING);
- aaudio_result_t result = AAudioConvert_androidToAAudioResult(startWithStatus());
+
+ // Clear any stale timestamps from the previous run.
+ drainTimestampsFromService();
+
+ status_t status = startWithStatus(); // Call PlayerBase, which will start the device stream.
+ aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
startTime = AudioClock::getNanoseconds();
mClockModel.start(startTime);
+ mNeedCatchUp.request(); // Ask data processing code to catch up when first timestamp received.
if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
// Launch the callback loop thread.
@@ -314,13 +334,14 @@
aaudio_result_t AudioStreamInternal::requestStopInternal()
{
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
+ ALOGE("requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
mServiceStreamHandle);
return AAUDIO_ERROR_INVALID_STATE;
}
mClockModel.stop(AudioClock::getNanoseconds());
setState(AAUDIO_STREAM_STATE_STOPPING);
+ mAtomicTimestamp.clear();
return AAudioConvert_androidToAAudioResult(stopWithStatus());
}
@@ -336,7 +357,7 @@
aaudio_result_t AudioStreamInternal::registerThread() {
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::registerThread() mServiceStreamHandle invalid");
+ ALOGE("registerThread() mServiceStreamHandle invalid");
return AAUDIO_ERROR_INVALID_STATE;
}
return mServiceInterface.registerAudioThread(mServiceStreamHandle,
@@ -346,7 +367,7 @@
aaudio_result_t AudioStreamInternal::unregisterThread() {
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
- ALOGE("AudioStreamInternal::unregisterThread() mServiceStreamHandle invalid");
+ ALOGE("unregisterThread() mServiceStreamHandle invalid");
return AAUDIO_ERROR_INVALID_STATE;
}
return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, gettid());
@@ -374,12 +395,14 @@
// Generated in server and passed to client. Return latest.
if (mAtomicTimestamp.isValid()) {
Timestamp timestamp = mAtomicTimestamp.read();
- *framePosition = timestamp.getPosition();
- *timeNanoseconds = timestamp.getNanoseconds();
- return AAUDIO_OK;
- } else {
- return AAUDIO_ERROR_UNAVAILABLE;
+ int64_t position = timestamp.getPosition() + mFramesOffsetFromService;
+ if (position >= 0) {
+ *framePosition = position;
+ *timeNanoseconds = timestamp.getNanoseconds();
+ return AAUDIO_OK;
+ }
}
+ return AAUDIO_ERROR_UNAVAILABLE;
}
aaudio_result_t AudioStreamInternal::updateStateMachine() {
@@ -394,14 +417,14 @@
static int64_t oldTime = 0;
int64_t framePosition = command.timestamp.position;
int64_t nanoTime = command.timestamp.timestamp;
- ALOGD("AudioStreamInternal: timestamp says framePosition = %08lld at nanoTime %lld",
+ ALOGD("logTimestamp: timestamp says framePosition = %8lld at nanoTime %lld",
(long long) framePosition,
(long long) nanoTime);
int64_t nanosDelta = nanoTime - oldTime;
if (nanosDelta > 0 && oldTime > 0) {
int64_t framesDelta = framePosition - oldPosition;
int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
- ALOGD("AudioStreamInternal: framesDelta = %08lld, nanosDelta = %08lld, rate = %lld",
+ ALOGD("logTimestamp: framesDelta = %8lld, nanosDelta = %8lld, rate = %lld",
(long long) framesDelta, (long long) nanosDelta, (long long) rate);
}
oldPosition = framePosition;
@@ -478,6 +501,34 @@
return result;
}
+aaudio_result_t AudioStreamInternal::drainTimestampsFromService() {
+ aaudio_result_t result = AAUDIO_OK;
+
+ while (result == AAUDIO_OK) {
+ AAudioServiceMessage message;
+ if (mAudioEndpoint.readUpCommand(&message) != 1) {
+ break; // no command this time, no problem
+ }
+ switch (message.what) {
+ // ignore most messages
+ case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
+ case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
+ break;
+
+ case AAudioServiceMessage::code::EVENT:
+ result = onEventFromServer(&message);
+ break;
+
+ default:
+ ALOGE("WARNING - drainTimestampsFromService() Unrecognized what = %d",
+ (int) message.what);
+ result = AAUDIO_ERROR_INTERNAL;
+ break;
+ }
+ }
+ return result;
+}
+
// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
aaudio_result_t result = AAUDIO_OK;
@@ -502,7 +553,7 @@
break;
default:
- ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
+ ALOGE("WARNING - processCommands() Unrecognized what = %d",
(int) message.what);
result = AAUDIO_ERROR_INTERNAL;
break;
@@ -613,7 +664,7 @@
}
aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
- ALOGD("AudioStreamInternal::setBufferSize() req = %d => %d", requestedFrames, actualFrames);
+ ALOGD("setBufferSize() req = %d => %d", requestedFrames, actualFrames);
if (result < 0) {
return result;
} else {
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 3523294..899d455 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -115,12 +115,15 @@
int64_t currentTimeNanos,
int64_t *wakeTimePtr) = 0;
+ aaudio_result_t drainTimestampsFromService();
+
aaudio_result_t processCommands();
aaudio_result_t requestStopInternal();
aaudio_result_t stopCallback();
+ virtual void advanceClientToMatchServerPosition() = 0;
virtual void onFlushFromServer() {}
@@ -167,6 +170,10 @@
AAudioServiceInterface &mServiceInterface; // abstract interface to the service
+ SimpleDoubleBuffer<Timestamp> mAtomicTimestamp;
+
+ AtomicRequestor mNeedCatchUp; // Ask read() or write() to sync on first timestamp.
+
private:
/*
* Asynchronous write with data conversion.
@@ -188,8 +195,6 @@
AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
- SimpleDoubleBuffer<Timestamp> mAtomicTimestamp;
-
int64_t mServiceLatencyNanos = 0;
};
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 7b1e53e..b792ecd 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -39,6 +39,21 @@
AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
+void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
+ int64_t readCounter = mAudioEndpoint.getDataReadCounter();
+ int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
+
+ // Bump offset so caller does not see the retrograde motion in getFramesRead().
+ int64_t offset = readCounter - writeCounter;
+ mFramesOffsetFromService += offset;
+ ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
+ (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
+
+ // Force readCounter to match writeCounter.
+ // This is because we cannot change the write counter in the hardware.
+ mAudioEndpoint.setDataReadCounter(writeCounter);
+}
+
// Read the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
@@ -57,6 +72,18 @@
const char *traceName = "aaRdNow";
ATRACE_BEGIN(traceName);
+ if (mClockModel.isStarting()) {
+ // Still haven't got any timestamps from the server.
+ // Keep waiting until we get some valid timestamps, then start reading from
+ // the current buffer position.
+ ALOGD("processDataNow() wait for valid timestamps");
+ // Sleep very briefly and hope we get a timestamp soon.
+ *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
+ ATRACE_END();
+ return 0;
+ }
+ // If we have gotten this far then we have at least one timestamp from server.
+
if (mAudioEndpoint.isFreeRunning()) {
//ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
// Update data queue based on the timing model.
@@ -65,6 +92,14 @@
mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
}
+ // This code assumes that we have already received valid timestamps.
+ if (mNeedCatchUp.isRequested()) {
+ // Catch an MMAP pointer that is already advancing.
+ // This will avoid initial underruns caused by a slow cold start.
+ advanceClientToMatchServerPosition();
+ mNeedCatchUp.acknowledge();
+ }
+
// If the write index passed the read index then consider it an overrun.
if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
mXRunCount++;
@@ -100,8 +135,8 @@
// Calculate frame position based off of the readCounter because
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
- int64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
- wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
+ wakeTime = mClockModel.convertPositionToTime(nextPosition);
}
break;
default:
@@ -186,8 +221,7 @@
}
int64_t AudioStreamInternalCapture::getFramesRead() {
- int64_t frames = mAudioEndpoint.getDataWriteCounter()
- + mFramesOffsetFromService;
+ int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
//ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
return frames;
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 17f37e8..294dbaf 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -46,6 +46,8 @@
}
protected:
+ void advanceClientToMatchServerPosition() override;
+
/**
* Low level data processing that will not block. It will just read or write as much as it can.
*
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 31e0a40..f2e40a2 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -48,6 +48,7 @@
mClockModel.stop(AudioClock::getNanoseconds());
setState(AAUDIO_STREAM_STATE_PAUSING);
+ mAtomicTimestamp.clear();
return AAudioConvert_androidToAAudioResult(pauseWithStatus());
}
@@ -72,21 +73,25 @@
return mServiceInterface.flushStream(mServiceStreamHandle);
}
-void AudioStreamInternalPlay::onFlushFromServer() {
+void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
int64_t readCounter = mAudioEndpoint.getDataReadCounter();
int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
// Bump offset so caller does not see the retrograde motion in getFramesRead().
- int64_t framesFlushed = writeCounter - readCounter;
- mFramesOffsetFromService += framesFlushed;
- ALOGD("AudioStreamInternal::onFlushFromServer() readN = %lld, writeN = %lld, offset = %lld",
+ int64_t offset = writeCounter - readCounter;
+ mFramesOffsetFromService += offset;
+ ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
(long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
- // Flush written frames by forcing writeCounter to readCounter.
- // This is because we cannot move the read counter in the hardware.
+ // Force writeCounter to match readCounter.
+ // This is because we cannot change the read counter in the hardware.
mAudioEndpoint.setDataWriteCounter(readCounter);
}
+void AudioStreamInternalPlay::onFlushFromServer() {
+ advanceClientToMatchServerPosition();
+}
+
// Write the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
@@ -106,6 +111,18 @@
const char *traceName = "aaWrNow";
ATRACE_BEGIN(traceName);
+ if (mClockModel.isStarting()) {
+ // Still haven't got any timestamps from the server.
+ // Keep waiting until we get some valid timestamps, then start writing to
+ // the current buffer position.
+ ALOGD("processDataNow() wait for valid timestamps");
+ // Sleep very briefly and hope we get a timestamp soon.
+ *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
+ ATRACE_END();
+ return 0;
+ }
+ // If we have gotten this far then we have at least one timestamp from server.
+
// If a DMA channel or DSP is reading the other end then we have to update the readCounter.
if (mAudioEndpoint.isFreeRunning()) {
// Update data queue based on the timing model.
@@ -114,6 +131,13 @@
mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
}
+ if (mNeedCatchUp.isRequested()) {
+ // Catch an MMAP pointer that is already advancing.
+ // This will avoid initial underruns caused by a slow cold start.
+ advanceClientToMatchServerPosition();
+ mNeedCatchUp.acknowledge();
+ }
+
// If the read index passed the write index then consider it an underrun.
if (mAudioEndpoint.getFullFramesAvailable() < 0) {
mXRunCount++;
@@ -153,9 +177,9 @@
// Calculate frame position based off of the writeCounter because
// the readCounter might have just advanced in the background,
// causing us to sleep until a later burst.
- int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
+ int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
- mAudioEndpoint.getBufferSizeInFrames();
- wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ wakeTime = mClockModel.convertPositionToTime(nextPosition);
}
break;
default:
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index e59d02c..fdb1fd7 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -54,6 +54,8 @@
aaudio_result_t requestPauseInternal();
+ void advanceClientToMatchServerPosition() override;
+
void onFlushFromServer() override;
/**
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index c06c8a9..bac69f1 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -48,19 +48,26 @@
}
void IsochronousClockModel::start(int64_t nanoTime) {
- ALOGD("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
+ ALOGV("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
mMarkerNanoTime = nanoTime;
mState = STATE_STARTING;
}
void IsochronousClockModel::stop(int64_t nanoTime) {
- ALOGD("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
+ ALOGV("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
// TODO should we set position?
mState = STATE_STOPPED;
}
+bool IsochronousClockModel::isStarting() {
+ return mState == STATE_STARTING;
+}
+
void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
+// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
+// (long long)framePosition,
+// (long long)nanoTime);
int64_t framesDelta = framePosition - mMarkerFramePosition;
int64_t nanosDelta = nanoTime - mMarkerNanoTime;
if (nanosDelta < 1000) {
@@ -70,9 +77,6 @@
// ALOGD("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
// (long long)mMarkerFramePosition,
// (long long)mMarkerNanoTime);
-// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
-// (long long)framePosition,
-// (long long)nanoTime);
int64_t expectedNanosDelta = convertDeltaPositionToTime(framesDelta);
// ALOGD("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
@@ -116,6 +120,8 @@
default:
break;
}
+
+// ALOGD("processTimestamp() - mState = %d", mState);
}
void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 585f53a..7182376 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -36,6 +36,8 @@
void start(int64_t nanoTime);
void stop(int64_t nanoTime);
+ bool isStarting();
+
void processTimestamp(int64_t framePosition, int64_t nanoTime);
/**
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index b0c6c94..f56be32 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -272,8 +272,7 @@
class SimpleDoubleBuffer {
public:
SimpleDoubleBuffer()
- : mValues()
- , mCounter(0) {}
+ : mValues() {}
__attribute__((no_sanitize("integer")))
void write(T value) {
@@ -282,6 +281,14 @@
mCounter++; // Increment AFTER updating storage, OK if it wraps.
}
+ /**
+ * This should only be called by the thread that calls write(), or when
+ * no other thread is calling write().
+ */
+ void clear() {
+ mCounter.store(0);
+ }
+
T read() const {
T result;
int before;
@@ -293,7 +300,7 @@
int index = (before & 1) ^ 1;
result = mValues[index];
after = mCounter.load();
- } while ((after != before) && --timeout > 0);
+ } while ((after != before) && (after > 0) && (--timeout > 0));
return result;
}
@@ -306,7 +313,7 @@
private:
T mValues[2];
- std::atomic<int> mCounter;
+ std::atomic<int> mCounter{0};
};
class Timestamp {
@@ -328,4 +335,32 @@
int64_t mNanoseconds;
};
+
+/**
+ * Pass a request to another thread.
+ * This is used when one thread, A, wants another thread, B, to do something.
+ * A naive approach would be for A to set a flag and for B to clear it when done,
+ * but B's clear could then wipe out a second request that A made in the meantime.
+ * Keeping separate request and acknowledge counters avoids that race.
+ *
+ * Assumes only one requester and one acknowledger.
+ */
+class AtomicRequestor {
+public:
+ void request() {
+ // TODO handle overflows, very unlikely
+ mRequested++;
+ }
+
+ bool isRequested() {
+ return mRequested.load() > mAcknowledged.load();
+ }
+
+ void acknowledge() {
+ mAcknowledged++;
+ }
+
+private:
+ std::atomic<int> mRequested{0};
+ std::atomic<int> mAcknowledged{0};
+};
#endif //UTILITY_AAUDIO_UTILITIES_H
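
Reviewer note (not part of the patch): a standalone illustration of the
AtomicRequestor handshake introduced in AAudioUtilities.h, with the class body
copied so the sketch builds on its own. The worker thread is a hypothetical
stand-in for the real write()/read() processing path; as the header comment
says, the pattern assumes a single requester and a single acknowledger.

    #include <atomic>
    #include <cstdio>
    #include <thread>

    // Trimmed copy of the AtomicRequestor added above.
    class AtomicRequestor {
    public:
        void request()     { mRequested++; }
        bool isRequested() { return mRequested.load() > mAcknowledged.load(); }
        void acknowledge() { mAcknowledged++; }
    private:
        std::atomic<int> mRequested{0};
        std::atomic<int> mAcknowledged{0};
    };

    int main() {
        AtomicRequestor needCatchUp;
        std::atomic<bool> done{false};

        // Stand-in for the data-processing loop inside write()/read().
        std::thread worker([&] {
            while (!done.load()) {
                if (needCatchUp.isRequested()) {
                    printf("worker: catching up to the server position\n");
                    needCatchUp.acknowledge();   // request is cleared exactly once
                }
            }
        });

        needCatchUp.request();                   // e.g. from requestStart()
        while (needCatchUp.isRequested()) {      // wait for the worker to acknowledge
        }
        done = true;
        worker.join();
        return 0;
    }
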