aaudio: improve accuracy of timestamps
Account for latency added by the AAudio service.
Fix input timestamps.
Bug: 37080396
Test: test_timestamps.cpp input_monitor.cpp
Change-Id: I1053cd21af722bb9b9371df4e5731bf4a0a57b0b
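
For context before the diff: a minimal sketch (not part of this patch) of how an output client can consume the timestamps this change makes accurate, assuming only the public <aaudio/AAudio.h> API. The arithmetic mirrors the calculateLatencyMillis() helper added to the example utilities below; the function and variable names here are illustrative only.

    #include <time.h>
    #include <aaudio/AAudio.h>

    static int64_t nowNanos() {
        struct timespec t;
        clock_gettime(CLOCK_MONOTONIC, &t);
        return ((int64_t) t.tv_sec * 1000000000LL) + t.tv_nsec;
    }

    // Returns estimated output latency in milliseconds, or a negative AAudio
    // error code (e.g. AAUDIO_ERROR_UNAVAILABLE before the first hardware
    // timestamp arrives).
    static double estimateOutputLatencyMillis(AAudioStream *stream) {
        int64_t hwPosition = 0;
        int64_t hwTimeNanos = 0;
        aaudio_result_t result = AAudioStream_getTimestamp(
                stream, CLOCK_MONOTONIC, &hwPosition, &hwTimeNanos);
        if (result != AAUDIO_OK) {
            return (double) result;
        }
        int64_t appPosition = AAudioStream_getFramesWritten(stream);
        int64_t appTimeNanos = nowNanos();
        int32_t sampleRate = AAudioStream_getSampleRate(stream);
        // Extrapolate when the frame the app just wrote will reach the DAC,
        // then subtract "now".
        int64_t whenPlayedNanos = hwTimeNanos
                + ((appPosition - hwPosition) * 1000000000LL / sampleRate);
        return (whenPlayedNanos - appTimeNanos) / 1.0e6;
    }
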
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
index edf644a..2dfd0a7 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -27,9 +27,11 @@
#include "AAudioSimpleRecorder.h"
// TODO support FLOAT
-#define REQUIRED_FORMAT AAUDIO_FORMAT_PCM_I16
+#define REQUIRED_FORMAT AAUDIO_FORMAT_PCM_I16
#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
+static const int FRAMES_PER_LINE = 20000;
+
int main(int argc, const char **argv)
{
AAudioArgsParser argParser;
@@ -46,7 +48,10 @@
int32_t framesPerRead = 0;
int32_t framesToRecord = 0;
int32_t framesLeft = 0;
+ int32_t nextFrameCount = 0;
+ int32_t frameCount = 0;
int32_t xRunCount = 0;
+ int64_t previousFramePosition = -1;
int16_t *data = nullptr;
float peakLevel = 0.0;
int loopCounter = 0;
@@ -56,7 +61,7 @@
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Monitor input level using AAudio\n", argv[0]);
+ printf("%s - Monitor input level using AAudio V0.1.1\n", argv[0]);
argParser.setFormat(REQUIRED_FORMAT);
if (argParser.parseArgs(argc, argv)) {
@@ -133,6 +138,7 @@
goto finish;
}
framesLeft -= actual;
+ frameCount += actual;
// Peak finder.
for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
@@ -143,9 +149,36 @@
}
// Display level as stars, eg. "******".
- if ((loopCounter++ % 10) == 0) {
+ if (frameCount > nextFrameCount) {
displayPeakLevel(peakLevel);
peakLevel = 0.0;
+ nextFrameCount += FRAMES_PER_LINE;
+ }
+
+ // Print timestamps.
+ int64_t framePosition = 0;
+ int64_t frameTime = 0;
+ aaudio_result_t timeResult;
+ timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+ &framePosition, &frameTime);
+
+ if (timeResult == AAUDIO_OK) {
+ if (framePosition > (previousFramePosition + FRAMES_PER_LINE)) {
+ int64_t realTime = getNanoseconds();
+ int64_t framesRead = AAudioStream_getFramesRead(aaudioStream);
+
+ double latencyMillis = calculateLatencyMillis(framesRead, realTime,
+ framePosition, frameTime,
+ actualSampleRate);
+
+ printf("--- timestamp: result = %4d, position = %lld, at %lld nanos"
+ ", latency = %7.2f msec\n",
+ timeResult,
+ (long long) framePosition,
+ (long long) frameTime,
+ latencyMillis);
+ previousFramePosition = framePosition;
+ }
}
}
diff --git a/media/libaaudio/examples/utils/AAudioExampleUtils.h b/media/libaaudio/examples/utils/AAudioExampleUtils.h
index 66de25f..6cbcc58 100644
--- a/media/libaaudio/examples/utils/AAudioExampleUtils.h
+++ b/media/libaaudio/examples/utils/AAudioExampleUtils.h
@@ -25,7 +25,7 @@
#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
-static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+const char *getSharingModeText(aaudio_sharing_mode_t mode) {
const char *modeText = "unknown";
switch (mode) {
case AAUDIO_SHARING_MODE_EXCLUSIVE:
@@ -49,7 +49,7 @@
return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
}
-void displayPeakLevel(float peakLevel) {
+static void displayPeakLevel(float peakLevel) {
printf("%5.3f ", peakLevel);
const int maxStars = 50; // arbitrary, fits on one line
int numStars = (int) (peakLevel * maxStars);
@@ -59,4 +59,24 @@
printf("\n");
}
+/**
+ * @param position1 position of the hardware (DAC/ADC) frame
+ * @param nanoseconds1 time when the hardware was at position1
+ * @param position2 position of the client read/write
+ * @param nanoseconds2 time when the client was at position2, usually the current time
+ * @param sampleRate frames per second
+ * @return latency in milliseconds
+ */
+static double calculateLatencyMillis(int64_t position1, int64_t nanoseconds1,
+ int64_t position2, int64_t nanoseconds2,
+ int64_t sampleRate) {
+ int64_t deltaFrames = position2 - position1;
+ int64_t deltaTime =
+ (NANOS_PER_SECOND * deltaFrames / sampleRate);
+ int64_t timeCurrentFramePlayed = nanoseconds1 + deltaTime;
+ int64_t latencyNanos = timeCurrentFramePlayed - nanoseconds2;
+ double latencyMillis = latencyNanos / 1000000.0;
+ return latencyMillis;
+}
+
#endif // AAUDIO_EXAMPLE_UTILS_H
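
A worked example of the helper above, using hypothetical values for an output stream (not part of this patch):

    // Hypothetical numbers: the hardware reports frame 48000 at the DAC at
    // t1 = 1.000 s, the client has written 48960 frames and reads the clock
    // at t2 = 1.005 s, and the stream runs at 48000 Hz.
    double latencyMillis = calculateLatencyMillis(
            48000LL,         /* position1: hardware frame position */
            1000000000LL,    /* nanoseconds1: 1.000 s */
            48960LL,         /* position2: client write position */
            1005000000LL,    /* nanoseconds2: 1.005 s */
            48000LL);        /* sampleRate */
    // deltaFrames = 960 -> 20 ms; (1.000 s + 20 ms) - 1.005 s = 15 ms.
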
diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index b4377fb..54e8001 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -28,7 +28,6 @@
// Used to send information about the HAL to the client.
struct AAudioMessageTimestamp {
int64_t position; // number of frames transferred so far
- int64_t deviceOffset; // add to client position to get device position
int64_t timestamp; // time when that position was reached
};
@@ -51,7 +50,8 @@
typedef struct AAudioServiceMessage_s {
enum class code : uint32_t {
NOTHING,
- TIMESTAMP,
+ TIMESTAMP_SERVICE, // when a frame is read from or written to the client by the service
+ TIMESTAMP_HARDWARE, // when a frame is at the DAC or ADC
EVENT,
};
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 4c7d0f7..259c9b9 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -68,6 +68,7 @@
, mServiceInterface(serviceInterface)
, mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
, mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
+ , mAtomicTimestamp()
{
ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
mWakeupDelayNanos, mMinimumSleepNanos);
@@ -351,12 +352,15 @@
aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
int64_t *framePosition,
int64_t *timeNanoseconds) {
- // TODO Generate in server and pass to client. Return latest.
- int64_t time = AudioClock::getNanoseconds();
- *framePosition = mClockModel.convertTimeToPosition(time) + mFramesOffsetFromService;
- // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
- *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
- return AAUDIO_OK;
+ // Generated in server and passed to client. Return latest.
+ if (mAtomicTimestamp.isValid()) {
+ Timestamp timestamp = mAtomicTimestamp.read();
+ *framePosition = timestamp.getPosition();
+ *timeNanoseconds = timestamp.getNanoseconds();
+ return AAUDIO_OK;
+ } else {
+ return AAUDIO_ERROR_UNAVAILABLE;
+ }
}
aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
@@ -385,7 +389,7 @@
oldTime = nanoTime;
}
-aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
+aaudio_result_t AudioStreamInternal::onTimestampService(AAudioServiceMessage *message) {
#if LOG_TIMESTAMPS
logTimestamp(*message);
#endif
@@ -393,6 +397,12 @@
return AAUDIO_OK;
}
+aaudio_result_t AudioStreamInternal::onTimestampHardware(AAudioServiceMessage *message) {
+ Timestamp timestamp(message->timestamp.position, message->timestamp.timestamp);
+ mAtomicTimestamp.write(timestamp);
+ return AAUDIO_OK;
+}
+
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
aaudio_result_t result = AAUDIO_OK;
switch (message->event.event) {
@@ -456,8 +466,12 @@
break; // no command this time, no problem
}
switch (message.what) {
- case AAudioServiceMessage::code::TIMESTAMP:
- result = onTimestampFromServer(&message);
+ case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
+ result = onTimestampService(&message);
+ break;
+
+ case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
+ result = onTimestampHardware(&message);
break;
case AAudioServiceMessage::code::EVENT:
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 1b991de..607e734 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -122,7 +122,9 @@
aaudio_result_t onEventFromServer(AAudioServiceMessage *message);
- aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
+ aaudio_result_t onTimestampService(AAudioServiceMessage *message);
+
+ aaudio_result_t onTimestampHardware(AAudioServiceMessage *message);
void logTimestamp(AAudioServiceMessage &message);
@@ -181,6 +183,11 @@
AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
+
+ SimpleDoubleBuffer<Timestamp> mAtomicTimestamp;
+
+ int64_t mServiceLatencyNanos = 0;
+
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index acd319b..b0c6c94 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -258,4 +258,74 @@
}
}
+
+/**
+ * Simple double buffer for a structure that can be written occasionally and read occasionally.
+ * This allows a SINGLE writer with multiple readers.
+ *
+ * It is OK if a new value overwrites an old one before it has been read.
+ * It is also OK if we read an old value.
+ * The reader may get a torn (non-atomic) result if the writer is rapidly
+ * writing new values on another core.
+ */
+template <class T>
+class SimpleDoubleBuffer {
+public:
+ SimpleDoubleBuffer()
+ : mValues()
+ , mCounter(0) {}
+
+ __attribute__((no_sanitize("integer")))
+ void write(T value) {
+ int index = mCounter.load() & 1;
+ mValues[index] = value;
+ mCounter++; // Increment AFTER updating storage, OK if it wraps.
+ }
+
+ T read() const {
+ T result;
+ int before;
+ int after;
+ int timeout = 3;
+ do {
+ // Check to see if a write occurred while we were reading.
+ before = mCounter.load();
+ int index = (before & 1) ^ 1;
+ result = mValues[index];
+ after = mCounter.load();
+ } while ((after != before) && --timeout > 0);
+ return result;
+ }
+
+ /**
+ * @return true if at least one value has been written
+ */
+ bool isValid() const {
+ return mCounter.load() > 0;
+ }
+
+private:
+ T mValues[2];
+ std::atomic<int> mCounter;
+};
+
+class Timestamp {
+public:
+ Timestamp()
+ : mPosition(0)
+ , mNanoseconds(0) {}
+ Timestamp(int64_t position, int64_t nanoseconds)
+ : mPosition(position)
+ , mNanoseconds(nanoseconds) {}
+
+ int64_t getPosition() const { return mPosition; }
+
+ int64_t getNanoseconds() const { return mNanoseconds; }
+
+private:
+ // These cannot be const because Timestamp must be copy-assignable for SimpleDoubleBuffer.
+ int64_t mPosition;
+ int64_t mNanoseconds;
+};
+
#endif //UTILITY_AAUDIO_UTILITIES_H
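
A minimal usage sketch (not part of this patch) of the single-writer double buffer above, modeled on how AudioStreamInternal publishes hardware timestamps through mAtomicTimestamp; the names here are illustrative and <aaudio/AAudio.h> is assumed for the result codes.

    SimpleDoubleBuffer<Timestamp> atomicTimestamp;

    // Writer side, e.g. the thread that handles TIMESTAMP_HARDWARE messages.
    void publishTimestamp(int64_t position, int64_t nanoseconds) {
        atomicTimestamp.write(Timestamp(position, nanoseconds));
    }

    // Reader side, e.g. AAudioStream_getTimestamp() on the application thread.
    aaudio_result_t readTimestamp(int64_t *framePosition, int64_t *timeNanoseconds) {
        if (!atomicTimestamp.isValid()) {
            return AAUDIO_ERROR_UNAVAILABLE; // nothing has been published yet
        }
        Timestamp timestamp = atomicTimestamp.read();
        *framePosition = timestamp.getPosition();
        *timeNanoseconds = timestamp.getNanoseconds();
        return AAUDIO_OK;
    }
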
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
index e4eef06..4402919 100644
--- a/media/libaaudio/tests/Android.mk
+++ b/media/libaaudio/tests/Android.mk
@@ -34,6 +34,17 @@
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src \
+ frameworks/av/media/libaaudio/examples
+LOCAL_SRC_FILES:= test_timestamps.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := test_timestamps
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_linear_ramp.cpp
LOCAL_SHARED_LIBRARIES := libaaudio
diff --git a/media/libaaudio/tests/test_timestamps.cpp b/media/libaaudio/tests/test_timestamps.cpp
new file mode 100644
index 0000000..d9ca391
--- /dev/null
+++ b/media/libaaudio/tests/test_timestamps.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play silence while printing timestamps and measuring output latency.
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+
+#include "utils/AAudioExampleUtils.h"
+
+#define DEFAULT_TIMEOUT_NANOS ((int64_t)1000000000)
+
+int main(int argc, char **argv) {
+ (void) argc;
+ (void) argv;
+
+ aaudio_result_t result = AAUDIO_OK;
+
+ int32_t triesLeft = 3;
+ int32_t bufferCapacity;
+ int32_t framesPerBurst = 0;
+ float *buffer = nullptr;
+
+ int32_t actualChannelCount = 0;
+ int32_t actualSampleRate = 0;
+ int32_t originalBufferSize = 0;
+ int32_t requestedBufferSize = 0;
+ int32_t finalBufferSize = 0;
+ aaudio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_FLOAT;
+ aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ int32_t framesMax;
+ int64_t framesTotal;
+ int64_t printAt;
+ int samplesPerBurst;
+ int64_t previousFramePosition = -1;
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("Test Timestamps V0.1.1\n");
+
+ AAudio_setMMapPolicy(AAUDIO_POLICY_AUTO);
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&aaudioBuilder);
+ if (result != AAUDIO_OK) {
+ printf("AAudio_createStreamBuilder returned %s",
+ AAudio_convertResultToText(result));
+ goto finish;
+ }
+
+ // Request stream properties.
+ AAudioStreamBuilder_setFormat(aaudioBuilder, AAUDIO_FORMAT_PCM_FLOAT);
+ //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_NONE);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStreamBuilder_openStream returned %s",
+ AAudio_convertResultToText(result));
+ goto finish;
+ }
+
+ // Check to see what kind of stream we actually got.
+ actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
+ actualChannelCount = AAudioStream_getChannelCount(aaudioStream);
+ actualDataFormat = AAudioStream_getFormat(aaudioStream);
+
+ printf("-------- chans = %3d, rate = %6d format = %d\n",
+ actualChannelCount, actualSampleRate, actualDataFormat);
+ printf(" Is MMAP used? %s\n", AAudioStream_isMMapUsed(aaudioStream)
+ ? "yes" : "no");
+
+ // This is the number of frames that are read in one chunk by a DMA controller
+ // or a DSP or a mixer.
+ framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
+ printf(" framesPerBurst = %3d\n", framesPerBurst);
+
+ originalBufferSize = AAudioStream_getBufferSizeInFrames(aaudioStream);
+ requestedBufferSize = 2 * framesPerBurst;
+ finalBufferSize = AAudioStream_setBufferSizeInFrames(aaudioStream, requestedBufferSize);
+
+ printf(" BufferSize: original = %4d, requested = %4d, final = %4d\n",
+ originalBufferSize, requestedBufferSize, finalBufferSize);
+
+ samplesPerBurst = framesPerBurst * actualChannelCount;
+ buffer = new float[samplesPerBurst];
+
+ result = AAudioStream_requestStart(aaudioStream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_requestStart returned %s",
+ AAudio_convertResultToText(result));
+ goto finish;
+ }
+
+ // Play silence very briefly.
+ framesMax = actualSampleRate * 4;
+ framesTotal = 0;
+ printAt = actualSampleRate;
+ while (result == AAUDIO_OK && framesTotal < framesMax) {
+ int32_t framesWritten = AAudioStream_write(aaudioStream,
+ buffer, framesPerBurst,
+ DEFAULT_TIMEOUT_NANOS);
+ if (framesWritten < 0) {
+ result = framesWritten;
+ printf("write() returned %s, frames = %d\n",
+ AAudio_convertResultToText(result), (int)framesTotal);
+ printf(" frames = %d\n", (int)framesTotal);
+ } else if (framesWritten != framesPerBurst) {
+ printf("write() returned %d, frames = %d\n", framesWritten, (int)framesTotal);
+ result = AAUDIO_ERROR_TIMEOUT;
+ } else {
+ framesTotal += framesWritten;
+ if (framesTotal >= printAt) {
+ printf("frames = %d\n", (int)framesTotal);
+ printAt += actualSampleRate;
+ }
+ }
+
+ // Print timestamps.
+ int64_t framePosition = 0;
+ int64_t frameTime = 0;
+ aaudio_result_t timeResult;
+ timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+ &framePosition, &frameTime);
+
+ if (timeResult == AAUDIO_OK) {
+ if (framePosition > (previousFramePosition + 5000)) {
+ int64_t realTime = getNanoseconds();
+ int64_t totalFramesWritten = AAudioStream_getFramesWritten(aaudioStream);
+
+ double latencyMillis = calculateLatencyMillis(framePosition, frameTime,
+ totalFramesWritten, realTime,
+ actualSampleRate);
+
+ printf("--- timestamp: result = %4d, position = %lld, at %lld nanos"
+ ", latency = %7.2f msec\n",
+ timeResult,
+ (long long) framePosition,
+ (long long) frameTime,
+ latencyMillis);
+ previousFramePosition = framePosition;
+ }
+ }
+ }
+
+ result = AAudioStream_requestStop(aaudioStream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_requestStop returned %s\n",
+ AAudio_convertResultToText(result));
+ }
+ result = AAudioStream_close(aaudioStream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_close returned %s\n",
+ AAudio_convertResultToText(result));
+ }
+ aaudioStream = nullptr;
+
+
+finish:
+ if (aaudioStream != nullptr) {
+ AAudioStream_close(aaudioStream);
+ }
+ AAudioStreamBuilder_delete(aaudioBuilder);
+ delete[] buffer;
+ printf("result = %d = %s\n", result, AAudio_convertResultToText(result));
+}