aaudio: test timestamps
Run a stream for a while, gathering timestamps from the data callback,
then print a report of positions and latencies.
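
The latency figures in the report come from comparing the (position, nanoseconds)
pair returned by AAudioStream_getTimestamp() with the app-side frame counter and
the wall clock. A minimal sketch of that math, assuming the same argument order
as the calculateLatencyMillis() helper in AAudioExampleUtils.h (the names below
are illustrative, not the helper's actual code):

    // Extrapolate the hardware timestamp to the app's current frame position,
    // then compare with the wall-clock time at which that position was observed.
    static double sketchLatencyMillis(int64_t timestampPosition, int64_t timestampNanos,
                                      int64_t appPosition, int64_t appNanoseconds,
                                      int64_t sampleRate) {
        int64_t deltaFrames = appPosition - timestampPosition;
        int64_t appFrameNanos = timestampNanos
                + (deltaFrames * NANOS_PER_SECOND / sampleRate);
        return (appFrameNanos - appNanoseconds) / 1.0e6; // nanos -> millis
    }
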
Bug: 63918065
Test: this is a test
Change-Id: Ie9ada2d22fc265247a4d6c9645e42cf94caa1c05
diff --git a/media/libaaudio/examples/utils/AAudioExampleUtils.h b/media/libaaudio/examples/utils/AAudioExampleUtils.h
index 9ef62c9..c179ce6 100644
--- a/media/libaaudio/examples/utils/AAudioExampleUtils.h
+++ b/media/libaaudio/examples/utils/AAudioExampleUtils.h
@@ -31,18 +31,51 @@
#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
const char *getSharingModeText(aaudio_sharing_mode_t mode) {
- const char *modeText = "unknown";
+ const char *text = "unknown";
switch (mode) {
- case AAUDIO_SHARING_MODE_EXCLUSIVE:
- modeText = "EXCLUSIVE";
- break;
- case AAUDIO_SHARING_MODE_SHARED:
- modeText = "SHARED";
- break;
- default:
- break;
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ text = "EXCLUSIVE";
+ break;
+ case AAUDIO_SHARING_MODE_SHARED:
+ text = "SHARED";
+ break;
+ default:
+ break;
}
- return modeText;
+ return text;
+}
+
+const char *getPerformanceModeText(aaudio_performance_mode_t mode) {
+ const char *text = "unknown";
+ switch (mode) {
+ case AAUDIO_PERFORMANCE_MODE_NONE:
+ text = "NONE";
+ break;
+ case AAUDIO_PERFORMANCE_MODE_LOW_LATENCY:
+ text = "LOW_LATENCY";
+ break;
+ case AAUDIO_PERFORMANCE_MODE_POWER_SAVING:
+ text = "POWER_SAVING";
+ break;
+ default:
+ break;
+ }
+ return text;
+}
+
+const char *getDirectionText(aaudio_direction_t direction) {
+ const char *text = "unknown";
+ switch (direction) {
+ case AAUDIO_DIRECTION_INPUT:
+ text = "INPUT";
+ break;
+ case AAUDIO_DIRECTION_OUTPUT:
+ text = "OUTPUT";
+ break;
+ default:
+ break;
+ }
+ return text;
}
static void convertNanosecondsToTimespec(int64_t nanoseconds, struct timespec *time) {
diff --git a/media/libaaudio/tests/test_timestamps.cpp b/media/libaaudio/tests/test_timestamps.cpp
index d9ca391..49de05a 100644
--- a/media/libaaudio/tests/test_timestamps.cpp
+++ b/media/libaaudio/tests/test_timestamps.cpp
@@ -17,23 +17,99 @@
// Play silence and recover from dead servers or disconnected devices.
#include <stdio.h>
+#include <stdlib.h>
#include <unistd.h>
#include <aaudio/AAudio.h>
#include <aaudio/AAudioTesting.h>
-
#include "utils/AAudioExampleUtils.h"
+#include "../examples/utils/AAudioExampleUtils.h"
-#define DEFAULT_TIMEOUT_NANOS ((int64_t)1000000000)
+// Arbitrary period for glitches, once per second at 48000 Hz.
+#define FORCED_UNDERRUN_PERIOD_FRAMES 48000
+// How long to sleep in a callback to cause an intentional glitch. For testing.
+#define FORCED_UNDERRUN_SLEEP_MICROS (10 * 1000)
-int main(int argc, char **argv) {
- (void) argc;
- (void *)argv;
+#define MAX_TIMESTAMPS 1000
+#define DEFAULT_TIMEOUT_NANOS ((int64_t)1000000000)
+
+#define NUM_SECONDS 1
+#define NUM_LOOPS 4
+
+typedef struct TimestampInfo {
+ int64_t framesTotal;
+ int64_t appPosition; // frames
+ int64_t appNanoseconds;
+ int64_t timestampPosition; // frames
+ int64_t timestampNanos;
+ aaudio_result_t result;
+} TimestampInfo;
+
+typedef struct TimestampCallbackData_s {
+ TimestampInfo timestamps[MAX_TIMESTAMPS];
+ int64_t framesTotal = 0;
+ int64_t nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
+    int32_t timestampCount = 0; // number of valid entries in timestamps[]
+ bool forceUnderruns = false;
+} TimestampCallbackData_t;
+
+// Data callback: records timestamp information for each burst; the audio data itself is not touched.
+aaudio_data_callback_result_t timestampDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData __unused,
+ int32_t numFrames
+) {
+
+ // should not happen but just in case...
+ if (userData == nullptr) {
+        printf("ERROR - timestampDataCallbackProc needs userData\n");
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+ TimestampCallbackData_t *timestampData = (TimestampCallbackData_t *) userData;
+
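+    // For an input stream the frames in this burst have already been transferred,
+    // so count them before capturing the timestamp; output frames are counted
+    // after the capture at the end of this callback.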
+ aaudio_direction_t direction = AAudioStream_getDirection(stream);
+ if (direction == AAUDIO_DIRECTION_INPUT) {
+ timestampData->framesTotal += numFrames;
+ }
+
+ if (timestampData->forceUnderruns) {
+ if (timestampData->framesTotal > timestampData->nextFrameToGlitch) {
+ usleep(FORCED_UNDERRUN_SLEEP_MICROS);
+ printf("Simulate glitch at %lld\n", (long long) timestampData->framesTotal);
+ timestampData->nextFrameToGlitch += FORCED_UNDERRUN_PERIOD_FRAMES;
+ }
+ }
+
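+    // Record a snapshot: the AAudioStream_getTimestamp() result, the frames
+    // transferred so far, the app-side position, and the current wall-clock time.
+    // The report loop in testTimeStamps() prints these after the stream stops.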
+ if (timestampData->timestampCount < MAX_TIMESTAMPS) {
+        TimestampInfo *timestamp = &timestampData->timestamps[timestampData->timestampCount];
+        timestamp->result = AAudioStream_getTimestamp(stream,
+                                                      CLOCK_MONOTONIC,
+                                                      &timestamp->timestampPosition,
+                                                      &timestamp->timestampNanos);
+ timestamp->framesTotal = timestampData->framesTotal;
+ timestamp->appPosition = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? AAudioStream_getFramesWritten(stream)
+ : AAudioStream_getFramesRead(stream);
+ timestamp->appNanoseconds = getNanoseconds();
+ timestampData->timestampCount++;
+ }
+
+ if (direction == AAUDIO_DIRECTION_OUTPUT) {
+ timestampData->framesTotal += numFrames;
+ }
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+static TimestampCallbackData_t sTimestampData;
+
+static aaudio_result_t testTimeStamps(aaudio_policy_t mmapPolicy,
+ aaudio_sharing_mode_t sharingMode,
+ aaudio_performance_mode_t performanceMode,
+ aaudio_direction_t direction) {
aaudio_result_t result = AAUDIO_OK;
- int32_t triesLeft = 3;
- int32_t bufferCapacity;
int32_t framesPerBurst = 0;
float *buffer = nullptr;
@@ -44,22 +120,20 @@
int32_t finalBufferSize = 0;
aaudio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_FLOAT;
aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
- int32_t framesMax;
- int64_t framesTotal;
- int64_t printAt;
- int samplesPerBurst;
- int64_t previousFramePosition = -1;
+    aaudio_performance_mode_t actualPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
- // Make printf print immediately so that debug info is not stuck
- // in a buffer if we hang or crash.
- setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+ memset(&sTimestampData, 0, sizeof(sTimestampData));
- printf("Test Timestamps V0.1.1\n");
+ printf("------------ testTimeStamps(policy = %d, sharing = %s, perf = %s, dir = %s) -----------\n",
+ mmapPolicy,
+ getSharingModeText(sharingMode),
+ getPerformanceModeText(performanceMode),
+ getDirectionText(direction));
- AAudio_setMMapPolicy(AAUDIO_POLICY_AUTO);
+ AAudio_setMMapPolicy(mmapPolicy);
// Use an AAudioStreamBuilder to contain requested parameters.
result = AAudio_createStreamBuilder(&aaudioBuilder);
@@ -70,9 +144,11 @@
}
// Request stream properties.
- AAudioStreamBuilder_setFormat(aaudioBuilder, AAUDIO_FORMAT_PCM_FLOAT);
- //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_NONE);
- AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+ AAudioStreamBuilder_setFormat(aaudioBuilder, AAUDIO_FORMAT_PCM_I16);
+ AAudioStreamBuilder_setSharingMode(aaudioBuilder, sharingMode);
+ AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, performanceMode);
+ AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
+ AAudioStreamBuilder_setDataCallback(aaudioBuilder, timestampDataCallbackProc, &sTimestampData);
// Create an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
@@ -87,10 +163,25 @@
actualChannelCount = AAudioStream_getChannelCount(aaudioStream);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- printf("-------- chans = %3d, rate = %6d format = %d\n",
- actualChannelCount, actualSampleRate, actualDataFormat);
+ actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
+ if (actualSharingMode != sharingMode) {
+ printf("did not get expected sharingMode, got %3d, skipping test\n",
+ actualSharingMode);
+ result = AAUDIO_OK;
+ goto finish;
+ }
+ actualPerformanceMode = AAudioStream_getPerformanceMode(aaudioStream);
+ if (actualPerformanceMode != performanceMode) {
+ printf("did not get expected performanceMode, got %3d, skipping test\n",
+ actualPerformanceMode);
+ result = AAUDIO_OK;
+ goto finish;
+ }
+
+ printf(" chans = %3d, rate = %6d format = %d\n",
+ actualChannelCount, actualSampleRate, actualDataFormat);
printf(" Is MMAP used? %s\n", AAudioStream_isMMapUsed(aaudioStream)
- ? "yes" : "no");
+ ? "yes" : "no");
// This is the number of frames that are read in one chunk by a DMA controller
// or a DSP or a mixer.
@@ -98,91 +189,143 @@
printf(" framesPerBurst = %3d\n", framesPerBurst);
originalBufferSize = AAudioStream_getBufferSizeInFrames(aaudioStream);
- requestedBufferSize = 2 * framesPerBurst;
+ requestedBufferSize = 4 * framesPerBurst;
finalBufferSize = AAudioStream_setBufferSizeInFrames(aaudioStream, requestedBufferSize);
printf(" BufferSize: original = %4d, requested = %4d, final = %4d\n",
originalBufferSize, requestedBufferSize, finalBufferSize);
- samplesPerBurst = framesPerBurst * actualChannelCount;
- buffer = new float[samplesPerBurst];
-
- result = AAudioStream_requestStart(aaudioStream);
- if (result != AAUDIO_OK) {
- printf("AAudioStream_requestStart returned %s",
+ {
+ int64_t position;
+ int64_t nanoseconds;
+ result = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC, &position, &nanoseconds);
+ printf("before start, AAudioStream_getTimestamp() returns %s\n",
AAudio_convertResultToText(result));
- goto finish;
}
- // Play silence very briefly.
- framesMax = actualSampleRate * 4;
- framesTotal = 0;
- printAt = actualSampleRate;
- while (result == AAUDIO_OK && framesTotal < framesMax) {
- int32_t framesWritten = AAudioStream_write(aaudioStream,
- buffer, framesPerBurst,
- DEFAULT_TIMEOUT_NANOS);
- if (framesWritten < 0) {
- result = framesWritten;
- printf("write() returned %s, frames = %d\n",
- AAudio_convertResultToText(result), (int)framesTotal);
- printf(" frames = %d\n", (int)framesTotal);
- } else if (framesWritten != framesPerBurst) {
- printf("write() returned %d, frames = %d\n", framesWritten, (int)framesTotal);
- result = AAUDIO_ERROR_TIMEOUT;
- } else {
- framesTotal += framesWritten;
- if (framesTotal >= printAt) {
- printf("frames = %d\n", (int)framesTotal);
- printAt += actualSampleRate;
+ for (int runs = 0; runs < NUM_LOOPS; runs++) {
+ printf("------------------ loop #%d\n", runs);
+
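+        // Clear the collected timestamps but keep the running frame count,
+        // which carries across start/stop cycles.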
+ int64_t temp = sTimestampData.framesTotal;
+ memset(&sTimestampData, 0, sizeof(sTimestampData));
+ sTimestampData.framesTotal = temp;
+
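+        // Glitch forcing is disabled by default; set to true to exercise the
+        // FORCED_UNDERRUN path in the callback.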
+ sTimestampData.forceUnderruns = false;
+
+ result = AAudioStream_requestStart(aaudioStream);
+ if (result != AAUDIO_OK) {
+            printf("AAudioStream_requestStart returned %s\n",
+ AAudio_convertResultToText(result));
+ goto finish;
+ }
+
+ for (int second = 0; second < NUM_SECONDS; second++) {
+ // Give AAudio callback time to run in the background.
+ sleep(1);
+
+ // Periodically print the progress so we know it hasn't died.
+ printf("framesWritten = %d, XRuns = %d\n",
+ (int) AAudioStream_getFramesWritten(aaudioStream),
+ (int) AAudioStream_getXRunCount(aaudioStream)
+ );
+ }
+
+ result = AAudioStream_requestStop(aaudioStream);
+ if (result != AAUDIO_OK) {
+ printf("AAudioStream_requestStop returned %s\n",
+ AAudio_convertResultToText(result));
+ }
+
+ printf("timestampCount = %d\n", sTimestampData.timestampCount);
+ int printed = 0;
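+        // Print the first ten entries plus any entry where the reported position
+        // or time changed, capped at 20 lines per loop.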
+ for (int i = 0; i < sTimestampData.timestampCount; i++) {
+ TimestampInfo *timestamp = &sTimestampData.timestamps[i];
+            bool posChanged = (i > 0)
+                    && (timestamp->timestampPosition != (timestamp - 1)->timestampPosition);
+            bool timeChanged = (i > 0)
+                    && (timestamp->timestampNanos != (timestamp - 1)->timestampNanos);
+ if ((printed < 20) && ((i < 10) || posChanged || timeChanged)) {
+ printf(" %3d : frames %8lld, xferd %8lld", i,
+ (long long) timestamp->framesTotal,
+ (long long) timestamp->appPosition);
+ if (timestamp->result != AAUDIO_OK) {
+ printf(", result = %s\n", AAudio_convertResultToText(timestamp->result));
+ } else {
+ bool negative = timestamp->timestampPosition < 0;
+ bool retro = (i > 0 && (timestamp->timestampPosition <
+ (timestamp - 1)->timestampPosition));
+                const char *message = negative ? " <= NEGATIVE!"
+ : (retro ? " <= RETROGRADE!" : "");
+
+ double latency = calculateLatencyMillis(timestamp->timestampPosition,
+ timestamp->timestampNanos,
+ timestamp->appPosition,
+ timestamp->appNanoseconds,
+ actualSampleRate);
+ printf(", STAMP: pos = %8lld, nanos = %8lld, lat = %7.1f msec %s\n",
+ (long long) timestamp->timestampPosition,
+ (long long) timestamp->timestampNanos,
+ latency,
+ message);
+ }
+ printed++;
}
}
- // Print timestamps.
- int64_t framePosition = 0;
- int64_t frameTime = 0;
- aaudio_result_t timeResult;
- timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
- &framePosition, &frameTime);
-
- if (timeResult == AAUDIO_OK) {
- if (framePosition > (previousFramePosition + 5000)) {
- int64_t realTime = getNanoseconds();
- int64_t framesWritten = AAudioStream_getFramesWritten(aaudioStream);
-
- double latencyMillis = calculateLatencyMillis(framePosition, frameTime,
- framesWritten, realTime,
- actualSampleRate);
-
- printf("--- timestamp: result = %4d, position = %lld, at %lld nanos"
- ", latency = %7.2f msec\n",
- timeResult,
- (long long) framePosition,
- (long long) frameTime,
- latencyMillis);
- previousFramePosition = framePosition;
- }
- }
+ // Avoid race conditions in AudioFlinger.
+ // There is normally a delay between a real user stopping and restarting a stream.
+ sleep(1);
}
- result = AAudioStream_requestStop(aaudioStream);
- if (result != AAUDIO_OK) {
- printf("AAudioStream_requestStop returned %s\n",
- AAudio_convertResultToText(result));
- }
- result = AAudioStream_close(aaudioStream);
- if (result != AAUDIO_OK) {
- printf("AAudioStream_close returned %s\n",
- AAudio_convertResultToText(result));
- }
- aaudioStream = nullptr;
-
-
finish:
if (aaudioStream != nullptr) {
AAudioStream_close(aaudioStream);
}
AAudioStreamBuilder_delete(aaudioBuilder);
- delete[] buffer;
printf("result = %d = %s\n", result, AAudio_convertResultToText(result));
+
+ return result;
+}
+
+int main(int argc, char **argv) {
+ (void) argc;
+    (void) argv;
+
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("Test Timestamps V0.1.2\n");
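+
+    // Only one combination is enabled by default; uncomment the calls below to
+    // cover the legacy and MMAP paths in both directions.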
+ // Legacy
+// result = testTimeStamps(AAUDIO_POLICY_NEVER,
+// AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_NONE,
+// AAUDIO_DIRECTION_INPUT);
+// result = testTimeStamps(AAUDIO_POLICY_NEVER,
+// AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_INPUT);
+// result = testTimeStamps(AAUDIO_POLICY_NEVER, AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_NONE,
+// AAUDIO_DIRECTION_OUTPUT);
+ result = testTimeStamps(AAUDIO_POLICY_NEVER, AAUDIO_SHARING_MODE_SHARED,
+ AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+ AAUDIO_DIRECTION_OUTPUT);
+ // MMAP
+// result = testTimeStamps(AAUDIO_POLICY_ALWAYS,
+// AAUDIO_SHARING_MODE_EXCLUSIVE,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_INPUT);
+// result = testTimeStamps(AAUDIO_POLICY_ALWAYS,
+// AAUDIO_SHARING_MODE_EXCLUSIVE,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_OUTPUT);
+// result = testTimeStamps(AAUDIO_POLICY_ALWAYS, AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_INPUT);
+// result = testTimeStamps(AAUDIO_POLICY_ALWAYS, AAUDIO_SHARING_MODE_SHARED,
+// AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+// AAUDIO_DIRECTION_OUTPUT);
+
+ return (result == AAUDIO_OK) ? EXIT_SUCCESS : EXIT_FAILURE;
}