aaudio: reduce glitching by improving sleep timing
Always sleep a minimum time to avoid busy waiting in the real-time thread.
Account for wakeup jitter in threads on the other side of a FIFO.
Avoid race condition that caused a longer sleep than necessary.
Fix calculation of mFreeRunning for capture mode.
Also added systrace logging, which was used to debug this.
Bug: 63814792
Test: Run GStomper on Walleye with MMAP on, see bug
Change-Id: I7b20098580ff454365425bd21e43c17ade532a0a
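Note: a minimal sketch of the sleep-timing policy this change introduces (hypothetical helper and parameter names; the real logic is in AudioStreamInternal::processData() below):

    #include <algorithm>
    #include <stdint.h>

    // Decide when the polling loop should actually wake up.
    // wakeTimeNanos:    when the timing model expects the next burst to be ready.
    // wakeupDelayNanos: margin for the far thread's scheduling jitter.
    // minimumSleepNanos: floor so the real-time thread never busy-waits.
    int64_t chooseWakeTime(int64_t wakeTimeNanos,
                           int64_t currentTimeNanos,
                           int64_t wakeupDelayNanos,
                           int64_t minimumSleepNanos,
                           bool otherSideIsFreeRunning) {
        if (!otherSideIsFreeRunning) {
            // Software on the far side of the FIFO may wake up late,
            // so aim slightly after the predicted ready time.
            wakeTimeNanos += wakeupDelayNanos;
        }
        // Never sleep less than the minimum, even if the prediction is in the past.
        return std::max(wakeTimeNanos, currentTimeNanos + minimumSleepNanos);
    }

The result is passed to an absolute sleep (AudioClock::sleepUntilNanoTime) rather than a relative one, which also removes the race mentioned above.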
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 0684ed6..6ec285f 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -113,7 +113,8 @@
return result;
}
-aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
+aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor,
+ aaudio_direction_t direction)
{
aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
if (result != AAUDIO_OK) {
@@ -143,12 +144,20 @@
descriptor->dataAddress
);
- // ============================ down data queue =============================
+ // ============================ data queue =============================
descriptor = &pEndpointDescriptor->dataQueueDescriptor;
ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
- ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
- mFreeRunning = descriptor->readCounterAddress == nullptr;
+ ALOGV("AudioEndpoint::configure() data readCounterAddress = %p",
+ descriptor->readCounterAddress);
+
+ // An example of free running is when the other side of the FIFO is read or written by
+ // hardware DMA or a DSP. It does not update its counter, so we have to update it ourselves.
+ int64_t *remoteCounter = (direction == AAUDIO_DIRECTION_OUTPUT)
+ ? descriptor->readCounterAddress // read by other side
+ : descriptor->writeCounterAddress; // written by other side
+ mFreeRunning = (remoteCounter == nullptr);
ALOGV("AudioEndpoint::configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
+
int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
? &mDataReadCounter
: descriptor->readCounterAddress;
@@ -173,13 +182,8 @@
return mUpCommandQueue->read(commandPtr, 1);
}
-aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
-{
- return mDataQueue->write(buffer, numFrames);
-}
-
-void AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
- mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+int32_t AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
+ return mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
}
int32_t AudioEndpoint::getEmptyFramesAvailable()
@@ -187,7 +191,7 @@
return mDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
}
-void AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
+int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
{
return mDataQueue->getFullDataAvailable(wrappingBuffer);
}
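Note: the direction-dependent test above is the mFreeRunning fix for capture. A self-contained sketch of the same decision, using only the counter pointers from the data queue descriptor:

    #include <stdint.h>

    // For OUTPUT the far side consumes (reads); for INPUT/capture it produces (writes).
    // The old code always tested readCounterAddress, which could misclassify a capture
    // stream that is filled directly by DMA or a DSP.
    bool isOtherSideFreeRunning(const int64_t *readCounterAddress,
                                const int64_t *writeCounterAddress,
                                bool isOutput) {
        const int64_t *remoteCounter = isOutput ? readCounterAddress    // advanced by consumer
                                                : writeCounterAddress;  // advanced by producer
        return remoteCounter == nullptr;  // no shared counter => far side is free running
    }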
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index e7c6916..81a4f7b 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -40,7 +40,8 @@
/**
* Configure based on the EndPointDescriptor_t.
*/
- aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
+ aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor,
+ aaudio_direction_t direction);
/**
* Read from a command passed up from the Server.
@@ -48,17 +49,11 @@
*/
aaudio_result_t readUpCommand(AAudioServiceMessage *commandPtr);
- /**
- * Non-blocking write.
- * @return framesWritten or a negative error code.
- */
- aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
-
- void getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+ int32_t getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
int32_t getEmptyFramesAvailable();
- void getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+ int32_t getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
int32_t getFullFramesAvailable();
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 8b14922..4c7d0f7 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -28,10 +28,10 @@
#include <binder/IServiceManager.h>
#include <aaudio/AAudio.h>
+#include <cutils/properties.h>
#include <utils/String16.h>
#include <utils/Trace.h>
-#include "AudioClock.h"
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
@@ -39,6 +39,7 @@
#include "binding/AAudioServiceMessage.h"
#include "core/AudioStreamBuilder.h"
#include "fifo/FifoBuffer.h"
+#include "utility/AudioClock.h"
#include "utility/LinearRamp.h"
#include "AudioStreamInternal.h"
@@ -64,7 +65,12 @@
, mFramesPerBurst(16)
, mStreamVolume(1.0f)
, mInService(inService)
- , mServiceInterface(serviceInterface) {
+ , mServiceInterface(serviceInterface)
+ , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
+ , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
+ {
+ ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
+ mWakeupDelayNanos, mMinimumSleepNanos);
}
AudioStreamInternal::~AudioStreamInternal() {
@@ -135,7 +141,7 @@
}
// Configure endpoint based on descriptor.
- mAudioEndpoint.configure(&mEndpointDescriptor);
+ mAudioEndpoint.configure(&mEndpointDescriptor, getDirection());
mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
@@ -472,12 +478,12 @@
aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
{
- const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
+ const char * traceName = "aaProc";
+ const char * fifoName = "aaRdy";
ATRACE_BEGIN(traceName);
- int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
if (ATRACE_ENABLED()) {
- const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
- ATRACE_INT(traceName, fullFrames);
+ int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+ ATRACE_INT(fifoName, fullFrames);
}
aaudio_result_t result = AAUDIO_OK;
@@ -505,10 +511,12 @@
if (timeoutNanoseconds == 0) {
break; // don't block
} else if (framesLeft > 0) {
- // clip the wake time to something reasonable
- if (wakeTimeNanos < currentTimeNanos) {
- wakeTimeNanos = currentTimeNanos;
+ if (!mAudioEndpoint.isFreeRunning()) {
+ // If there is software on the other end of the FIFO then it may get delayed.
+ // So wake up just a little after we expect it to be ready.
+ wakeTimeNanos += mWakeupDelayNanos;
}
+
if (wakeTimeNanos > deadlineNanos) {
// If we time out, just return the framesWritten so far.
// TODO remove after we fix the deadline bug
@@ -525,12 +533,30 @@
break;
}
- int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
- AudioClock::sleepForNanos(sleepForNanos);
+ currentTimeNanos = AudioClock::getNanoseconds();
+ int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
+ // Guarantee a minimum sleep time.
+ if (wakeTimeNanos < earliestWakeTime) {
+ wakeTimeNanos = earliestWakeTime;
+ }
+
+ if (ATRACE_ENABLED()) {
+ int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+ ATRACE_INT(fifoName, fullFrames);
+ int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
+ ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
+ }
+
+ AudioClock::sleepUntilNanoTime(wakeTimeNanos);
currentTimeNanos = AudioClock::getNanoseconds();
}
}
+ if (ATRACE_ENABLED()) {
+ int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+ ATRACE_INT(fifoName, fullFrames);
+ }
+
// return error or framesProcessed
(void) loopCount;
ATRACE_END();
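Note: the hunk above also replaces the relative sleepForNanos() with an absolute sleepUntilNanoTime() and re-reads the clock just before sleeping, which removes the race that stretched the sleep. A sketch of an absolute-deadline sleep, assuming AudioClock runs on the monotonic clock:

    #include <time.h>
    #include <stdint.h>

    // Sleep until a fixed point on CLOCK_MONOTONIC. Unlike a relative sleep, any time
    // spent between computing wakeTimeNanos and calling this function is not silently
    // added on top of the sleep.
    static void sleepUntilNanoTime(int64_t wakeTimeNanos) {
        struct timespec ts;
        ts.tv_sec  = static_cast<time_t>(wakeTimeNanos / 1000000000LL);
        ts.tv_nsec = static_cast<long>(wakeTimeNanos % 1000000000LL);
        clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &ts, nullptr);
    }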
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 109e425..1b991de 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -27,6 +27,7 @@
#include "client/IsochronousClockModel.h"
#include "client/AudioEndpoint.h"
#include "core/AudioStream.h"
+#include "utility/AudioClock.h"
#include "utility/LinearRamp.h"
using android::sp;
@@ -173,6 +174,11 @@
// Adjust timing model based on timestamp from service.
void processTimestamp(uint64_t position, int64_t time);
+ // The thread on the other side of the FIFO will have wakeup jitter.
+ // By delaying slightly we can avoid waking up before the other side is ready.
+ const int32_t mWakeupDelayNanos; // delay past typical wakeup jitter
+ const int32_t mMinimumSleepNanos; // minimum sleep while polling
+
AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
};
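Note: for scale (assumed numbers), with the 200 us default wakeup delay defined in AAudioUtilities.cpp and a typical 2 ms MMAP burst period, the extra latency added per wakeup is about a tenth of a burst:

    #include <stdint.h>

    constexpr int32_t kWakeupDelayMicros   = 200;   // default from AAudioProperty_getWakeupDelayMicros()
    constexpr int32_t kBurstPeriodMicros   = 2000;  // assumed typical MMAP burst period
    constexpr int32_t kDelayPercentOfBurst =
            (100 * kWakeupDelayMicros) / kBurstPeriodMicros;  // == 10 percent

That is cheap insurance compared to the glitch caused by waking before the far side has serviced the FIFO.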
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 22f8bd1..7b1e53e 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -24,6 +24,9 @@
#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+#include <utils/Trace.h>
+
using android::WrappingBuffer;
using namespace aaudio;
@@ -36,7 +39,6 @@
AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
-
// Write the data, block if needed and timeoutMillis > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
@@ -52,6 +54,9 @@
return result;
}
+ const char *traceName = "aaRdNow";
+ ATRACE_BEGIN(traceName);
+
if (mAudioEndpoint.isFreeRunning()) {
//ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
// Update data queue based on the timing model.
@@ -63,6 +68,9 @@
// If the write index passed the read index then consider it an overrun.
if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
mXRunCount++;
+ if (ATRACE_ENABLED()) {
+ ATRACE_INT("aaOverRuns", mXRunCount);
+ }
}
// Read some data from the buffer.
@@ -70,6 +78,9 @@
int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
//ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
// numFrames, framesProcessed);
+ if (ATRACE_ENABLED()) {
+ ATRACE_INT("aaRead", framesProcessed);
+ }
// Calculate an ideal time to wake up.
if (wakeTimePtr != nullptr && framesProcessed >= 0) {
@@ -82,14 +93,14 @@
case AAUDIO_STREAM_STATE_OPEN:
case AAUDIO_STREAM_STATE_STARTING:
break;
- case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ case AAUDIO_STREAM_STATE_STARTED:
{
- uint32_t burstSize = mFramesPerBurst;
- if (burstSize < 32) {
- burstSize = 32; // TODO review
- }
+ // When do we expect the next write burst to occur?
- uint64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + burstSize;
+ // Calculate the frame position based on the readCounter because
+ // the writeCounter might have just advanced in the background,
+ // causing us to sleep until a later burst.
+ int64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
}
break;
@@ -99,10 +110,8 @@
*wakeTimePtr = wakeTime;
}
-// ALOGD("AudioStreamInternalCapture::readNow finished: now = %llu, read# = %llu, wrote# = %llu",
-// (unsigned long long)currentNanoTime,
-// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
-// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+
+ ATRACE_END();
return framesProcessed;
}
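Note: why the capture wake position switched from the write counter to the read counter, with hypothetical numbers:

    #include <stdint.h>

    // Suppose framesPerBurst = 96 and the client has read up to frame 960.
    // Old: wake position = writeCounter + burst.  If the other side wrote a burst just
    //      before this ran, writeCounter = 1056 and we would sleep until frame 1152,
    //      one whole burst (~2 ms at 48 kHz) later than necessary.
    // New: wake position = readCounter + burst = 960 + 96 = 1056, the earliest point
    //      at which read() can make progress again.
    int64_t nextCaptureWakePosition(int64_t readCounter, int32_t framesPerBurst) {
        return readCounter + framesPerBurst;
    }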
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 1b18577..31e0a40 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -18,6 +18,10 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
+#include <utils/Trace.h>
+
#include "client/AudioStreamInternalPlay.h"
#include "utility/AudioClock.h"
@@ -99,6 +103,10 @@
return result;
}
+ const char *traceName = "aaWrNow";
+ ATRACE_BEGIN(traceName);
+
+ // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
if (mAudioEndpoint.isFreeRunning()) {
// Update data queue based on the timing model.
int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
@@ -108,10 +116,10 @@
// If the read index passed the write index then consider it an underrun.
if (mAudioEndpoint.getFullFramesAvailable() < 0) {
- ALOGV("AudioStreamInternal::processDataNow() - XRun! write = %d, read = %d",
- (int)mAudioEndpoint.getDataWriteCounter(),
- (int)mAudioEndpoint.getDataReadCounter());
mXRunCount++;
+ if (ATRACE_ENABLED()) {
+ ATRACE_INT("aaUnderRuns", mXRunCount);
+ }
}
// Write some data to the buffer.
@@ -119,6 +127,9 @@
int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
//ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
// numFrames, framesWritten);
+ if (ATRACE_ENABLED()) {
+ ATRACE_INT("aaWrote", framesWritten);
+ }
// Calculate an ideal time to wake up.
if (wakeTimePtr != nullptr && framesWritten >= 0) {
@@ -135,14 +146,15 @@
wakeTime = currentNanoTime;
}
break;
- case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ case AAUDIO_STREAM_STATE_STARTED:
{
- uint32_t burstSize = mFramesPerBurst;
- if (burstSize < 32) {
- burstSize = 32; // TODO review
- }
+ // When do we expect the next read burst to occur?
- uint64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + burstSize;
+ // Calculate the frame position based on the writeCounter because
+ // the readCounter might have just advanced in the background,
+ // causing us to sleep until a later burst.
+ int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
+ - mAudioEndpoint.getBufferSizeInFrames();
wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
}
break;
@@ -152,10 +164,8 @@
*wakeTimePtr = wakeTime;
}
-// ALOGD("AudioStreamInternal::processDataNow finished: now = %llu, read# = %llu, wrote# = %llu",
-// (unsigned long long)currentNanoTime,
-// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
-// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+
+ ATRACE_END();
return framesWritten;
}
@@ -170,7 +180,7 @@
mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
- // Read data in one or two parts.
+ // Write data in one or two parts.
int partIndex = 0;
while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
int32_t framesToWrite = framesLeft;
@@ -291,6 +301,7 @@
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
AAudioStream_dataCallback appCallback = getDataCallbackProc();
if (appCallback == nullptr) return NULL;
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
// result might be a frame count
while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
@@ -302,10 +313,7 @@
mCallbackFrames);
if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- // Write audio data to stream.
- int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
-
- // This is a BLOCKING WRITE!
+ // Write audio data to stream. This is a BLOCKING WRITE!
result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
if ((result != mCallbackFrames)) {
ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 73f4c1d..4d0a7b8 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -150,7 +150,7 @@
int64_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
- int64_t time = (int64_t) (mMarkerNanoTime + nanosDelta);
+ int64_t time = mMarkerNanoTime + nanosDelta;
// ALOGD("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
// (unsigned long long)framePosition,
// (unsigned long long)time);
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 19b08c4..4859c69 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -82,9 +82,10 @@
mSampleRate, mSamplesPerFrame, mFormat,
AudioStream_convertSharingModeToShortText(mSharingMode),
(getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
- ALOGI("AudioStream::open() device = %d, perfMode = %d, callbackFrames = %d",
- mDeviceId, mPerformanceMode, mFramesPerDataCallback);
-
+ ALOGI("AudioStream::open() device = %d, perfMode = %d, callback: %s with frames = %d",
+ mDeviceId, mPerformanceMode,
+ (mDataCallbackProc == nullptr ? "OFF" : "ON"),
+ mFramesPerDataCallback);
return AAUDIO_OK;
}
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index 6b4a772..8d2c62d 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -105,16 +105,18 @@
}
-void FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
+fifo_frames_t FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
fifo_frames_t startIndex = mFifo->getReadIndex();
fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+ return framesAvailable;
}
-void FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
+fifo_frames_t FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
fifo_frames_t startIndex = mFifo->getWriteIndex();
fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+ return framesAvailable;
}
fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 2b262a1..a94e9b0 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -64,16 +64,18 @@
* if the data is split across the end of the FIFO then set data2 and numFrames2.
* Other wise set them to null
* @param wrappingBuffer
+ * @return total full frames available
*/
- void getFullDataAvailable(WrappingBuffer *wrappingBuffer);
+ fifo_frames_t getFullDataAvailable(WrappingBuffer *wrappingBuffer);
/**
* Return pointer to available empty frames in data1 and set size in numFrames1.
* if the room is split across the end of the FIFO then set data2 and numFrames2.
* Other wise set them to null
* @param wrappingBuffer
+ * @return total empty frames available
*/
- void getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
+ fifo_frames_t getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
/**
* Copy data from the FIFO into the buffer.
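Note: returning the frame count from getFullDataAvailable()/getEmptyRoomAvailable() lets a caller fetch the level and the wrapping pointers in one call, which is how the new trace counters are fed. A minimal caller sketch (include paths and namespaces as used elsewhere in this change):

    #define ATRACE_TAG ATRACE_TAG_AUDIO
    #include <utils/Trace.h>
    #include "fifo/FifoBuffer.h"

    // Trace the FIFO level without a second query of the FIFO controller.
    void traceFifoLevel(android::FifoBuffer &fifo) {
        android::WrappingBuffer wrappingBuffer;
        android::fifo_frames_t fullFrames = fifo.getFullDataAvailable(&wrappingBuffer);
        ATRACE_INT("aaRdy", fullFrames);
    }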
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index a3198d7..2450920 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -369,12 +369,43 @@
return prop;
}
+int32_t AAudioProperty_getWakeupDelayMicros() {
+ const int32_t minMicros = 0; // arbitrary
+ const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
+ const int32_t maxMicros = 5000; // arbitrary, probably don't want more than 500
+ int32_t prop = property_get_int32(AAUDIO_PROP_WAKEUP_DELAY_USEC, defaultMicros);
+ if (prop < minMicros) {
+ ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, minMicros);
+ prop = minMicros;
+ } else if (prop > maxMicros) {
+ ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, maxMicros);
+ prop = maxMicros;
+ }
+ return prop;
+}
+
+int32_t AAudioProperty_getMinimumSleepMicros() {
+ const int32_t minMicros = 20; // arbitrary
+ const int32_t defaultMicros = 200; // arbitrary
+ const int32_t maxMicros = 2000; // arbitrary
+ int32_t prop = property_get_int32(AAUDIO_PROP_MINIMUM_SLEEP_USEC, defaultMicros);
+ if (prop < minMicros) {
+ ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, minMicros);
+ prop = minMicros;
+ } else if (prop > maxMicros) {
+ ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, maxMicros);
+ prop = maxMicros;
+ }
+ return prop;
+}
+
int32_t AAudioProperty_getHardwareBurstMinMicros() {
const int32_t defaultMicros = 1000; // arbitrary
const int32_t maxMicros = 1000 * 1000; // arbitrary
int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
if (prop < 1 || prop > maxMicros) {
- ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d", prop);
+ ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
+ prop, defaultMicros);
prop = defaultMicros;
}
return prop;
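Note: both new readers follow the same clamp-and-warn pattern; a hypothetical shared helper makes the shape explicit (the real code keeps the two functions separate):

    #define LOG_TAG "AAudio"
    #include <utils/Log.h>
    #include <cutils/properties.h>

    static int32_t getClampedMicrosProperty(const char *name, int32_t defaultMicros,
                                            int32_t minMicros, int32_t maxMicros) {
        int32_t prop = property_get_int32(name, defaultMicros);
        if (prop < minMicros) {
            ALOGW("%s: clipped %d to %d", name, prop, minMicros);
            prop = minMicros;
        } else if (prop > maxMicros) {
            ALOGW("%s: clipped %d to %d", name, prop, maxMicros);
            prop = maxMicros;
        }
        return prop;
    }

The properties are read when the stream object is constructed, so a changed value only affects streams opened afterwards.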
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index efd663d..acd319b 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -195,13 +195,35 @@
/**
* Read system property.
- * @return number of bursts per mixer cycle
+ * @return number of bursts per AAudio service mixer cycle
*/
int32_t AAudioProperty_getMixerBursts();
#define AAUDIO_PROP_HW_BURST_MIN_USEC "aaudio.hw_burst_min_usec"
/**
+ * Read a system property that specifies the number of extra microseconds that a thread
+ * should sleep when waiting for another thread to service a FIFO. This is used
+ * to keep the waking thread from being overly optimistic about the other thread's
+ * wakeup timing. This value should be set high enough to cover typical scheduling jitter
+ * for a real-time thread.
+ *
+ * @return number of microseconds to delay the wakeup.
+ */
+int32_t AAudioProperty_getWakeupDelayMicros();
+
+#define AAUDIO_PROP_WAKEUP_DELAY_USEC "aaudio.wakeup_delay_usec"
+
+/**
+ * Read a system property that specifies the minimum sleep time when polling the FIFO.
+ *
+ * @return minimum number of microseconds to sleep.
+ */
+int32_t AAudioProperty_getMinimumSleepMicros();
+
+#define AAUDIO_PROP_MINIMUM_SLEEP_USEC "aaudio.minimum_sleep_usec"
+
+/**
* Read system property.
* This is handy in case the DMA is bursting too quickly for the CPU to keep up.
* For example, there may be a DMA burst every 100 usec but you only
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index 43203d4..8222734 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -18,9 +18,17 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
#include <cstring>
+#include <utils/Trace.h>
+
#include "AAudioMixer.h"
+#ifndef AAUDIO_MIXER_ATRACE_ENABLED
+#define AAUDIO_MIXER_ATRACE_ENABLED 1
+#endif
+
using android::WrappingBuffer;
using android::FifoBuffer;
using android::fifo_frames_t;
@@ -41,13 +49,28 @@
memset(mOutputBuffer, 0, mBufferSizeInBytes);
}
-bool AAudioMixer::mix(FifoBuffer *fifo, float volume) {
+bool AAudioMixer::mix(int trackIndex, FifoBuffer *fifo, float volume) {
WrappingBuffer wrappingBuffer;
float *destination = mOutputBuffer;
fifo_frames_t framesLeft = mFramesPerBurst;
+#if AAUDIO_MIXER_ATRACE_ENABLED
+ ATRACE_BEGIN("aaMix");
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
+
// Gather the data from the client. May be in two parts.
- fifo->getFullDataAvailable(&wrappingBuffer);
+ fifo_frames_t fullFrames = fifo->getFullDataAvailable(&wrappingBuffer);
+#if AAUDIO_MIXER_ATRACE_ENABLED
+ if (ATRACE_ENABLED()) {
+ char rdyText[] = "aaMixRdy#";
+ char letter = 'A' + (trackIndex % 26);
+ rdyText[sizeof(rdyText) - 2] = letter;
+ ATRACE_INT(rdyText, fullFrames);
+ }
+#else /* MIXER_ATRACE_ENABLED */
+ (void) trackIndex;
+ (void) fullFrames;
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
// Mix data in one or two parts.
int partIndex = 0;
@@ -70,6 +93,10 @@
//ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
// framesLeft, mFramesPerBurst);
}
+#if AAUDIO_MIXER_ATRACE_ENABLED
+ ATRACE_END();
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
+
return (framesLeft > 0); // did not get all the frames we needed, ie. "underflow"
}
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index 9155fec..a8090bc 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -37,7 +37,7 @@
* @param volume
* @return true if underflowed
*/
- bool mix(android::FifoBuffer *fifo, float volume);
+ bool mix(int trackIndex, android::FifoBuffer *fifo, float volume);
void mixPart(float *destination, float *source, int32_t numFrames, float volume);
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index e609ab5..b83b918 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -74,16 +74,18 @@
// Mix data from each active stream.
mMixer.clear();
{ // use lock guard
+ int index = 0;
std::lock_guard <std::mutex> lock(mLockStreams);
for (sp<AAudioServiceStreamShared> sharedStream : mRegisteredStreams) {
if (sharedStream->isRunning()) {
FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
float volume = 1.0; // to match legacy volume
- bool underflowed = mMixer.mix(fifo, volume);
+ bool underflowed = mMixer.mix(index, fifo, volume);
underflowCount += underflowed ? 1 : 0;
// TODO log underflows in each stream
sharedStream->markTransferTime(AudioClock::getNanoseconds());
}
+ index++;
}
}
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index afa2ff0..ff02c0f 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -154,7 +154,7 @@
status);
return AAUDIO_ERROR_UNAVAILABLE;
} else {
- ALOGD("createMmapBuffer status %d, buffer_size, %d burst_size %d"
+ ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size %d"
", Sharable FD: %s",
status,
abs(mMmapBufferinfo.buffer_size_frames),