aaudio: lower latency using MMAP capture
MMAP can be enabled by setting system properties.
Bug: 38267780
Test: input_monitor.cpp
Change-Id: I5e86fd1d9baef4fe59837ccbca7971acbb54d8b5
Signed-off-by: Phil Burk <philburk@google.com>
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
index 0e14af0..7590d6a 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -23,14 +23,13 @@
#include <math.h>
#include <aaudio/AAudio.h>
-#define SAMPLE_RATE 48000
-#define NUM_SECONDS 10
+#define SAMPLE_RATE 48000
+#define NUM_SECONDS 6
#define NANOS_PER_MICROSECOND ((int64_t)1000)
#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
-#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
-#define DECAY_FACTOR 0.999
-#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
+#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
const char *modeText = "unknown";
@@ -59,6 +58,7 @@
aaudio_audio_format_t actualDataFormat;
const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
aaudio_sharing_mode_t actualSharingMode;
AAudioStreamBuilder *aaudioBuilder = nullptr;
@@ -143,27 +143,27 @@
state = AAudioStream_getState(aaudioStream);
printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
- // Play for a while.
+ // Record for a while.
framesToRecord = actualSampleRate * NUM_SECONDS;
framesLeft = framesToRecord;
while (framesLeft > 0) {
// Read audio data from the stream.
- int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
+ const int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
if (actual < 0) {
- fprintf(stderr, "ERROR - AAudioStream_read() returned %zd\n", actual);
+ fprintf(stderr, "ERROR - AAudioStream_read() returned %d\n", actual);
+ result = actual;
goto finish;
} else if (actual == 0) {
- fprintf(stderr, "WARNING - AAudioStream_read() returned %zd\n", actual);
+ fprintf(stderr, "WARNING - AAudioStream_read() returned %d\n", actual);
goto finish;
}
framesLeft -= actual;
- // Peak follower.
+ // Peak finder.
for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
- peakLevel *= DECAY_FACTOR;
if (sample > peakLevel) {
peakLevel = sample;
}
@@ -177,6 +177,7 @@
printf("*");
}
printf("\n");
+ peakLevel = 0.0;
}
}
@@ -184,9 +185,9 @@
printf("AAudioStream_getXRunCount %d\n", xRunCount);
finish:
- delete[] data;
AAudioStream_close(aaudioStream);
AAudioStreamBuilder_delete(aaudioBuilder);
+ delete[] data;
printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
}
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
index 7c34252..4c2d870 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -94,7 +94,7 @@
AAudioStreamBuilder_setDirection(mBuilder, AAUDIO_DIRECTION_INPUT);
AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
AAudioStreamBuilder_setDataCallback(mBuilder, proc, userContext);
- AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
+ AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_I16);
// Open an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
@@ -104,9 +104,9 @@
goto finish1;
}
- printf("AAudioStream_getFramesPerBurst() = %d\n",
+ printf("AAudioStream_getFramesPerBurst() = %d\n",
AAudioStream_getFramesPerBurst(mStream));
- printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+ printf("AAudioStream_getBufferSizeInFrames() = %d\n",
AAudioStream_getBufferSizeInFrames(mStream));
printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
AAudioStream_getBufferCapacityInFrames(mStream));
@@ -285,7 +285,7 @@
if (result != AAUDIO_OK) {
goto error;
}
- sleep(1);
+ usleep(2000 * 1000);
result = recorder.start();
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - recorder.start() returned %d\n", result);
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index bad21f7..1b52652 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -344,8 +344,8 @@
aaudio_audio_format_t actualInputFormat;
aaudio_audio_format_t actualOutputFormat;
- //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
- const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_sharing_mode_t actualSharingMode;
AAudioStreamBuilder *builder = nullptr;
diff --git a/media/libaaudio/src/Android.mk b/media/libaaudio/src/Android.mk
index f43c0ad..a452c1d 100644
--- a/media/libaaudio/src/Android.mk
+++ b/media/libaaudio/src/Android.mk
@@ -44,6 +44,8 @@
fifo/FifoControllerBase.cpp \
client/AudioEndpoint.cpp \
client/AudioStreamInternal.cpp \
+ client/AudioStreamInternalCapture.cpp \
+ client/AudioStreamInternalPlay.cpp \
client/IsochronousClockModel.cpp \
binding/AudioEndpointParcelable.cpp \
binding/AAudioBinderClient.cpp \
@@ -99,6 +101,8 @@
fifo/FifoControllerBase.cpp \
client/AudioEndpoint.cpp \
client/AudioStreamInternal.cpp \
+ client/AudioStreamInternalCapture.cpp \
+ client/AudioStreamInternalPlay.cpp \
client/IsochronousClockModel.cpp \
binding/AudioEndpointParcelable.cpp \
binding/AAudioBinderClient.cpp \
diff --git a/media/libaaudio/src/binding/AAudioServiceDefinitions.h b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
index 2de560b..638544e 100644
--- a/media/libaaudio/src/binding/AAudioServiceDefinitions.h
+++ b/media/libaaudio/src/binding/AAudioServiceDefinitions.h
@@ -77,8 +77,7 @@
// Set capacityInFrames to zero if Queue is unused.
RingBufferDescriptor upMessageQueueDescriptor; // server to client
RingBufferDescriptor downMessageQueueDescriptor; // client to server
- RingBufferDescriptor upDataQueueDescriptor; // eg. record
- RingBufferDescriptor downDataQueueDescriptor; // eg. playback
+ RingBufferDescriptor dataQueueDescriptor; // playback or capture
} EndpointDescriptor;
} // namespace aaudio
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index ee92ee3..d05abb0 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -93,10 +93,8 @@
&descriptor->downMessageQueueDescriptor);
if (result != AAUDIO_OK) return result;
- result = mUpDataQueueParcelable.resolve(mSharedMemories, &descriptor->upDataQueueDescriptor);
- if (result != AAUDIO_OK) return result;
result = mDownDataQueueParcelable.resolve(mSharedMemories,
- &descriptor->downDataQueueDescriptor);
+ &descriptor->dataQueueDescriptor);
return result;
}
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index 1094d9e..899eb04 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -62,6 +62,8 @@
return status;
}
if (mSizeInBytes > 0) {
+ // Keep the original FD open until you are done with mFd.
+ // If you close the original FD here, mFd will stop working.
mOriginalFd = parcel->readFileDescriptor();
ALOGV("SharedMemoryParcelable::readFromParcel() LEAK? mOriginalFd = %d\n", mOriginalFd);
mFd = fcntl(mOriginalFd, F_DUPFD_CLOEXEC, 0);
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index e6751c49..5cb642b 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -32,7 +32,7 @@
#define RIDICULOUSLY_LARGE_FRAME_SIZE 4096
AudioEndpoint::AudioEndpoint()
- : mOutputFreeRunning(false)
+ : mFreeRunning(false)
, mDataReadCounter(0)
, mDataWriteCounter(0)
{
@@ -108,7 +108,7 @@
&pEndpointDescriptor->upMessageQueueDescriptor);
if (result == AAUDIO_OK) {
result = AudioEndpoint_validateQueueDescriptor("data",
- &pEndpointDescriptor->downDataQueueDescriptor);
+ &pEndpointDescriptor->dataQueueDescriptor);
}
return result;
}
@@ -144,11 +144,11 @@
);
// ============================ down data queue =============================
- descriptor = &pEndpointDescriptor->downDataQueueDescriptor;
+ descriptor = &pEndpointDescriptor->dataQueueDescriptor;
ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
- mOutputFreeRunning = descriptor->readCounterAddress == nullptr;
- ALOGV("AudioEndpoint::configure() mOutputFreeRunning = %d", mOutputFreeRunning ? 1 : 0);
+ mFreeRunning = descriptor->readCounterAddress == nullptr;
+ ALOGV("AudioEndpoint::configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
? &mDataReadCounter
: descriptor->readCounterAddress;
@@ -156,7 +156,7 @@
? &mDataWriteCounter
: descriptor->writeCounterAddress;
- mDownDataQueue = new FifoBuffer(
+ mDataQueue = new FifoBuffer(
descriptor->bytesPerFrame,
descriptor->capacityInFrames,
readCounterAddress,
@@ -164,7 +164,7 @@
descriptor->dataAddress
);
uint32_t threshold = descriptor->capacityInFrames / 2;
- mDownDataQueue->setThreshold(threshold);
+ mDataQueue->setThreshold(threshold);
return result;
}
@@ -175,44 +175,54 @@
aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
{
- return mDownDataQueue->write(buffer, numFrames);
+ return mDataQueue->write(buffer, numFrames);
}
-void AudioEndpoint::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
- mDownDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+void AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
+ mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
}
-int32_t AudioEndpoint::getEmptyFramesAvailable() {
- return mDownDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
+int32_t AudioEndpoint::getEmptyFramesAvailable()
+{
+ return mDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
+}
+
+void AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
+{
+ return mDataQueue->getFullDataAvailable(wrappingBuffer);
}
int32_t AudioEndpoint::getFullFramesAvailable()
{
- return mDownDataQueue->getFifoControllerBase()->getFullFramesAvailable();
+ return mDataQueue->getFifoControllerBase()->getFullFramesAvailable();
}
void AudioEndpoint::advanceWriteIndex(int32_t deltaFrames) {
- mDownDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
+ mDataQueue->getFifoControllerBase()->advanceWriteIndex(deltaFrames);
}
-void AudioEndpoint::setDownDataReadCounter(fifo_counter_t framesRead)
-{
- mDownDataQueue->setReadCounter(framesRead);
+void AudioEndpoint::advanceReadIndex(int32_t deltaFrames) {
+ mDataQueue->getFifoControllerBase()->advanceReadIndex(deltaFrames);
}
-fifo_counter_t AudioEndpoint::getDownDataReadCounter()
+void AudioEndpoint::setDataReadCounter(fifo_counter_t framesRead)
{
- return mDownDataQueue->getReadCounter();
+ mDataQueue->setReadCounter(framesRead);
}
-void AudioEndpoint::setDownDataWriteCounter(fifo_counter_t framesRead)
+fifo_counter_t AudioEndpoint::getDataReadCounter()
{
- mDownDataQueue->setWriteCounter(framesRead);
+ return mDataQueue->getReadCounter();
}
-fifo_counter_t AudioEndpoint::getDownDataWriteCounter()
+void AudioEndpoint::setDataWriteCounter(fifo_counter_t framesRead)
{
- return mDownDataQueue->getWriteCounter();
+ mDataQueue->setWriteCounter(framesRead);
+}
+
+fifo_counter_t AudioEndpoint::getDataWriteCounter()
+{
+ return mDataQueue->getWriteCounter();
}
int32_t AudioEndpoint::setBufferSizeInFrames(int32_t requestedFrames,
@@ -221,18 +231,18 @@
if (requestedFrames < ENDPOINT_DATA_QUEUE_SIZE_MIN) {
requestedFrames = ENDPOINT_DATA_QUEUE_SIZE_MIN;
}
- mDownDataQueue->setThreshold(requestedFrames);
- *actualFrames = mDownDataQueue->getThreshold();
+ mDataQueue->setThreshold(requestedFrames);
+ *actualFrames = mDataQueue->getThreshold();
return AAUDIO_OK;
}
int32_t AudioEndpoint::getBufferSizeInFrames() const
{
- return mDownDataQueue->getThreshold();
+ return mDataQueue->getThreshold();
}
int32_t AudioEndpoint::getBufferCapacityInFrames() const
{
- return (int32_t)mDownDataQueue->getBufferCapacityInFrames();
+ return (int32_t)mDataQueue->getBufferCapacityInFrames();
}
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 3a2099f..53ba033 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -54,29 +54,36 @@
*/
aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
- void getEmptyRoomAvailable(android::WrappingBuffer *wrappingBuffer);
+ void getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
int32_t getEmptyFramesAvailable();
+
+ void getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+
int32_t getFullFramesAvailable();
+ void advanceReadIndex(int32_t deltaFrames);
+
void advanceWriteIndex(int32_t deltaFrames);
/**
* Set the read index in the downData queue.
* This is needed if the reader is not updating the index itself.
*/
- void setDownDataReadCounter(android::fifo_counter_t framesRead);
- android::fifo_counter_t getDownDataReadCounter();
+ void setDataReadCounter(android::fifo_counter_t framesRead);
- void setDownDataWriteCounter(android::fifo_counter_t framesWritten);
- android::fifo_counter_t getDownDataWriteCounter();
+ android::fifo_counter_t getDataReadCounter();
+
+ void setDataWriteCounter(android::fifo_counter_t framesWritten);
+
+ android::fifo_counter_t getDataWriteCounter();
/**
* The result is not valid until after configure() is called.
*
* @return true if the output buffer read position is not updated, eg. DMA
*/
- bool isOutputFreeRunning() const { return mOutputFreeRunning; }
+ bool isFreeRunning() const { return mFreeRunning; }
int32_t setBufferSizeInFrames(int32_t requestedFrames,
int32_t *actualFrames);
@@ -86,8 +93,8 @@
private:
android::FifoBuffer *mUpCommandQueue;
- android::FifoBuffer *mDownDataQueue;
- bool mOutputFreeRunning;
+ android::FifoBuffer *mDataQueue;
+ bool mFreeRunning;
android::fifo_counter_t mDataReadCounter; // only used if free-running
android::fifo_counter_t mDataWriteCounter; // only used if free-running
};
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 143d4b7..701f698 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -41,8 +41,6 @@
#include "AudioStreamInternal.h"
-#define LOG_TIMESTAMPS 0
-
using android::String16;
using android::Mutex;
using android::WrappingBuffer;
@@ -59,6 +57,8 @@
//#define MYLOG_CONDITION (s_logCounter++ < 500000)
#define MYLOG_CONDITION (1)
+#define LOG_TIMESTAMPS 0
+
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService)
: AudioStream()
, mClockModel()
@@ -140,8 +140,8 @@
// Configure endpoint based on descriptor.
mAudioEndpoint.configure(&mEndpointDescriptor);
- mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
- int32_t capacity = mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames;
+ mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
+ int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d",
getLocationName(), mFramesPerBurst, capacity);
@@ -193,7 +193,7 @@
if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
// Don't close a stream while it is running.
aaudio_stream_state_t currentState = getState();
- if (isPlaying()) {
+ if (isActive()) {
requestStop();
aaudio_stream_state_t nextState;
int64_t timeoutNanoseconds = MIN_TIMEOUT_NANOS;
@@ -217,53 +217,6 @@
}
-// Render audio in the application callback and then write the data to the stream.
-void *AudioStreamInternal::callbackLoop() {
- aaudio_result_t result = AAUDIO_OK;
- aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
- AAudioStream_dataCallback appCallback = getDataCallbackProc();
- if (appCallback == nullptr) return NULL;
-
- // result might be a frame count
- while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
- // Call application using the AAudio callback interface.
- callbackResult = (*appCallback)(
- (AAudioStream *) this,
- getDataCallbackUserData(),
- mCallbackBuffer,
- mCallbackFrames);
-
- if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
- // Write audio data to stream.
- int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
-
- // This is a BLOCKING WRITE!
- result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
- if ((result != mCallbackFrames)) {
- ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
- if (result >= 0) {
- // Only wrote some of the frames requested. Must have timed out.
- result = AAUDIO_ERROR_TIMEOUT;
- }
- if (getErrorCallbackProc() != nullptr) {
- (*getErrorCallbackProc())(
- (AAudioStream *) this,
- getErrorCallbackUserData(),
- result);
- }
- break;
- }
- } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
- ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
- break;
- }
- }
-
- ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
- result, (int) isPlaying());
- return NULL;
-}
-
static void *aaudio_callback_thread_proc(void *context)
{
AudioStreamInternal *stream = (AudioStreamInternal *)context;
@@ -285,7 +238,6 @@
startTime = AudioClock::getNanoseconds();
mClockModel.start(startTime);
- processTimestamp(0, startTime);
setState(AAUDIO_STREAM_STATE_STARTING);
aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);;
@@ -313,11 +265,15 @@
return timeoutNanoseconds;
}
+int64_t AudioStreamInternal::calculateReasonableTimeout() {
+ return calculateReasonableTimeout(getFramesPerBurst());
+}
+
aaudio_result_t AudioStreamInternal::stopCallback()
{
if (isDataCallbackActive()) {
mCallbackEnabled.store(false);
- return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
+ return joinThread(NULL);
} else {
return AAUDIO_OK;
}
@@ -360,10 +316,11 @@
return mServiceInterface.flushStream(mServiceStreamHandle);
}
+// TODO for Play only
void AudioStreamInternal::onFlushFromServer() {
ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
- int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
- int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
+ int64_t readCounter = mAudioEndpoint.getDataReadCounter();
+ int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
// Bump offset so caller does not see the retrograde motion in getFramesRead().
int64_t framesFlushed = writeCounter - readCounter;
@@ -371,7 +328,7 @@
// Flush written frames by forcing writeCounter to readCounter.
// This is because we cannot move the read counter in the hardware.
- mAudioEndpoint.setDownDataWriteCounter(readCounter);
+ mAudioEndpoint.setDataWriteCounter(readCounter);
}
aaudio_result_t AudioStreamInternal::requestStopInternal()
@@ -422,7 +379,8 @@
// TODO Generate in server and pass to client. Return latest.
int64_t time = AudioClock::getNanoseconds();
*framePosition = mClockModel.convertTimeToPosition(time);
- *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
+ // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
+ *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
return AAUDIO_OK;
}
@@ -434,7 +392,7 @@
}
#if LOG_TIMESTAMPS
-static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
+static void AudioStreamInternal_logTimestamp(AAudioServiceMessage &command) {
static int64_t oldPosition = 0;
static int64_t oldTime = 0;
int64_t framePosition = command.timestamp.position;
@@ -456,12 +414,10 @@
#endif
aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
- int64_t framePosition = 0;
#if LOG_TIMESTAMPS
- AudioStreamInternal_LogTimestamp(command);
+ AudioStreamInternal_logTimestamp(*message);
#endif
- framePosition = message->timestamp.position;
- processTimestamp(framePosition, message->timestamp.timestamp);
+ processTimestamp(message->timestamp.position, message->timestamp.timestamp);
return AAUDIO_OK;
}
@@ -471,20 +427,28 @@
switch (message->event.event) {
case AAUDIO_SERVICE_EVENT_STARTED:
ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
- setState(AAUDIO_STREAM_STATE_STARTED);
+ if (getState() == AAUDIO_STREAM_STATE_STARTING) {
+ setState(AAUDIO_STREAM_STATE_STARTED);
+ }
break;
case AAUDIO_SERVICE_EVENT_PAUSED:
ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
- setState(AAUDIO_STREAM_STATE_PAUSED);
+ if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
+ setState(AAUDIO_STREAM_STATE_PAUSED);
+ }
break;
case AAUDIO_SERVICE_EVENT_STOPPED:
ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
- setState(AAUDIO_STREAM_STATE_STOPPED);
+ if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
+ setState(AAUDIO_STREAM_STATE_STOPPED);
+ }
break;
case AAUDIO_SERVICE_EVENT_FLUSHED:
ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
- setState(AAUDIO_STREAM_STATE_FLUSHED);
- onFlushFromServer();
+ if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
+ setState(AAUDIO_STREAM_STATE_FLUSHED);
+ onFlushFromServer();
+ }
break;
case AAUDIO_SERVICE_EVENT_CLOSED:
ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
@@ -537,15 +501,15 @@
return result;
}
-// Write the data, block if needed and timeoutMillis > 0
-aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
- int64_t timeoutNanoseconds)
+// Read or write the data, block if needed and timeoutMillis > 0
+aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
{
const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
ATRACE_BEGIN(traceName);
aaudio_result_t result = AAUDIO_OK;
int32_t loopCount = 0;
- uint8_t* source = (uint8_t*)buffer;
+ uint8_t* audioData = (uint8_t*)buffer;
int64_t currentTimeNanos = AudioClock::getNanoseconds();
int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
int32_t framesLeft = numFrames;
@@ -556,19 +520,19 @@
ATRACE_INT(traceName, fullFrames);
}
- // Write until all the data has been written or until a timeout occurs.
+ // Loop until all the data has been processed or until a timeout occurs.
while (framesLeft > 0) {
- // The call to writeNow() will not block. It will just write as much as it can.
+ // The call to processDataNow() will not block. It will just read as much as it can.
int64_t wakeTimeNanos = 0;
- aaudio_result_t framesWritten = writeNow(source, framesLeft,
- currentTimeNanos, &wakeTimeNanos);
- if (framesWritten < 0) {
- ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
- result = framesWritten;
+ aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
+ currentTimeNanos, &wakeTimeNanos);
+ if (framesProcessed < 0) {
+ ALOGE("AudioStreamInternal::processData() loop: framesProcessed = %d", framesProcessed);
+ result = framesProcessed;
break;
}
- framesLeft -= (int32_t) framesWritten;
- source += framesWritten * getBytesPerFrame();
+ framesLeft -= (int32_t) framesProcessed;
+ audioData += framesProcessed * getBytesPerFrame();
// Should we block?
if (timeoutNanoseconds == 0) {
@@ -580,190 +544,32 @@
}
if (wakeTimeNanos > deadlineNanos) {
// If we time out, just return the framesWritten so far.
- ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
+ ALOGE("AudioStreamInternal::processData(): timed out after %lld nanos",
(long long) timeoutNanoseconds);
+ ALOGE("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
+ (long long) wakeTimeNanos, (long long) deadlineNanos);
+ ALOGE("AudioStreamInternal::processData(): past deadline by %d micros",
+ (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
+
break;
}
int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
+ //ALOGE("AudioStreamInternal::processData(): sleep for %d micros",
+ // (int)(sleepForNanos / AAUDIO_NANOS_PER_MICROSECOND));
AudioClock::sleepForNanos(sleepForNanos);
currentTimeNanos = AudioClock::getNanoseconds();
}
}
- // return error or framesWritten
+ // return error or framesProcessed
(void) loopCount;
ATRACE_END();
return (result < 0) ? result : numFrames - framesLeft;
}
-// Write as much data as we can without blocking.
-aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
- int64_t currentNanoTime, int64_t *wakeTimePtr) {
- aaudio_result_t result = processCommands();
- if (result != AAUDIO_OK) {
- return result;
- }
-
- if (mAudioEndpoint.isOutputFreeRunning()) {
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
- // Update data queue based on the timing model.
- int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
- mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
- }
- // TODO else query from endpoint cuz set by actual reader, maybe
-
- // If the read index passed the write index then consider it an underrun.
- if (mAudioEndpoint.getFullFramesAvailable() < 0) {
- mXRunCount++;
- }
-
- // Write some data to the buffer.
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
- int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
- // numFrames, framesWritten);
-
- // Calculate an ideal time to wake up.
- if (wakeTimePtr != nullptr && framesWritten >= 0) {
- // By default wake up a few milliseconds from now. // TODO review
- int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
- aaudio_stream_state_t state = getState();
- //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
- // AAudio_convertStreamStateToText(state));
- switch (state) {
- case AAUDIO_STREAM_STATE_OPEN:
- case AAUDIO_STREAM_STATE_STARTING:
- if (framesWritten != 0) {
- // Don't wait to write more data. Just prime the buffer.
- wakeTime = currentNanoTime;
- }
- break;
- case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
- {
- uint32_t burstSize = mFramesPerBurst;
- if (burstSize < 32) {
- burstSize = 32; // TODO review
- }
-
- uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
- wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
- }
- break;
- default:
- break;
- }
- *wakeTimePtr = wakeTime;
-
- }
-// ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
-// (unsigned long long)currentNanoTime,
-// (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
-// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
- return framesWritten;
-}
-
-
-aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
- int32_t numFrames) {
- // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)",
- // buffer, numFrames);
- WrappingBuffer wrappingBuffer;
- uint8_t *source = (uint8_t *) buffer;
- int32_t framesLeft = numFrames;
-
- mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);
-
- // Read data in one or two parts.
- int partIndex = 0;
- while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
- int32_t framesToWrite = framesLeft;
- int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- if (framesAvailable > 0) {
- if (framesToWrite > framesAvailable) {
- framesToWrite = framesAvailable;
- }
- int32_t numBytes = getBytesPerFrame() * framesToWrite;
- int32_t numSamples = framesToWrite * getSamplesPerFrame();
- // Data conversion.
- float levelFrom;
- float levelTo;
- bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
- &levelFrom, &levelTo);
- // The formats are validated when the stream is opened so we do not have to
- // check for illegal combinations here.
- if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
- if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- AAudio_linearRamp(
- (const float *) source,
- (float *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
- if (ramping) {
- AAudioConvert_floatToPcm16(
- (const float *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_floatToPcm16(
- (const float *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- numSamples,
- levelTo);
- }
- }
- } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
- if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- if (ramping) {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source,
- (float *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- } else {
- AAudioConvert_pcm16ToFloat(
- (const int16_t *) source,
- (float *) wrappingBuffer.data[partIndex],
- numSamples,
- levelTo);
- }
- } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
- AAudio_linearRamp(
- (const int16_t *) source,
- (int16_t *) wrappingBuffer.data[partIndex],
- framesToWrite,
- getSamplesPerFrame(),
- levelFrom,
- levelTo);
- }
- }
- source += numBytes;
- framesLeft -= framesToWrite;
- } else {
- break;
- }
- partIndex++;
- }
- int32_t framesWritten = numFrames - framesLeft;
- mAudioEndpoint.advanceWriteIndex(framesWritten);
-
- if (framesWritten > 0) {
- incrementFramesWritten(framesWritten);
- }
- // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
- return framesWritten;
-}
-
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
- mClockModel.processTimestamp( position, time);
+ mClockModel.processTimestamp(position, time);
}
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
@@ -784,40 +590,18 @@
}
}
-int32_t AudioStreamInternal::getBufferSize() const
-{
+int32_t AudioStreamInternal::getBufferSize() const {
return mAudioEndpoint.getBufferSizeInFrames();
}
-int32_t AudioStreamInternal::getBufferCapacity() const
-{
+int32_t AudioStreamInternal::getBufferCapacity() const {
return mAudioEndpoint.getBufferCapacityInFrames();
}
-int32_t AudioStreamInternal::getFramesPerBurst() const
-{
- return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
+int32_t AudioStreamInternal::getFramesPerBurst() const {
+ return mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
}
-int64_t AudioStreamInternal::getFramesRead()
-{
- int64_t framesRead =
- mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
- + mFramesOffsetFromService;
- // Prevent retrograde motion.
- if (framesRead < mLastFramesRead) {
- framesRead = mLastFramesRead;
- } else {
- mLastFramesRead = framesRead;
- }
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
- return framesRead;
-}
-
-int64_t AudioStreamInternal::getFramesWritten()
-{
- int64_t getFramesWritten = mAudioEndpoint.getDownDataWriteCounter()
- + mFramesOffsetFromService;
- ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
- return getFramesWritten;
+aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
+ return AudioStream::joinThread(returnArg, calculateReasonableTimeout(getFramesPerBurst()));
}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index ee602c1..377f9c7 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -37,7 +37,7 @@
class AudioStreamInternal : public AudioStream {
public:
- AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService = false);
+ AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService);
virtual ~AudioStreamInternal();
// =========== Begin ABSTRACT methods ===========================
@@ -60,10 +60,6 @@
aaudio_result_t close() override;
- aaudio_result_t write(const void *buffer,
- int32_t numFrames,
- int64_t timeoutNanoseconds) override;
-
aaudio_result_t setBufferSize(int32_t requestedFrames) override;
int32_t getBufferSize() const override;
@@ -72,9 +68,6 @@
int32_t getFramesPerBurst() const override;
- int64_t getFramesRead() override;
- int64_t getFramesWritten() override;
-
int32_t getXRunCount() const override {
return mXRunCount;
}
@@ -83,16 +76,37 @@
aaudio_result_t unregisterThread() override;
+ aaudio_result_t joinThread(void** returnArg);
+
// Called internally from 'C'
- void *callbackLoop();
+ virtual void *callbackLoop() = 0;
bool isMMap() override {
return true;
}
+ // Calculate timeout based on framesPerBurst
+ int64_t calculateReasonableTimeout();
+
protected:
+ aaudio_result_t processData(void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds);
+
+/**
+ * Low level data processing that will not block. It will just read or write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames processed or a negative error code.
+ */
+ virtual aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) = 0;
+
aaudio_result_t processCommands();
aaudio_result_t requestPauseInternal();
@@ -100,17 +114,6 @@
aaudio_result_t stopCallback();
-/**
- * Low level write that will not block. It will just write as much as it can.
- *
- * It passed back a recommended time to wake up if wakeTimePtr is not NULL.
- *
- * @return the number of frames written or a negative error code.
- */
- aaudio_result_t writeNow(const void *buffer,
- int32_t numFrames,
- int64_t currentTimeNanos,
- int64_t *wakeTimePtr);
void onFlushFromServer();
@@ -121,6 +124,24 @@
// Calculate timeout for an operation involving framesPerOperation.
int64_t calculateReasonableTimeout(int32_t framesPerOperation);
+ aaudio_audio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
+
+ IsochronousClockModel mClockModel; // timing model for chasing the HAL
+
+ AudioEndpoint mAudioEndpoint; // source for reads or sink for writes
+ aaudio_handle_t mServiceStreamHandle; // opaque handle returned from service
+
+ int32_t mFramesPerBurst; // frames per HAL transfer
+ int32_t mXRunCount = 0; // how many underrun events?
+
+ LinearRamp mVolumeRamp;
+
+ // Offset from underlying frame position.
+ int64_t mFramesOffsetFromService = 0; // offset for timestamps
+
+ uint8_t *mCallbackBuffer = nullptr;
+ int32_t mCallbackFrames = 0;
+
private:
/*
* Asynchronous write with data conversion.
@@ -130,38 +151,20 @@
*/
aaudio_result_t writeNowWithConversion(const void *buffer,
int32_t numFrames);
- void processTimestamp(uint64_t position, int64_t time);
+ // Adjust timing model based on timestamp from service.
+ void processTimestamp(uint64_t position, int64_t time);
const char *getLocationName() const {
return mInService ? "SERVICE" : "CLIENT";
}
- // Adjust timing model based on timestamp from service.
-
- IsochronousClockModel mClockModel; // timing model for chasing the HAL
- AudioEndpoint mAudioEndpoint; // sink for writes
- aaudio_handle_t mServiceStreamHandle; // opaque handle returned from service
-
AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
-
- aaudio_audio_format_t mDeviceFormat = AAUDIO_FORMAT_UNSPECIFIED;
-
- uint8_t *mCallbackBuffer = nullptr;
- int32_t mCallbackFrames = 0;
-
- // Offset from underlying frame position.
- int64_t mFramesOffsetFromService = 0; // offset for timestamps
- int64_t mLastFramesRead = 0; // used to prevent retrograde motion
- int32_t mFramesPerBurst; // frames per HAL transfer
- int32_t mXRunCount = 0; // how many underrun events?
- LinearRamp mVolumeRamp;
-
AAudioServiceInterface &mServiceInterface; // abstract interface to the service
// The service uses this for SHARED mode.
- bool mInService = false; // Are running in the client or the service?
+ bool mInService = false; // Is this running in the client or the service?
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
new file mode 100644
index 0000000..93693bd
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <aaudio/AAudio.h>
+
+#include "client/AudioStreamInternalCapture.h"
+#include "utility/AudioClock.h"
+
+using android::WrappingBuffer;
+
+using namespace aaudio;
+
+AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
+ bool inService)
+ : AudioStreamInternal(serviceInterface, inService) {
+
+}
+
+AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
+
+
+// Read the data, block if needed and timeoutNanoseconds > 0
+aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
+{
+ return processData(buffer, numFrames, timeoutNanoseconds);
+}
+
+// Read as much data as we can without blocking.
+aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
+ int64_t currentNanoTime, int64_t *wakeTimePtr) {
+ aaudio_result_t result = processCommands();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ if (mAudioEndpoint.isFreeRunning()) {
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
+ // Update data queue based on the timing model.
+ int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ // TODO refactor, maybe use setRemoteCounter()
+ mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
+ }
+
+ // If the write index passed the read index then consider it an overrun.
+ if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
+ mXRunCount++;
+ }
+
+ // Read some data from the buffer.
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
+ int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
+ // numFrames, framesProcessed);
+
+ // Calculate an ideal time to wake up.
+ if (wakeTimePtr != nullptr && framesProcessed >= 0) {
+ // By default wake up a few milliseconds from now. // TODO review
+ int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
+ aaudio_stream_state_t state = getState();
+ //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
+ // AAudio_convertStreamStateToText(state));
+ switch (state) {
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_STARTING:
+ break;
+ case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ {
+ uint32_t burstSize = mFramesPerBurst;
+ if (burstSize < 32) {
+ burstSize = 32; // TODO review
+ }
+
+ uint64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + burstSize;
+ wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ }
+ break;
+ default:
+ break;
+ }
+ *wakeTimePtr = wakeTime;
+
+ }
+// ALOGD("AudioStreamInternalCapture::readNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// (unsigned long long)currentNanoTime,
+// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
+// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+ return framesProcessed;
+}
+
+aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
+ int32_t numFrames) {
+ // ALOGD("AudioStreamInternalCapture::readNowWithConversion(%p, %d)",
+ // buffer, numFrames);
+ WrappingBuffer wrappingBuffer;
+ uint8_t *destination = (uint8_t *) buffer;
+ int32_t framesLeft = numFrames;
+
+ mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);
+
+ // Read data in one or two parts.
+ for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
+ int32_t framesToProcess = framesLeft;
+ int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailable <= 0) break;
+
+ if (framesToProcess > framesAvailable) {
+ framesToProcess = framesAvailable;
+ }
+
+ int32_t numBytes = getBytesPerFrame() * framesToProcess;
+ int32_t numSamples = framesToProcess * getSamplesPerFrame();
+
+ // TODO factor this out into a utility function
+ if (mDeviceFormat == getFormat()) {
+ memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
+ && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) wrappingBuffer.data[partIndex],
+ (float *) destination,
+ numSamples,
+ 1.0f);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
+ && getFormat() == AAUDIO_FORMAT_PCM_I16) {
+ AAudioConvert_floatToPcm16(
+ (const float *) wrappingBuffer.data[partIndex],
+ (int16_t *) destination,
+ numSamples,
+ 1.0f);
+ } else {
+ ALOGE("Format conversion not supported!");
+ return AAUDIO_ERROR_INVALID_FORMAT;
+ }
+ destination += numBytes;
+ framesLeft -= framesToProcess;
+ }
+
+ int32_t framesProcessed = numFrames - framesLeft;
+ mAudioEndpoint.advanceReadIndex(framesProcessed);
+ incrementFramesRead(framesProcessed);
+
+ //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
+ return framesProcessed;
+}
+
+int64_t AudioStreamInternalCapture::getFramesWritten()
+{
+ int64_t frames =
+ mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ + mFramesOffsetFromService;
+ // Prevent retrograde motion.
+ if (frames < mLastFramesWritten) {
+ frames = mLastFramesWritten;
+ } else {
+ mLastFramesWritten = frames;
+ }
+ //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld", (long long)frames);
+ return frames;
+}
+
+int64_t AudioStreamInternalCapture::getFramesRead()
+{
+ int64_t frames = mAudioEndpoint.getDataWriteCounter()
+ + mFramesOffsetFromService;
+ //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
+ return frames;
+}
+
+// Read data from the stream and pass it to the callback for processing.
+void *AudioStreamInternalCapture::callbackLoop() {
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ if (appCallback == nullptr) return NULL;
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
+
+ // Read audio data from stream.
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+
+ // This is a BLOCKING READ!
+ result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+ if ((result != mCallbackFrames)) {
+ ALOGE("AudioStreamInternalCapture(): callbackLoop: read() returned %d", result);
+ if (result >= 0) {
+ // Only read some of the frames requested. Must have timed out.
+ result = AAUDIO_ERROR_TIMEOUT;
+ }
+ AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
+ if (errorCallback != nullptr) {
+ (*errorCallback)(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ result);
+ }
+ break;
+ }
+
+ // Call application using the AAudio callback interface.
+ callbackResult = (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ mCallbackBuffer,
+ mCallbackFrames);
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ ALOGD("AudioStreamInternalCapture(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ break;
+ }
+ }
+
+ ALOGD("AudioStreamInternalCapture(): callbackLoop() exiting, result = %d, isActive() = %d",
+ result, (int) isActive());
+ return NULL;
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
new file mode 100644
index 0000000..17f37e8
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
+#define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceInterface.h"
+#include "client/AudioStreamInternal.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+class AudioStreamInternalCapture : public AudioStreamInternal {
+public:
+ AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface, bool inService = false);
+ virtual ~AudioStreamInternalCapture();
+
+ aaudio_result_t read(void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds) override;
+
+ int64_t getFramesRead() override;
+ int64_t getFramesWritten() override;
+
+ void *callbackLoop() override;
+
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_INPUT;
+ }
+protected:
+
+/**
+ * Low level data processing that will not block. It will just read or write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames processed or a negative error code.
+ */
+ aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) override;
+
+private:
+ /*
+ * Asynchronous read with data conversion.
+ * @param buffer
+ * @param numFrames
+ * @return frames read or negative error
+ */
+ aaudio_result_t readNowWithConversion(void *buffer, int32_t numFrames);
+
+ int64_t mLastFramesWritten = 0; // used to prevent retrograde motion
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_CAPTURE_H
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
new file mode 100644
index 0000000..fc9766f
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AAudio"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "client/AudioStreamInternalPlay.h"
+#include "utility/AudioClock.h"
+
+using android::WrappingBuffer;
+
+using namespace aaudio;
+
+AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface,
+ bool inService)
+ : AudioStreamInternal(serviceInterface, inService) {
+
+}
+
+AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
+
+
+// Write the data, block if needed and timeoutNanoseconds > 0
+aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
+ int64_t timeoutNanoseconds)
+
+{
+ return processData((void *)buffer, numFrames, timeoutNanoseconds);
+}
+
+// Write as much data as we can without blocking.
+aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
+ int64_t currentNanoTime, int64_t *wakeTimePtr) {
+ aaudio_result_t result = processCommands();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
+ if (mAudioEndpoint.isFreeRunning()) {
+ //ALOGD("AudioStreamInternal::processDataNow() - update read counter");
+ // Update data queue based on the timing model.
+ int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+ mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
+ }
+ // TODO else query from endpoint cuz set by actual reader, maybe
+
+ // If the read index passed the write index then consider it an underrun.
+ if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+ mXRunCount++;
+ }
+
+ // Write some data to the buffer.
+ //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
+ int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
+ //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
+ // numFrames, framesWritten);
+
+ // Calculate an ideal time to wake up.
+ if (wakeTimePtr != nullptr && framesWritten >= 0) {
+ // By default wake up a few milliseconds from now. // TODO review
+ int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
+ aaudio_stream_state_t state = getState();
+ //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
+ // AAudio_convertStreamStateToText(state));
+ switch (state) {
+ case AAUDIO_STREAM_STATE_OPEN:
+ case AAUDIO_STREAM_STATE_STARTING:
+ if (framesWritten != 0) {
+ // Don't wait to write more data. Just prime the buffer.
+ wakeTime = currentNanoTime;
+ }
+ break;
+ case AAUDIO_STREAM_STATE_STARTED: // When do we expect the next read burst to occur?
+ {
+ uint32_t burstSize = mFramesPerBurst;
+ if (burstSize < 32) {
+ burstSize = 32; // TODO review
+ }
+
+ uint64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + burstSize;
+ wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
+ }
+ break;
+ default:
+ break;
+ }
+ *wakeTimePtr = wakeTime;
+
+ }
+// ALOGD("AudioStreamInternal::processDataNow finished: now = %llu, read# = %llu, wrote# = %llu",
+// (unsigned long long)currentNanoTime,
+// (unsigned long long)mAudioEndpoint.getDataReadCounter(),
+// (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+ return framesWritten;
+}
+
+
+aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
+ int32_t numFrames) {
+ // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
+ // buffer, numFrames);
+ WrappingBuffer wrappingBuffer;
+ uint8_t *source = (uint8_t *) buffer;
+ int32_t framesLeft = numFrames;
+
+ mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
+
+ // Read data in one or two parts.
+ int partIndex = 0;
+ while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
+ int32_t framesToWrite = framesLeft;
+ int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailable > 0) {
+ if (framesToWrite > framesAvailable) {
+ framesToWrite = framesAvailable;
+ }
+ int32_t numBytes = getBytesPerFrame() * framesToWrite;
+ int32_t numSamples = framesToWrite * getSamplesPerFrame();
+ // Data conversion.
+ float levelFrom;
+ float levelTo;
+ bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
+ &levelFrom, &levelTo);
+ // The formats are validated when the stream is opened so we do not have to
+ // check for illegal combinations here.
+ // TODO factor this out into a utility function
+ if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ AAudio_linearRamp(
+ (const float *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+ if (ramping) {
+ AAudioConvert_floatToPcm16(
+ (const float *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_floatToPcm16(
+ (const float *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ numSamples,
+ levelTo);
+ }
+ }
+ } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
+ if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
+ if (ramping) {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ } else {
+ AAudioConvert_pcm16ToFloat(
+ (const int16_t *) source,
+ (float *) wrappingBuffer.data[partIndex],
+ numSamples,
+ levelTo);
+ }
+ } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
+ AAudio_linearRamp(
+ (const int16_t *) source,
+ (int16_t *) wrappingBuffer.data[partIndex],
+ framesToWrite,
+ getSamplesPerFrame(),
+ levelFrom,
+ levelTo);
+ }
+ }
+ source += numBytes;
+ framesLeft -= framesToWrite;
+ } else {
+ break;
+ }
+ partIndex++;
+ }
+ int32_t framesWritten = numFrames - framesLeft;
+ mAudioEndpoint.advanceWriteIndex(framesWritten);
+
+ if (framesWritten > 0) {
+ incrementFramesWritten(framesWritten);
+ }
+ // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
+ return framesWritten;
+}
+
+
+int64_t AudioStreamInternalPlay::getFramesRead()
+{
+ int64_t framesRead =
+ mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ + mFramesOffsetFromService;
+ // Prevent retrograde motion.
+ if (framesRead < mLastFramesRead) {
+ framesRead = mLastFramesRead;
+ } else {
+ mLastFramesRead = framesRead;
+ }
+ ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+ return framesRead;
+}
+
+int64_t AudioStreamInternalPlay::getFramesWritten()
+{
+ int64_t getFramesWritten = mAudioEndpoint.getDataWriteCounter()
+ + mFramesOffsetFromService;
+ ALOGD("AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
+ return getFramesWritten;
+}
+
+
+// Render audio in the application callback and then write the data to the stream.
+void *AudioStreamInternalPlay::callbackLoop() {
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ if (appCallback == nullptr) return NULL;
+
+ // result might be a frame count
+ while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
+ // Call application using the AAudio callback interface.
+ callbackResult = (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ mCallbackBuffer,
+ mCallbackFrames);
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+ // Write audio data to stream.
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+
+ // This is a BLOCKING WRITE!
+ result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+ if ((result != mCallbackFrames)) {
+ ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
+ if (result >= 0) {
+ // Only wrote some of the frames requested. Must have timed out.
+ result = AAUDIO_ERROR_TIMEOUT;
+ }
+ AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
+ if (errorCallback != nullptr) {
+ (*errorCallback)(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ result);
+ }
+ break;
+ }
+ } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ break;
+ }
+ }
+
+ ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
+ result, (int) isActive());
+ return NULL;
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
new file mode 100644
index 0000000..b043f67
--- /dev/null
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
+#define ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
+
+#include <stdint.h>
+#include <aaudio/AAudio.h>
+
+#include "binding/AAudioServiceInterface.h"
+#include "client/AudioStreamInternal.h"
+
+using android::sp;
+using android::IAAudioService;
+
+namespace aaudio {
+
+class AudioStreamInternalPlay : public AudioStreamInternal {
+public:
+ AudioStreamInternalPlay(AAudioServiceInterface &serviceInterface, bool inService = false);
+ virtual ~AudioStreamInternalPlay();
+
+ aaudio_result_t write(const void *buffer,
+ int32_t numFrames,
+ int64_t timeoutNanoseconds) override;
+
+ int64_t getFramesRead() override;
+ int64_t getFramesWritten() override;
+
+ void *callbackLoop() override;
+
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_OUTPUT;
+ }
+
+protected:
+/**
+ * Low level write that will not block. It will just write as much as it can.
+ *
+ * It passes back a recommended time to wake up if wakeTimePtr is not NULL.
+ *
+ * @return the number of frames written or a negative error code.
+ */
+ aaudio_result_t processDataNow(void *buffer,
+ int32_t numFrames,
+ int64_t currentTimeNanos,
+ int64_t *wakeTimePtr) override;
+private:
+ /*
+ * Asynchronous write with data conversion.
+ * @param buffer
+ * @param numFrames
+ * @return frames written or negative error
+ */
+ aaudio_result_t writeNowWithConversion(const void *buffer,
+ int32_t numFrames);
+
+ int64_t mLastFramesRead = 0; // used to prevent retrograde motion
+};
+
+} /* namespace aaudio */
+
+#endif //ANDROID_AAUDIO_AUDIO_STREAM_INTERNAL_PLAY_H
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 21e3e70..1de33bb 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -41,41 +41,40 @@
IsochronousClockModel::~IsochronousClockModel() {
}
-void IsochronousClockModel::start(int64_t nanoTime)
-{
+void IsochronousClockModel::start(int64_t nanoTime) {
+ ALOGD("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
mMarkerNanoTime = nanoTime;
mState = STATE_STARTING;
}
-void IsochronousClockModel::stop(int64_t nanoTime)
-{
+void IsochronousClockModel::stop(int64_t nanoTime) {
+ ALOGD("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
mMarkerNanoTime = nanoTime;
mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
mState = STATE_STOPPED;
}
-void IsochronousClockModel::processTimestamp(int64_t framePosition,
- int64_t nanoTime) {
+void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
int64_t framesDelta = framePosition - mMarkerFramePosition;
int64_t nanosDelta = nanoTime - mMarkerNanoTime;
if (nanosDelta < 1000) {
return;
}
-// ALOGI("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
+// ALOGD("processTimestamp() - mMarkerFramePosition = %lld at mMarkerNanoTime %llu",
// (long long)mMarkerFramePosition,
// (long long)mMarkerNanoTime);
-// ALOGI("processTimestamp() - framePosition = %lld at nanoTime %llu",
+// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
// (long long)framePosition,
// (long long)nanoTime);
int64_t expectedNanosDelta = convertDeltaPositionToTime(framesDelta);
-// ALOGI("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
+// ALOGD("processTimestamp() - expectedNanosDelta = %lld, nanosDelta = %llu",
// (long long)expectedNanosDelta,
// (long long)nanosDelta);
-// ALOGI("processTimestamp() - mSampleRate = %d", mSampleRate);
-// ALOGI("processTimestamp() - mState = %d", mState);
+// ALOGD("processTimestamp() - mSampleRate = %d", mSampleRate);
+// ALOGD("processTimestamp() - mState = %d", mState);
switch (mState) {
case STATE_STOPPED:
break;
@@ -85,12 +84,12 @@
mState = STATE_SYNCING;
break;
case STATE_SYNCING:
- // This will handle a burst of rapid consumption in the beginning.
+ // This will handle a burst of rapid transfer at the beginning.
if (nanosDelta < expectedNanosDelta) {
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
} else {
- ALOGI("processTimestamp() - advance to STATE_RUNNING");
+// ALOGD("processTimestamp() - advance to STATE_RUNNING");
mState = STATE_RUNNING;
}
break;
@@ -101,15 +100,15 @@
// or we may be drifting due to a slow HW clock.
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime;
- ALOGV("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
- (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+// ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
+// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
} else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
// Later than expected timestamp.
mMarkerFramePosition = framePosition;
mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
- ALOGV("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
- (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
- (int) (mMaxLatenessInNanos / 1000));
+// ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
+// (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
+// (int) (mMaxLatenessInNanos / 1000));
}
break;
default:
@@ -141,8 +140,7 @@
return (mSampleRate * nanosDelta) / AAUDIO_NANOS_PER_SECOND;
}
-int64_t IsochronousClockModel::convertPositionToTime(
- int64_t framePosition) const {
+int64_t IsochronousClockModel::convertPositionToTime(int64_t framePosition) const {
if (mState == STATE_STOPPED) {
return mMarkerNanoTime;
}
@@ -151,14 +149,13 @@
int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
int64_t time = (int64_t) (mMarkerNanoTime + nanosDelta);
-// ALOGI("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
+// ALOGD("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
// (unsigned long long)framePosition,
// (unsigned long long)time);
return time;
}
-int64_t IsochronousClockModel::convertTimeToPosition(
- int64_t nanoTime) const {
+int64_t IsochronousClockModel::convertTimeToPosition(int64_t nanoTime) const {
if (mState == STATE_STOPPED) {
return mMarkerFramePosition;
}
@@ -167,10 +164,10 @@
int64_t nextBurstPosition = mMarkerFramePosition + framesDelta;
int64_t nextBurstIndex = nextBurstPosition / mFramesPerBurst;
int64_t position = nextBurstIndex * mFramesPerBurst;
-// ALOGI("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
+// ALOGD("IsochronousClockModel::convertTimeToPosition: time = %llu --> pos = %llu",
// (unsigned long long)nanoTime,
// (unsigned long long)position);
-// ALOGI("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
+// ALOGD("IsochronousClockModel::convertTimeToPosition: framesDelta = %llu, mFramesPerBurst = %d",
// (long long) framesDelta, mFramesPerBurst);
return position;
}
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 59032d5..471935e 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -316,7 +316,7 @@
}
// Don't allow writes when playing with a callback.
- if (audioStream->getDataCallbackProc() != nullptr && audioStream->isPlaying()) {
+ if (audioStream->getDataCallbackProc() != nullptr && audioStream->isActive()) {
ALOGE("Cannot write to a callback stream when running.");
return AAUDIO_ERROR_INVALID_STATE;
}
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 7847661..66b64a6 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -43,7 +43,6 @@
mSampleRate = builder.getSampleRate();
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
- mDirection = builder.getDirection();
mSharingMode = builder.getSharingMode();
mSharingModeMatchRequired = builder.isSharingModeMatchRequired();
@@ -82,10 +81,6 @@
ALOGE("AudioStream::open(): mSampleRate out of range = %d", mSampleRate);
return AAUDIO_ERROR_INVALID_RATE;
}
- if (mDirection != AAUDIO_DIRECTION_INPUT && mDirection != AAUDIO_DIRECTION_OUTPUT) {
- ALOGE("AudioStream::open(): illegal direction %d", mDirection);
- return AAUDIO_ERROR_UNEXPECTED_VALUE;
- }
switch(mPerformanceMode) {
case AAUDIO_PERFORMANCE_MODE_NONE:
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index c49b46b..377f24a 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -92,7 +92,7 @@
aaudio_audio_thread_proc_t threadProc,
void *threadArg);
- virtual aaudio_result_t joinThread(void **returnArg, int64_t timeoutNanoseconds);
+ aaudio_result_t joinThread(void **returnArg, int64_t timeoutNanoseconds);
virtual aaudio_result_t registerThread() {
return AAUDIO_OK;
@@ -130,7 +130,7 @@
return AAUDIO_ERROR_UNIMPLEMENTED;
}
- bool isPlaying() const {
+ bool isActive() const {
return mState == AAUDIO_STREAM_STATE_STARTING || mState == AAUDIO_STREAM_STATE_STARTED;
}
@@ -170,9 +170,7 @@
return mSharingModeMatchRequired;
}
- aaudio_direction_t getDirection() const {
- return mDirection;
- }
+ virtual aaudio_direction_t getDirection() const = 0;
/**
* This is only valid after setSamplesPerFrame() and setFormat() have been called.
@@ -215,7 +213,7 @@
}
bool isDataCallbackActive() {
- return (mDataCallbackProc != nullptr) && isPlaying();
+ return (mDataCallbackProc != nullptr) && isActive();
}
// ============== I/O ===========================
@@ -302,7 +300,6 @@
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
bool mSharingModeMatchRequired = false; // must match sharing mode requested
aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
- aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 30e7eba..06f2d70 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -24,7 +24,8 @@
#include <aaudio/AAudio.h>
#include "binding/AAudioBinderClient.h"
-#include "client/AudioStreamInternal.h"
+#include "client/AudioStreamInternalCapture.h"
+#include "client/AudioStreamInternalPlay.h"
#include "core/AudioStream.h"
#include "core/AudioStreamBuilder.h"
#include "legacy/AudioStreamRecord.h"
@@ -51,17 +52,18 @@
switch (direction) {
case AAUDIO_DIRECTION_INPUT:
- if (sharingMode == AAUDIO_SHARING_MODE_SHARED) {
- *audioStreamPtr = new AudioStreamRecord();
+ if (tryMMap) {
+ *audioStreamPtr = new AudioStreamInternalCapture(AAudioBinderClient::getInstance(),
+ false);
} else {
- ALOGE("AudioStreamBuilder(): bad sharing mode = %d for input", sharingMode);
- result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ *audioStreamPtr = new AudioStreamRecord();
}
break;
case AAUDIO_DIRECTION_OUTPUT:
if (tryMMap) {
- *audioStreamPtr = new AudioStreamInternal(AAudioBinderClient::getInstance(), false);
+ *audioStreamPtr = new AudioStreamInternalPlay(AAudioBinderClient::getInstance(),
+ false);
} else {
*audioStreamPtr = new AudioStreamTrack();
}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 0af6457..90000fc 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -65,6 +65,10 @@
aaudio_result_t updateStateWhileWaiting() override;
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_INPUT;
+ }
+
// This is public so it can be called from the C callback function.
void processCallback(int event, void *info) override;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 186a08e..990f005 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -63,6 +63,10 @@
int64_t getFramesRead() override;
+ aaudio_direction_t getDirection() const override {
+ return AAUDIO_DIRECTION_OUTPUT;
+ }
+
aaudio_result_t updateStateWhileWaiting() override;
// This is public so it can be called from the C callback function.
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 168ed86..38ad59f 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -327,7 +327,7 @@
static int32_t AAudioProperty_getMMapProperty(const char *propName,
int32_t defaultValue,
const char * caller) {
- int32_t prop = property_get_int32(AAUDIO_PROP_MMAP_ENABLED, defaultValue);
+ int32_t prop = property_get_int32(propName, defaultValue);
switch (prop) {
case AAUDIO_USE_NEVER:
case AAUDIO_USE_ALWAYS:
@@ -352,9 +352,9 @@
}
int32_t AAudioProperty_getMixerBursts() {
- const int32_t defaultBursts = 2; // arbitrary
+ const int32_t defaultBursts = 2; // arbitrary, use 2 for double buffered
const int32_t maxBursts = 1024; // arbitrary
- int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts); // use 2 for double buffered
+ int32_t prop = property_get_int32(AAUDIO_PROP_MIXER_BURSTS, defaultBursts);
if (prop < 1 || prop > maxBursts) {
ALOGE("AAudioProperty_getMixerBursts: invalid = %d", prop);
prop = defaultBursts;