aaudio: fix CTS for MMAP mode

Improve calculation of buffer capacity based on requested minimum.
Adjust timing of start() to reduce underflows.
Track ServiceEndpoints based on requested deviceId.
Fix getFramesRead() and flush() behavior.
Fix timeouts due to ClockModel lateness bug.
Misc cleanup.
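
Buffer capacity example (the burst size below is illustrative):
with framesPerBurst = 192, a requested minimum of 1000 frames now
rounds up to the next burst boundary (6 bursts = 1152 frames), an
unspecified capacity defaults to 16 bursts (3072 frames), and the
result is clipped to at least 2 bursts and rejected above 32 * 1024
frames.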

Bug: 37755299
Test: test_aaudio.cpp
Change-Id: I637c16e87fbe14b6f28c60aeea0b9dfed965ecd0
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
index b8ef611..ef4a51f 100644
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ b/media/libaaudio/src/binding/IAAudioService.cpp
@@ -45,11 +45,9 @@
         Parcel data, reply;
         // send command
         data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
-        ALOGV("BpAAudioService::client openStream --------------------");
         // request.dump();
         request.writeToParcel(&data);
         status_t err = remote()->transact(OPEN_STREAM, data, &reply);
-        ALOGV("BpAAudioService::client openStream returned %d", err);
         if (err != NO_ERROR) {
             ALOGE("BpAAudioService::client openStream transact failed %d", err);
             return AAudioConvert_androidToAAudioResult(err);
@@ -57,6 +55,7 @@
         // parse reply
         aaudio_handle_t stream;
         err = reply.readInt32(&stream);
+        ALOGD("BpAAudioService::client openStream returned stream = 0x%08x", stream);
         if (err != NO_ERROR) {
             ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
             return AAudioConvert_androidToAAudioResult(err);
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 5cb642b..0684ed6 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -246,3 +246,7 @@
     return (int32_t)mDataQueue->getBufferCapacityInFrames();
 }
 
+void AudioEndpoint::dump() const {
+    ALOGD("AudioEndpoint: data readCounter  = %lld", (long long) mDataQueue->getReadCounter());
+    ALOGD("AudioEndpoint: data writeCounter = %lld", (long long) mDataQueue->getWriteCounter());
+}
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 53ba033..e7c6916 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -91,6 +91,8 @@
 
     int32_t getBufferCapacityInFrames() const;
 
+    void dump() const;
+
 private:
     android::FifoBuffer    *mUpCommandQueue;
     android::FifoBuffer    *mDataQueue;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 768d9db..b59c445 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -63,8 +63,8 @@
         , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
         , mFramesPerBurst(16)
         , mStreamVolume(1.0f)
-        , mServiceInterface(serviceInterface)
-        , mInService(inService) {
+        , mInService(inService)
+        , mServiceInterface(serviceInterface) {
 }
 
 AudioStreamInternal::~AudioStreamInternal() {
@@ -154,13 +154,13 @@
         if (getDataCallbackProc()) {
             mCallbackFrames = builder.getFramesPerDataCallback();
             if (mCallbackFrames > getBufferCapacity() / 2) {
-                ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
+                ALOGE("AudioStreamInternal::open(): framesPerCallback too big = %d, capacity = %d",
                       mCallbackFrames, getBufferCapacity());
                 mServiceInterface.closeStream(mServiceStreamHandle);
                 return AAUDIO_ERROR_OUT_OF_RANGE;
 
             } else if (mCallbackFrames < 0) {
-                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
+                ALOGE("AudioStreamInternal::open(): framesPerCallback negative");
                 mServiceInterface.closeStream(mServiceStreamHandle);
                 return AAUDIO_ERROR_OUT_OF_RANGE;
 
@@ -185,7 +185,7 @@
 }
 
 aaudio_result_t AudioStreamInternal::close() {
-    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
+    ALOGD("AudioStreamInternal::close(): mServiceStreamHandle = 0x%08X",
              mServiceStreamHandle);
     if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
         // Don't close a stream while it is running.
@@ -201,12 +201,14 @@
                 result, AAudio_convertResultToText(result));
             }
         }
+        setState(AAUDIO_STREAM_STATE_CLOSING);
         aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
         mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
 
         mServiceInterface.closeStream(serviceStreamHandle);
         delete[] mCallbackBuffer;
         mCallbackBuffer = nullptr;
+        setState(AAUDIO_STREAM_STATE_CLOSED);
         return mEndPointParcelable.close();
     } else {
         return AAUDIO_ERROR_INVALID_HANDLE;
@@ -228,15 +230,20 @@
 aaudio_result_t AudioStreamInternal::requestStart()
 {
     int64_t startTime;
-    ALOGD("AudioStreamInternal(): start()");
+    ALOGD("AudioStreamInternal()::requestStart()");
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
+    if (isActive()) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    aaudio_stream_state_t originalState = getState();
+
+    setState(AAUDIO_STREAM_STATE_STARTING);
+    aaudio_result_t result = AAudioConvert_androidToAAudioResult(startWithStatus());
 
     startTime = AudioClock::getNanoseconds();
     mClockModel.start(startTime);
-    setState(AAUDIO_STREAM_STATE_STARTING);
-    aaudio_result_t result = AAudioConvert_androidToAAudioResult(startWithStatus());
 
     if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
         // Launch the callback loop thread.
@@ -246,6 +253,9 @@
         mCallbackEnabled.store(true);
         result = createThread(periodNanos, aaudio_callback_thread_proc, this);
     }
+    if (result != AAUDIO_OK) {
+        setState(originalState);
+    }
     return result;
 }
 
@@ -279,7 +289,7 @@
 aaudio_result_t AudioStreamInternal::requestPauseInternal()
 {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -301,7 +311,7 @@
 
 aaudio_result_t AudioStreamInternal::requestFlush() {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -312,13 +322,14 @@
 
 // TODO for Play only
 void AudioStreamInternal::onFlushFromServer() {
-    ALOGD("AudioStreamInternal(): onFlushFromServer()");
     int64_t readCounter = mAudioEndpoint.getDataReadCounter();
     int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
 
     // Bump offset so caller does not see the retrograde motion in getFramesRead().
     int64_t framesFlushed = writeCounter - readCounter;
     mFramesOffsetFromService += framesFlushed;
+    ALOGD("AudioStreamInternal::onFlushFromServer() readN = %lld, writeN = %lld, offset = %lld",
+          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
 
     // Flush written frames by forcing writeCounter to readCounter.
     // This is because we cannot move the read counter in the hardware.
@@ -328,7 +339,7 @@
 aaudio_result_t AudioStreamInternal::requestStopInternal()
 {
     if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
-        ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
+        ALOGE("AudioStreamInternal::requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
               mServiceStreamHandle);
         return AAUDIO_ERROR_INVALID_STATE;
     }
@@ -370,7 +381,7 @@
                            int64_t *timeNanoseconds) {
     // TODO Generate in server and pass to client. Return latest.
     int64_t time = AudioClock::getNanoseconds();
-    *framePosition = mClockModel.convertTimeToPosition(time);
+    *framePosition = mClockModel.convertTimeToPosition(time) + mFramesOffsetFromService;
     // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
     *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
     return AAUDIO_OK;
@@ -383,31 +394,28 @@
     return processCommands();
 }
 
-#if LOG_TIMESTAMPS
-static void AudioStreamInternal_logTimestamp(AAudioServiceMessage &command) {
+void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
     static int64_t oldPosition = 0;
     static int64_t oldTime = 0;
     int64_t framePosition = command.timestamp.position;
     int64_t nanoTime = command.timestamp.timestamp;
-    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %lld",
+    ALOGD("AudioStreamInternal: timestamp says framePosition = %08lld at nanoTime %lld",
          (long long) framePosition,
          (long long) nanoTime);
     int64_t nanosDelta = nanoTime - oldTime;
     if (nanosDelta > 0 && oldTime > 0) {
         int64_t framesDelta = framePosition - oldPosition;
         int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
-        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
-        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
-        ALOGD("AudioStreamInternal() - measured rate = %lld", (long long) rate);
+        ALOGD("AudioStreamInternal: framesDelta = %08lld, nanosDelta = %08lld, rate = %lld",
+              (long long) framesDelta, (long long) nanosDelta, (long long) rate);
     }
     oldPosition = framePosition;
     oldTime = nanoTime;
 }
-#endif
 
 aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
 #if LOG_TIMESTAMPS
-    AudioStreamInternal_logTimestamp(*message);
+    logTimestamp(*message);
 #endif
     processTimestamp(message->timestamp.position, message->timestamp.timestamp);
     return AAUDIO_OK;
@@ -417,47 +425,48 @@
     aaudio_result_t result = AAUDIO_OK;
     switch (message->event.event) {
         case AAUDIO_SERVICE_EVENT_STARTED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_STARTED");
             if (getState() == AAUDIO_STREAM_STATE_STARTING) {
                 setState(AAUDIO_STREAM_STATE_STARTED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_PAUSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_PAUSED");
             if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
                 setState(AAUDIO_STREAM_STATE_PAUSED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_STOPPED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
+            ALOGD("AudioStreamInternal::onEventFromServergot() AAUDIO_SERVICE_EVENT_STOPPED");
             if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
                 setState(AAUDIO_STREAM_STATE_STOPPED);
             }
             break;
         case AAUDIO_SERVICE_EVENT_FLUSHED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
+            ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_FLUSHED");
             if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
                 setState(AAUDIO_STREAM_STATE_FLUSHED);
                 onFlushFromServer();
             }
             break;
         case AAUDIO_SERVICE_EVENT_CLOSED:
-            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
+            ALOGD("AudioStreamInternal::onEventFromServer() got AAUDIO_SERVICE_EVENT_CLOSED");
             setState(AAUDIO_STREAM_STATE_CLOSED);
             break;
         case AAUDIO_SERVICE_EVENT_DISCONNECTED:
             result = AAUDIO_ERROR_DISCONNECTED;
             setState(AAUDIO_STREAM_STATE_DISCONNECTED);
-            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
+            ALOGW("WARNING - AudioStreamInternal::onEventFromServer()"
+                          " AAUDIO_SERVICE_EVENT_DISCONNECTED");
             break;
         case AAUDIO_SERVICE_EVENT_VOLUME:
             mStreamVolume = (float)message->event.dataDouble;
             doSetVolume();
-            ALOGD("processCommands() AAUDIO_SERVICE_EVENT_VOLUME %lf",
+            ALOGD("AudioStreamInternal::onEventFromServer() AAUDIO_SERVICE_EVENT_VOLUME %lf",
                      message->event.dataDouble);
             break;
         default:
-            ALOGW("WARNING - processCommands() Unrecognized event = %d",
+            ALOGW("WARNING - AudioStreamInternal::onEventFromServer() Unrecognized event = %d",
                  (int) message->event.event);
             break;
     }
@@ -499,27 +508,27 @@
 {
     const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
     ATRACE_BEGIN(traceName);
-    aaudio_result_t result = AAUDIO_OK;
-    int32_t loopCount = 0;
-    uint8_t* audioData = (uint8_t*)buffer;
-    int64_t currentTimeNanos = AudioClock::getNanoseconds();
-    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
-    int32_t framesLeft = numFrames;
-
     int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
     if (ATRACE_ENABLED()) {
         const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
         ATRACE_INT(traceName, fullFrames);
     }
 
+    aaudio_result_t result = AAUDIO_OK;
+    int32_t loopCount = 0;
+    uint8_t* audioData = (uint8_t*)buffer;
+    int64_t currentTimeNanos = AudioClock::getNanoseconds();
+    const int64_t entryTimeNanos = currentTimeNanos;
+    const int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
+    int32_t framesLeft = numFrames;
+
     // Loop until all the data has been processed or until a timeout occurs.
     while (framesLeft > 0) {
-        // The call to processDataNow() will not block. It will just read as much as it can.
+        // The call to processDataNow() will not block. It will just process as much as it can.
         int64_t wakeTimeNanos = 0;
         aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
                                                   currentTimeNanos, &wakeTimeNanos);
         if (framesProcessed < 0) {
-            ALOGE("AudioStreamInternal::processData() loop: framesProcessed = %d", framesProcessed);
             result = framesProcessed;
             break;
         }
@@ -537,12 +546,16 @@
             if (wakeTimeNanos > deadlineNanos) {
                 // If we time out, just return the framesWritten so far.
                 // TODO remove after we fix the deadline bug
-                ALOGE("AudioStreamInternal::processData(): timed out after %lld nanos",
+                ALOGW("AudioStreamInternal::processData(): entered at %lld nanos, currently %lld",
+                      (long long) entryTimeNanos, (long long) currentTimeNanos);
+                ALOGW("AudioStreamInternal::processData(): timed out after %lld nanos",
                       (long long) timeoutNanoseconds);
-                ALOGE("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
+                ALOGW("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
                       (long long) wakeTimeNanos, (long long) deadlineNanos);
-                ALOGE("AudioStreamInternal::processData(): past deadline by %d micros",
+                ALOGW("AudioStreamInternal::processData(): past deadline by %d micros",
                       (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
+                mClockModel.dump();
+                mAudioEndpoint.dump();
                 break;
             }
 
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 47aedad..c2c6419 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -125,6 +125,8 @@
 
     aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
 
+    void logTimestamp(AAudioServiceMessage &message);
+
     // Calculate timeout for an operation involving framesPerOperation.
     int64_t calculateReasonableTimeout(int32_t framesPerOperation);
 
@@ -155,6 +157,9 @@
     uint8_t                 *mCallbackBuffer = nullptr;
     int32_t                  mCallbackFrames = 0;
 
+    // The service uses this for SHARED mode.
+    bool                     mInService = false;  // Is this running in the client or the service?
+
 private:
     /*
      * Asynchronous write with data conversion.
@@ -172,8 +177,6 @@
     EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
     AAudioServiceInterface  &mServiceInterface;   // abstract interface to the service
 
-    // The service uses this for SHARED mode.
-    bool                     mInService = false;  // Is this running in the client or the service?
 };
 
 } /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 93693bd..22f8bd1 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -14,10 +14,11 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AAudio"
+#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#include <algorithm>
 #include <aaudio/AAudio.h>
 
 #include "client/AudioStreamInternalCapture.h"
@@ -155,29 +156,27 @@
 
     int32_t framesProcessed = numFrames - framesLeft;
     mAudioEndpoint.advanceReadIndex(framesProcessed);
-    incrementFramesRead(framesProcessed);
 
     //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
     return framesProcessed;
 }
 
-int64_t AudioStreamInternalCapture::getFramesWritten()
-{
-    int64_t frames =
-            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
-            + mFramesOffsetFromService;
-    // Prevent retrograde motion.
-    if (frames < mLastFramesWritten) {
-        frames = mLastFramesWritten;
+int64_t AudioStreamInternalCapture::getFramesWritten() {
+    int64_t framesWrittenHardware;
+    if (isActive()) {
+        framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
     } else {
-        mLastFramesWritten = frames;
+        framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
     }
-    //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld", (long long)frames);
-    return frames;
+    // Prevent retrograde motion.
+    mLastFramesWritten = std::max(mLastFramesWritten,
+                                  framesWrittenHardware + mFramesOffsetFromService);
+    //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld",
+    //      (long long)mLastFramesWritten);
+    return mLastFramesWritten;
 }
 
-int64_t AudioStreamInternalCapture::getFramesRead()
-{
+int64_t AudioStreamInternalCapture::getFramesRead() {
     int64_t frames = mAudioEndpoint.getDataWriteCounter()
                                + mFramesOffsetFromService;
     //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index fc9766f..76ecbf9 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AAudio"
+#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
@@ -51,15 +51,17 @@
     }
 
     if (mAudioEndpoint.isFreeRunning()) {
-        //ALOGD("AudioStreamInternal::processDataNow() - update read counter");
         // Update data queue based on the timing model.
         int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
         mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
     }
-    // TODO else query from endpoint cuz set by actual reader, maybe
 
     // If the read index passed the write index then consider it an underrun.
     if (mAudioEndpoint.getFullFramesAvailable() < 0) {
+        ALOGV("AudioStreamInternal::processDataNow() - XRun! write = %d, read = %d",
+              (int)mAudioEndpoint.getDataWriteCounter(),
+              (int)mAudioEndpoint.getDataReadCounter());
         mXRunCount++;
     }
 
@@ -201,9 +203,6 @@
     int32_t framesWritten = numFrames - framesLeft;
     mAudioEndpoint.advanceWriteIndex(framesWritten);
 
-    if (framesWritten > 0) {
-        incrementFramesWritten(framesWritten);
-    }
     // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
     return framesWritten;
 }
@@ -211,25 +210,29 @@
 
 int64_t AudioStreamInternalPlay::getFramesRead()
 {
-    int64_t framesRead =
-            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
-            + mFramesOffsetFromService;
+    int64_t framesReadHardware;
+    if (isActive()) {
+        framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
+    } else {
+        framesReadHardware = mAudioEndpoint.getDataReadCounter();
+    }
+    int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
     // Prevent retrograde motion.
     if (framesRead < mLastFramesRead) {
         framesRead = mLastFramesRead;
     } else {
         mLastFramesRead = framesRead;
     }
-    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
+    //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
     return framesRead;
 }
 
 int64_t AudioStreamInternalPlay::getFramesWritten()
 {
-    int64_t getFramesWritten = mAudioEndpoint.getDataWriteCounter()
+    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
                                + mFramesOffsetFromService;
-    ALOGD("AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
-    return getFramesWritten;
+    //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
+    return framesWritten;
 }
 
 
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 1de33bb..73f4c1d 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -41,6 +41,13 @@
 IsochronousClockModel::~IsochronousClockModel() {
 }
 
+void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
+    ALOGV("IsochronousClockModel::setPositionAndTime(%lld, %lld)",
+          (long long) framePosition, (long long) nanoTime);
+    mMarkerFramePosition = framePosition;
+    mMarkerNanoTime = nanoTime;
+}
+
 void IsochronousClockModel::start(int64_t nanoTime) {
     ALOGD("IsochronousClockModel::start(nanos = %lld)\n", (long long) nanoTime);
     mMarkerNanoTime = nanoTime;
@@ -49,8 +56,8 @@
 
 void IsochronousClockModel::stop(int64_t nanoTime) {
     ALOGD("IsochronousClockModel::stop(nanos = %lld)\n", (long long) nanoTime);
-    mMarkerNanoTime = nanoTime;
-    mMarkerFramePosition = convertTimeToPosition(nanoTime); // TODO should we do this?
+    setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
+    // TODO should we set position?
     mState = STATE_STOPPED;
 }
 
@@ -79,15 +86,13 @@
     case STATE_STOPPED:
         break;
     case STATE_STARTING:
-        mMarkerFramePosition = framePosition;
-        mMarkerNanoTime = nanoTime;
+        setPositionAndTime(framePosition, nanoTime);
         mState = STATE_SYNCING;
         break;
     case STATE_SYNCING:
         // This will handle a burst of rapid transfer at the beginning.
         if (nanosDelta < expectedNanosDelta) {
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime;
+            setPositionAndTime(framePosition, nanoTime);
         } else {
 //            ALOGD("processTimestamp() - advance to STATE_RUNNING");
             mState = STATE_RUNNING;
@@ -98,17 +103,15 @@
             // Earlier than expected timestamp.
             // This data is probably more accurate so use it.
             // or we may be drifting due to a slow HW clock.
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime;
 //            ALOGD("processTimestamp() - STATE_RUNNING - %d < %d micros - EARLY",
 //                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000));
+            setPositionAndTime(framePosition, nanoTime);
         } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
             // Later than expected timestamp.
-            mMarkerFramePosition = framePosition;
-            mMarkerNanoTime = nanoTime - mMaxLatenessInNanos;
 //            ALOGD("processTimestamp() - STATE_RUNNING - %d > %d + %d micros - LATE",
 //                 (int) (nanosDelta / 1000), (int)(expectedNanosDelta / 1000),
 //                 (int) (mMaxLatenessInNanos / 1000));
+            setPositionAndTime(framePosition - mFramesPerBurst, nanoTime - mMaxLatenessInNanos);
         }
         break;
     default:
@@ -131,8 +134,7 @@
     mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
 }
 
-int64_t IsochronousClockModel::convertDeltaPositionToTime(
-        int64_t framesDelta) const {
+int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
     return (AAUDIO_NANOS_PER_SECOND * framesDelta) / mSampleRate;
 }
 
@@ -171,3 +173,12 @@
 //         (long long) framesDelta, mFramesPerBurst);
     return position;
 }
+
+void IsochronousClockModel::dump() const {
+    ALOGD("IsochronousClockModel::mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
+    ALOGD("IsochronousClockModel::mMarkerNanoTime      = %lld", (long long) mMarkerNanoTime);
+    ALOGD("IsochronousClockModel::mSampleRate          = %6d", mSampleRate);
+    ALOGD("IsochronousClockModel::mFramesPerBurst      = %6d", mFramesPerBurst);
+    ALOGD("IsochronousClockModel::mMaxLatenessInNanos  = %6d", mMaxLatenessInNanos);
+    ALOGD("IsochronousClockModel::mState               = %6d", mState);
+}
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 0314f55..585f53a 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -43,6 +43,8 @@
      */
     void setSampleRate(int32_t sampleRate);
 
+    void setPositionAndTime(int64_t framePosition, int64_t nanoTime);
+
     int32_t getSampleRate() const {
         return mSampleRate;
     }
@@ -86,6 +88,8 @@
      */
     int64_t convertDeltaTimeToPosition(int64_t nanosDelta) const;
 
+    void dump() const;
+
 private:
     enum clock_model_state_t {
         STATE_STOPPED,
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 76f98fa..3f5de77 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -253,8 +253,10 @@
 AAUDIO_API aaudio_result_t  AAudioStream_requestStart(AAudioStream* stream)
 {
     AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
-    ALOGD("AAudioStream_requestStart(%p)", stream);
-    return audioStream->requestStart();
+    ALOGD("AAudioStream_requestStart(%p) called --------------", stream);
+    aaudio_result_t result = audioStream->requestStart();
+    ALOGD("AAudioStream_requestStart(%p) returned ------------", stream);
+    return result;
 }
 
 AAUDIO_API aaudio_result_t  AAudioStream_requestPause(AAudioStream* stream)
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 5a05c0e..19b08c4 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -36,6 +36,22 @@
     setPeriodNanoseconds(0);
 }
 
+static const char *AudioStream_convertSharingModeToShortText(aaudio_sharing_mode_t sharingMode) {
+    const char *result;
+    switch (sharingMode) {
+        case AAUDIO_SHARING_MODE_EXCLUSIVE:
+            result = "EX";
+            break;
+        case AAUDIO_SHARING_MODE_SHARED:
+            result = "SH";
+            break;
+        default:
+            result = "?!";
+            break;
+    }
+    return result;
+}
+
 aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
 {
     // Call here as well because the AAudioService will call this without calling build().
@@ -62,8 +78,9 @@
     mErrorCallbackUserData = builder.getErrorCallbackUserData();
 
     // This is very helpful for debugging in the future. Please leave it in.
-    ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %d, dir = %s",
-          mSampleRate, mSamplesPerFrame, mFormat, mSharingMode,
+    ALOGI("AudioStream::open() rate = %d, channels = %d, format = %d, sharing = %s, dir = %s",
+          mSampleRate, mSamplesPerFrame, mFormat,
+          AudioStream_convertSharingModeToShortText(mSharingMode),
           (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
     ALOGI("AudioStream::open() device = %d, perfMode = %d, callbackFrames = %d",
           mDeviceId, mPerformanceMode, mFramesPerDataCallback);
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 39c9f9c..7c7fcc5 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -186,13 +186,9 @@
         return AAudioConvert_formatToSizeInBytes(mFormat);
     }
 
-    virtual int64_t getFramesWritten() {
-        return mFramesWritten.get();
-    }
+    virtual int64_t getFramesWritten() = 0;
 
-    virtual int64_t getFramesRead() {
-        return mFramesRead.get();
-    }
+    virtual int64_t getFramesRead() = 0;
 
     AAudioStream_dataCallback getDataCallbackProc() const {
         return mDataCallbackProc;
@@ -232,13 +228,6 @@
 
 protected:
 
-    virtual int64_t incrementFramesWritten(int32_t frames) {
-        return mFramesWritten.increment(frames);
-    }
-
-    virtual int64_t incrementFramesRead(int32_t frames) {
-        return mFramesRead.increment(frames);
-    }
 
     /**
      * This should not be called after the open() call.
@@ -281,8 +270,6 @@
     std::atomic<bool>    mCallbackEnabled;
 
 protected:
-    MonotonicCounter     mFramesWritten;
-    MonotonicCounter     mFramesRead;
 
     void setPeriodNanoseconds(int64_t periodNanoseconds) {
         mPeriodNanoseconds.store(periodNanoseconds, std::memory_order_release);
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 0ded8e1..d2ef3c7 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -74,6 +74,15 @@
 
     virtual int64_t incrementClientFrameCounter(int32_t frames)  = 0;
 
+
+    virtual int64_t getFramesWritten() override {
+        return mFramesWritten.get();
+    }
+
+    virtual int64_t getFramesRead() override {
+        return mFramesRead.get();
+    }
+
 protected:
 
     class StreamDeviceCallback : public android::AudioSystem::AudioDeviceCallback
@@ -103,6 +112,17 @@
     void onStart() { mCallbackEnabled.store(true); }
     void onStop() { mCallbackEnabled.store(false); }
 
+    int64_t incrementFramesWritten(int32_t frames) {
+        return mFramesWritten.increment(frames);
+    }
+
+    int64_t incrementFramesRead(int32_t frames) {
+        return mFramesRead.increment(frames);
+    }
+
+    MonotonicCounter           mFramesWritten;
+    MonotonicCounter           mFramesRead;
+
     FixedBlockAdapter         *mBlockAdapter = nullptr;
     aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
     int32_t                    mCallbackBufferSize = 0;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 7e39908..77f31e2 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -423,7 +423,7 @@
     default:
         break;
     }
-    return AudioStream::getFramesRead();
+    return AudioStreamLegacy::getFramesRead();
 }
 
 aaudio_result_t AudioStreamTrack::getTimestamp(clockid_t clockId,
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 3dc1feb..5c6825d 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -52,15 +52,17 @@
             assert(false); // There are only two possible directions.
             break;
     }
-    ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
-          endpoint, deviceId, (int)direction);
 
     // If we can't find an existing one then open a new one.
-    if (endpoint == nullptr) {
+    if (endpoint != nullptr) {
+        ALOGD("AAudioEndpointManager::openEndpoint(), found %p for device = %d, dir = %d",
+              endpoint, deviceId, (int)direction);
+
+    } else {
         if (direction == AAUDIO_DIRECTION_INPUT) {
             AAudioServiceEndpointCapture *capture = new AAudioServiceEndpointCapture(audioService);
             if (capture->open(deviceId) != AAUDIO_OK) {
-                ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+                ALOGE("AAudioEndpointManager::openEndpoint(), open input failed");
                 delete capture;
             } else {
                 mInputs[deviceId] = capture;
@@ -69,17 +71,20 @@
         } else if (direction == AAUDIO_DIRECTION_OUTPUT) {
             AAudioServiceEndpointPlay *player = new AAudioServiceEndpointPlay(audioService);
             if (player->open(deviceId) != AAUDIO_OK) {
-                ALOGE("AAudioEndpointManager::openEndpoint(), open failed");
+                ALOGE("AAudioEndpointManager::openEndpoint(), open output failed");
                 delete player;
             } else {
                 mOutputs[deviceId] = player;
                 endpoint = player;
             }
         }
-
+        ALOGD("AAudioEndpointManager::openEndpoint(), created %p for device = %d, dir = %d",
+              endpoint, deviceId, (int)direction);
     }
 
     if (endpoint != nullptr) {
+        ALOGD("AAudioEndpointManager::openEndpoint(), sampleRate = %d, framesPerBurst = %d",
+              endpoint->getSampleRate(), endpoint->getFramesPerBurst());
         // Increment the reference count under this lock.
         endpoint->setReferenceCount(endpoint->getReferenceCount() + 1);
     }
@@ -95,9 +100,15 @@
     // Decrement the reference count under this lock.
     int32_t newRefCount = serviceEndpoint->getReferenceCount() - 1;
     serviceEndpoint->setReferenceCount(newRefCount);
+    ALOGD("AAudioEndpointManager::closeEndpoint(%p) newRefCount = %d",
+          serviceEndpoint, newRefCount);
+
+    // If no longer in use then close and delete it.
     if (newRefCount <= 0) {
         aaudio_direction_t direction = serviceEndpoint->getDirection();
-        int32_t deviceId = serviceEndpoint->getDeviceId();
+        // Track endpoints based on requested deviceId because UNSPECIFIED
+        // can change to a specific device after opening.
+        int32_t deviceId = serviceEndpoint->getRequestedDeviceId();
 
         switch (direction) {
             case AAUDIO_DIRECTION_INPUT:
@@ -109,6 +120,8 @@
         }
 
         serviceEndpoint->close();
+        ALOGD("AAudioEndpointManager::closeEndpoint() delete %p for device %d, dir = %d",
+              serviceEndpoint, deviceId, (int)direction);
         delete serviceEndpoint;
     }
 }
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index db1103d..899ea35 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -35,7 +35,7 @@
 
     /**
      * Find a service endpoint for the given deviceId and direction.
-     * If an endpoint does not already exist then it will try to create one.
+     * If an endpoint does not already exist then try to create one.
      *
      * @param deviceId
      * @param direction
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index c9b9065..b0e0a74 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -89,7 +89,7 @@
         return result;
     } else {
         aaudio_handle_t handle = mHandleTracker.put(AAUDIO_HANDLE_TYPE_STREAM, serviceStream);
-        ALOGV("AAudioService::openStream(): handle = 0x%08X", handle);
+        ALOGD("AAudioService::openStream(): handle = 0x%08X", handle);
         if (handle < 0) {
             ALOGE("AAudioService::openStream(): handle table full");
             delete serviceStream;
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index d8ae284..cc2cb44 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -46,6 +46,7 @@
 
 // Set up an EXCLUSIVE MMAP stream that will be shared.
 aaudio_result_t AAudioServiceEndpoint::open(int32_t deviceId) {
+    mRequestedDeviceId = deviceId;
     mStreamInternal = getStreamInternal();
 
     AudioStreamBuilder builder;
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 50bf049..c271dbd 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -48,6 +48,7 @@
     aaudio_result_t stopStream(AAudioServiceStreamShared *sharedStream);
     aaudio_result_t close();
 
+    int32_t getRequestedDeviceId() const { return mRequestedDeviceId; }
     int32_t getDeviceId() const { return mStreamInternal->getDeviceId(); }
 
     aaudio_direction_t getDirection() const { return mStreamInternal->getDirection(); }
@@ -81,6 +82,7 @@
 
     AudioStreamInternal     *mStreamInternal = nullptr;
     int32_t                  mReferenceCount = 0;
+    int32_t                  mRequestedDeviceId = 0;
 };
 
 } /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.h b/services/oboeservice/AAudioServiceEndpointPlay.h
index b977960..89935ae 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.h
+++ b/services/oboeservice/AAudioServiceEndpointPlay.h
@@ -32,6 +32,9 @@
 
 namespace aaudio {
 
+/**
+ * Contains a mixer and a stream for writing the result of the mix.
+ */
 class AAudioServiceEndpointPlay : public AAudioServiceEndpoint {
 public:
     explicit AAudioServiceEndpointPlay(android::AAudioService &audioService);
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 8f0abc2..ee0e7ed 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -71,12 +71,11 @@
 }
 
 aaudio_result_t AAudioServiceStreamBase::pause() {
-
     sendCurrentTimestamp();
     mThreadEnabled.store(false);
     aaudio_result_t result = mAAudioThread.stop();
     if (result != AAUDIO_OK) {
-        processError();
+        processFatalError();
         return result;
     }
     sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
@@ -90,7 +89,7 @@
     mThreadEnabled.store(false);
     aaudio_result_t result = mAAudioThread.stop();
     if (result != AAUDIO_OK) {
-        processError();
+        processFatalError();
         return result;
     }
     sendServiceEvent(AAUDIO_SERVICE_EVENT_STOPPED);
@@ -126,7 +125,7 @@
     ALOGD("AAudioServiceStreamBase::run() exiting ----------------");
 }
 
-void AAudioServiceStreamBase::processError() {
+void AAudioServiceStreamBase::processFatalError() {
     sendServiceEvent(AAUDIO_SERVICE_EVENT_DISCONNECTED);
 }
 
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index ee52c39..46ceeae 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -111,7 +111,7 @@
 
     void run() override; // to implement Runnable
 
-    void processError();
+    void processFatalError();
 
 protected:
     aaudio_result_t writeUpMessageQueue(AAudioServiceMessage *command);
@@ -122,16 +122,16 @@
 
     virtual aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) = 0;
 
-    aaudio_stream_state_t               mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+    aaudio_stream_state_t   mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
 
     pid_t              mRegisteredClientThread = ILLEGAL_THREAD_ID;
 
     SharedRingBuffer*  mUpMessageQueue;
     std::mutex         mLockUpMessageQueue;
 
-    AAudioThread        mAAudioThread;
+    AAudioThread       mAAudioThread;
     // This is used by one thread to tell another thread to exit. So it must be atomic.
-    std::atomic<bool>   mThreadEnabled;
+    std::atomic<bool>  mThreadEnabled;
 
     aaudio_format_t    mAudioFormat = AAUDIO_FORMAT_UNSPECIFIED;
     int32_t            mFramesPerBurst = 0;
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 97b9937..2f3ec27 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -138,15 +138,19 @@
         return AAUDIO_ERROR_UNAVAILABLE;
     }
 
+    if (deviceId == AAUDIO_UNSPECIFIED) {
+        ALOGW("AAudioServiceStreamMMAP::open() - openMmapStream() failed to set deviceId");
+    }
+
     // Create MMAP/NOIRQ buffer.
     int32_t minSizeFrames = configurationInput.getBufferCapacity();
-    if (minSizeFrames == 0) { // zero will get rejected
+    if (minSizeFrames <= 0) { // zero will get rejected
         minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
     }
     status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
     if (status != OK) {
-        ALOGE("%s: createMmapBuffer() returned status %d, return AAUDIO_ERROR_UNAVAILABLE",
-              __FILE__, status);
+        ALOGE("AAudioServiceStreamMMAP::open() - createMmapBuffer() returned status %d",
+              status);
         return AAUDIO_ERROR_UNAVAILABLE;
     } else {
         ALOGD("createMmapBuffer status %d shared_address = %p buffer_size %d burst_size %d",
@@ -181,6 +185,9 @@
     ALOGD("AAudioServiceStreamMMAP::open() original burst = %d, minMicros = %d, final burst = %d\n",
           mMmapBufferinfo.burst_size_frames, burstMinMicros, mFramesPerBurst);
 
+    ALOGD("AAudioServiceStreamMMAP::open() actual rate = %d, channels = %d, deviceId = %d\n",
+          mSampleRate, mSamplesPerFrame, deviceId);
+
     // Fill in AAudioStreamConfiguration
     configurationOutput.setSampleRate(mSampleRate);
     configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
@@ -199,7 +206,7 @@
     status_t status = mMmapStream->start(mMmapClient, &mPortHandle);
     if (status != OK) {
         ALOGE("AAudioServiceStreamMMAP::start() mMmapStream->start() returned %d", status);
-        processError();
+        processFatalError();
         result = AAudioConvert_androidToAAudioResult(status);
     } else {
         result = AAudioServiceStreamBase::start();
@@ -234,8 +241,6 @@
 aaudio_result_t AAudioServiceStreamMMAP::flush() {
     if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
     // TODO how do we flush an MMAP/NOIRQ buffer? sync pointers?
-    sendServiceEvent(AAUDIO_SERVICE_EVENT_FLUSHED);
-    mState = AAUDIO_STREAM_STATE_FLUSHED;
     return AAudioServiceStreamBase::flush();;
 }
 
@@ -244,13 +249,13 @@
                                                                 int64_t *timeNanos) {
     struct audio_mmap_position position;
     if (mMmapStream == nullptr) {
-        processError();
+        processFatalError();
         return AAUDIO_ERROR_NULL;
     }
     status_t status = mMmapStream->getMmapPosition(&position);
     if (status != OK) {
         ALOGE("sendCurrentTimestamp(): getMmapPosition() returned %d", status);
-        processError();
+        processFatalError();
         return AAudioConvert_androidToAAudioResult(status);
     } else {
         mFramesRead.update32(position.position_frames);
@@ -295,4 +300,4 @@
     parcelable.mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
     parcelable.mDownDataQueueParcelable.setCapacityInFrames(mCapacityInFrames);
     return AAUDIO_OK;
-}
\ No newline at end of file
+}
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 494b18e..f246fc02 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -34,8 +34,10 @@
 using namespace android;
 using namespace aaudio;
 
-#define MIN_BURSTS_PER_BUFFER   2
-#define MAX_BURSTS_PER_BUFFER   32
+#define MIN_BURSTS_PER_BUFFER       2
+#define DEFAULT_BURSTS_PER_BUFFER   16
+// This is an arbitrary range. TODO review.
+#define MAX_FRAMES_PER_BUFFER       (32 * 1024)
 
 AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
     : mAudioService(audioService)
@@ -46,12 +48,58 @@
     close();
 }
 
+int32_t AAudioServiceStreamShared::calculateBufferCapacity(int32_t requestedCapacityFrames,
+                                                           int32_t framesPerBurst) {
+
+    if (requestedCapacityFrames > MAX_FRAMES_PER_BUFFER) {
+        ALOGE("AAudioServiceStreamShared::open(), requested capacity %d > max %d",
+              requestedCapacityFrames, MAX_FRAMES_PER_BUFFER);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+
+    // Determine how many bursts will fit in the buffer.
+    int32_t numBursts;
+    if (requestedCapacityFrames == AAUDIO_UNSPECIFIED) {
+        // Use fewer bursts if default is too many.
+        if ((DEFAULT_BURSTS_PER_BUFFER * framesPerBurst) > MAX_FRAMES_PER_BUFFER) {
+            numBursts = MAX_FRAMES_PER_BUFFER / framesPerBurst;
+        } else {
+            numBursts = DEFAULT_BURSTS_PER_BUFFER;
+        }
+    } else {
+        // round up to nearest burst boundary
+        numBursts = (requestedCapacityFrames + framesPerBurst - 1) / framesPerBurst;
+    }
+
+    // Clip to bare minimum.
+    if (numBursts < MIN_BURSTS_PER_BUFFER) {
+        numBursts = MIN_BURSTS_PER_BUFFER;
+    }
+    // Check for numeric overflow.
+    if (numBursts > 0x8000 || framesPerBurst > 0x8000) {
+        ALOGE("AAudioServiceStreamShared::open(), numeric overflow, capacity = %d * %d",
+              numBursts, framesPerBurst);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    int32_t capacityInFrames = numBursts * framesPerBurst;
+
+    // Final sanity check.
+    if (capacityInFrames > MAX_FRAMES_PER_BUFFER) {
+        ALOGE("AAudioServiceStreamShared::open(), calculated capacity %d > max %d",
+              capacityInFrames, MAX_FRAMES_PER_BUFFER);
+        return AAUDIO_ERROR_OUT_OF_RANGE;
+    }
+    ALOGD("AAudioServiceStreamShared::open(), requested capacity = %d frames, actual = %d",
+          requestedCapacityFrames, capacityInFrames);
+    return capacityInFrames;
+}
+
 aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
                      aaudio::AAudioStreamConfiguration &configurationOutput)  {
 
     aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
     if (result != AAUDIO_OK) {
-        ALOGE("AAudioServiceStreamBase open returned %d", result);
+        ALOGE("AAudioServiceStreamBase open() returned %d", result);
         return result;
     }
 
@@ -72,16 +120,18 @@
         mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
     } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
         ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need FLOAT", mAudioFormat);
-        return AAUDIO_ERROR_INVALID_FORMAT;
+        result = AAUDIO_ERROR_INVALID_FORMAT;
+        goto error;
     }
 
     mSampleRate = configurationInput.getSampleRate();
     if (mSampleRate == AAUDIO_UNSPECIFIED) {
         mSampleRate = mServiceEndpoint->getSampleRate();
     } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
-        ALOGE("AAudioServiceStreamShared::open(), mAudioFormat = %d, need %d",
+        ALOGE("AAudioServiceStreamShared::open(), mSampleRate = %d, need %d",
               mSampleRate, mServiceEndpoint->getSampleRate());
-        return AAUDIO_ERROR_INVALID_RATE;
+        result = AAUDIO_ERROR_INVALID_RATE;
+        goto error;
     }
 
     mSamplesPerFrame = configurationInput.getSamplesPerFrame();
@@ -90,37 +140,51 @@
     } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
         ALOGE("AAudioServiceStreamShared::open(), mSamplesPerFrame = %d, need %d",
               mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
-        return AAUDIO_ERROR_OUT_OF_RANGE;
+        result = AAUDIO_ERROR_OUT_OF_RANGE;
+        goto error;
     }
 
-    // Determine this stream's shared memory buffer capacity.
     mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
-    int32_t minCapacityFrames = configurationInput.getBufferCapacity();
-    int32_t numBursts = MAX_BURSTS_PER_BUFFER;
-    if (minCapacityFrames != AAUDIO_UNSPECIFIED) {
-        numBursts = (minCapacityFrames + mFramesPerBurst - 1) / mFramesPerBurst;
-        if (numBursts < MIN_BURSTS_PER_BUFFER) {
-            numBursts = MIN_BURSTS_PER_BUFFER;
-        } else if (numBursts > MAX_BURSTS_PER_BUFFER) {
-            numBursts = MAX_BURSTS_PER_BUFFER;
-        }
+    ALOGD("AAudioServiceStreamShared::open(), mSampleRate = %d, mFramesPerBurst = %d",
+          mSampleRate, mFramesPerBurst);
+
+    mCapacityInFrames = calculateBufferCapacity(configurationInput.getBufferCapacity(),
+                                     mFramesPerBurst);
+    if (mCapacityInFrames < 0) {
+        result = mCapacityInFrames; // negative error code
+        mCapacityInFrames = 0;
+        goto error;
     }
-    mCapacityInFrames = numBursts * mFramesPerBurst;
-    ALOGD("AAudioServiceStreamShared::open(), mCapacityInFrames = %d", mCapacityInFrames);
 
     // Create audio data shared memory buffer for client.
     mAudioDataQueue = new SharedRingBuffer();
-    mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+    result = mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
+    if (result != AAUDIO_OK) {
+        ALOGE("AAudioServiceStreamShared::open(), could not allocate FIFO with %d frames",
+              mCapacityInFrames);
+        result = AAUDIO_ERROR_NO_MEMORY;
+        goto error;
+    }
+
+    ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
+          mSampleRate, mSamplesPerFrame, mServiceEndpoint->getDeviceId());
 
     // Fill in configuration for client.
     configurationOutput.setSampleRate(mSampleRate);
     configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
     configurationOutput.setAudioFormat(mAudioFormat);
-    configurationOutput.setDeviceId(deviceId);
+    configurationOutput.setDeviceId(mServiceEndpoint->getDeviceId());
 
-    mServiceEndpoint->registerStream(this);
+    result = mServiceEndpoint->registerStream(this);
+    if (result != AAUDIO_OK) {
+        goto error;
+    }
 
     return AAUDIO_OK;
+
+error:
+    close();
+    return result;
 }
 
 /**
@@ -137,11 +201,11 @@
     aaudio_result_t result = endpoint->startStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     } else {
         result = AAudioServiceStreamBase::start();
     }
-    return AAUDIO_OK;
+    return result;
 }
 
 /**
@@ -154,11 +218,10 @@
     if (endpoint == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Add this stream to the mixer.
     aaudio_result_t result = endpoint->stopStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     }
     return AAudioServiceStreamBase::pause();
 }
@@ -168,11 +231,10 @@
     if (endpoint == nullptr) {
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    // Add this stream to the mixer.
     aaudio_result_t result = endpoint->stopStream(this);
     if (result != AAUDIO_OK) {
         ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
-        processError();
+        processFatalError();
     }
     return AAudioServiceStreamBase::stop();
 }
@@ -183,9 +245,17 @@
  * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
  */
 aaudio_result_t AAudioServiceStreamShared::flush()  {
-    // TODO make sure we are paused
-    // TODO actually flush the data
-    return AAudioServiceStreamBase::flush() ;
+    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
+    if (endpoint == nullptr) {
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    if (mState != AAUDIO_STREAM_STATE_PAUSED) {
+        ALOGE("AAudioServiceStreamShared::flush() stream not paused, state = %s",
+            AAudio_convertStreamStateToText(mState));
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    // Data will get flushed when the client receives the FLUSHED event.
+    return AAudioServiceStreamBase::flush();
 }
 
 aaudio_result_t AAudioServiceStreamShared::close()  {
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index dfdbbb3..35af434 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -97,6 +97,14 @@
 
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
 
+    /**
+     * @param requestedCapacityFrames requested minimum capacity in frames, or AAUDIO_UNSPECIFIED
+     * @param framesPerBurst frames per burst of the underlying endpoint
+     * @return capacity in frames, a multiple of framesPerBurst, or a negative error code
+     */
+    static int32_t calculateBufferCapacity(int32_t requestedCapacityFrames,
+                                            int32_t framesPerBurst);
+
 private:
     android::AAudioService  &mAudioService;
     AAudioServiceEndpoint   *mServiceEndpoint = nullptr;