Merge "fix output nFilledLen when prepareForAdaptivePlayback is true" into oc-mr1-dev
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 0684ed6..6ec285f 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -113,7 +113,8 @@
     return result;
 }
 
-aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor)
+aaudio_result_t AudioEndpoint::configure(const EndpointDescriptor *pEndpointDescriptor,
+                                         aaudio_direction_t   direction)
 {
     aaudio_result_t result = AudioEndpoint_validateDescriptor(pEndpointDescriptor);
     if (result != AAUDIO_OK) {
@@ -143,12 +144,20 @@
             descriptor->dataAddress
     );
 
-    // ============================ down data queue =============================
+    // ============================ data queue =============================
     descriptor = &pEndpointDescriptor->dataQueueDescriptor;
     ALOGV("AudioEndpoint::configure() data framesPerBurst = %d", descriptor->framesPerBurst);
-    ALOGV("AudioEndpoint::configure() data readCounterAddress = %p", descriptor->readCounterAddress);
-    mFreeRunning = descriptor->readCounterAddress == nullptr;
+    ALOGV("AudioEndpoint::configure() data readCounterAddress = %p",
+          descriptor->readCounterAddress);
+
+    // An example of free running is when the other side of the FIFO is read or written by hardware DMA
+    // or by a DSP. Because that side does not update its counter, we have to update it ourselves.
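+    // In that case the client advances the counter itself, using its timing model as an estimate.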
+    int64_t *remoteCounter = (direction == AAUDIO_DIRECTION_OUTPUT)
+                             ? descriptor->readCounterAddress // read by other side
+                             : descriptor->writeCounterAddress; // written by other side
+    mFreeRunning = (remoteCounter == nullptr);
     ALOGV("AudioEndpoint::configure() mFreeRunning = %d", mFreeRunning ? 1 : 0);
+
     int64_t *readCounterAddress = (descriptor->readCounterAddress == nullptr)
                                   ? &mDataReadCounter
                                   : descriptor->readCounterAddress;
@@ -173,13 +182,8 @@
     return mUpCommandQueue->read(commandPtr, 1);
 }
 
-aaudio_result_t AudioEndpoint::writeDataNow(const void *buffer, int32_t numFrames)
-{
-    return mDataQueue->write(buffer, numFrames);
-}
-
-void AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
-    mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
+int32_t AudioEndpoint::getEmptyFramesAvailable(WrappingBuffer *wrappingBuffer) {
+    return mDataQueue->getEmptyRoomAvailable(wrappingBuffer);
 }
 
 int32_t AudioEndpoint::getEmptyFramesAvailable()
@@ -187,7 +191,7 @@
     return mDataQueue->getFifoControllerBase()->getEmptyFramesAvailable();
 }
 
-void AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
+int32_t AudioEndpoint::getFullFramesAvailable(WrappingBuffer *wrappingBuffer)
 {
     return mDataQueue->getFullDataAvailable(wrappingBuffer);
 }
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index e7c6916..81a4f7b 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -40,7 +40,8 @@
     /**
      * Configure based on the EndPointDescriptor_t.
      */
-    aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor);
+    aaudio_result_t configure(const EndpointDescriptor *pEndpointDescriptor,
+                              aaudio_direction_t direction);
 
     /**
      * Read from a command passed up from the Server.
@@ -48,17 +49,11 @@
      */
     aaudio_result_t readUpCommand(AAudioServiceMessage *commandPtr);
 
-    /**
-     * Non-blocking write.
-     * @return framesWritten or a negative error code.
-     */
-    aaudio_result_t writeDataNow(const void *buffer, int32_t numFrames);
-
-    void getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+    int32_t getEmptyFramesAvailable(android::WrappingBuffer *wrappingBuffer);
 
     int32_t getEmptyFramesAvailable();
 
-    void getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
+    int32_t getFullFramesAvailable(android::WrappingBuffer *wrappingBuffer);
 
     int32_t getFullFramesAvailable();
 
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 8b14922..4c7d0f7 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -28,10 +28,10 @@
 #include <binder/IServiceManager.h>
 
 #include <aaudio/AAudio.h>
+#include <cutils/properties.h>
 #include <utils/String16.h>
 #include <utils/Trace.h>
 
-#include "AudioClock.h"
 #include "AudioEndpointParcelable.h"
 #include "binding/AAudioStreamRequest.h"
 #include "binding/AAudioStreamConfiguration.h"
@@ -39,6 +39,7 @@
 #include "binding/AAudioServiceMessage.h"
 #include "core/AudioStreamBuilder.h"
 #include "fifo/FifoBuffer.h"
+#include "utility/AudioClock.h"
 #include "utility/LinearRamp.h"
 
 #include "AudioStreamInternal.h"
@@ -64,7 +65,12 @@
         , mFramesPerBurst(16)
         , mStreamVolume(1.0f)
         , mInService(inService)
-        , mServiceInterface(serviceInterface) {
+        , mServiceInterface(serviceInterface)
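+        // Tuning values come from the aaudio.wakeup_delay_usec and aaudio.minimum_sleep_usec properties.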
+        , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
+        , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
+        {
+    ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
+          mWakeupDelayNanos, mMinimumSleepNanos);
 }
 
 AudioStreamInternal::~AudioStreamInternal() {
@@ -135,7 +141,7 @@
         }
 
         // Configure endpoint based on descriptor.
-        mAudioEndpoint.configure(&mEndpointDescriptor);
+        mAudioEndpoint.configure(&mEndpointDescriptor, getDirection());
 
         mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
         int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
@@ -472,12 +478,12 @@
 aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
                                                  int64_t timeoutNanoseconds)
 {
-    const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
+    const char * traceName = "aaProc";
+    const char * fifoName = "aaRdy";
     ATRACE_BEGIN(traceName);
-    int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
     if (ATRACE_ENABLED()) {
-        const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
-        ATRACE_INT(traceName, fullFrames);
+        int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+        ATRACE_INT(fifoName, fullFrames);
     }
 
     aaudio_result_t result = AAUDIO_OK;
@@ -505,10 +511,12 @@
         if (timeoutNanoseconds == 0) {
             break; // don't block
         } else if (framesLeft > 0) {
-            // clip the wake time to something reasonable
-            if (wakeTimeNanos < currentTimeNanos) {
-                wakeTimeNanos = currentTimeNanos;
+            if (!mAudioEndpoint.isFreeRunning()) {
+                // If there is software on the other end of the FIFO then it may get delayed.
+                // So wake up just a little after we expect it to be ready.
+                wakeTimeNanos += mWakeupDelayNanos;
             }
+
             if (wakeTimeNanos > deadlineNanos) {
                 // If we time out, just return the framesWritten so far.
                 // TODO remove after we fix the deadline bug
@@ -525,12 +533,30 @@
                 break;
             }
 
-            int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
-            AudioClock::sleepForNanos(sleepForNanos);
+            currentTimeNanos = AudioClock::getNanoseconds();
+            int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
+            // Guarantee a minimum sleep time.
+            if (wakeTimeNanos < earliestWakeTime) {
+                wakeTimeNanos = earliestWakeTime;
+            }
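+            // This also prevents a tight polling loop when the computed wake time has already passed.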
+
+            if (ATRACE_ENABLED()) {
+                int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+                ATRACE_INT(fifoName, fullFrames);
+                int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
+                ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
+            }
+
+            AudioClock::sleepUntilNanoTime(wakeTimeNanos);
             currentTimeNanos = AudioClock::getNanoseconds();
         }
     }
 
+    if (ATRACE_ENABLED()) {
+        int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
+        ATRACE_INT(fifoName, fullFrames);
+    }
+
     // return error or framesProcessed
     (void) loopCount;
     ATRACE_END();
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 109e425..1b991de 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -27,6 +27,7 @@
 #include "client/IsochronousClockModel.h"
 #include "client/AudioEndpoint.h"
 #include "core/AudioStream.h"
+#include "utility/AudioClock.h"
 #include "utility/LinearRamp.h"
 
 using android::sp;
@@ -173,6 +174,11 @@
     // Adjust timing model based on timestamp from service.
     void processTimestamp(uint64_t position, int64_t time);
 
+    // The thread on the other side of the FIFO will have wakeup jitter.
+    // By delaying slightly we can avoid waking up before the other side is ready.
+    const int32_t            mWakeupDelayNanos; // delay past typical wakeup jitter
+    const int32_t            mMinimumSleepNanos; // minimum sleep while polling
+
     AudioEndpointParcelable  mEndPointParcelable; // description of the buffers filled by service
     EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
 };
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 22f8bd1..7b1e53e 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -24,6 +24,9 @@
 #include "client/AudioStreamInternalCapture.h"
 #include "utility/AudioClock.h"
 
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+#include <utils/Trace.h>
+
 using android::WrappingBuffer;
 
 using namespace aaudio;
@@ -36,7 +39,6 @@
 
 AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
 
-
 // Read the data, block if needed and timeoutNanoseconds > 0
 aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                int64_t timeoutNanoseconds)
@@ -52,6 +54,9 @@
         return result;
     }
 
+    const char *traceName = "aaRdNow";
+    ATRACE_BEGIN(traceName);
+
     if (mAudioEndpoint.isFreeRunning()) {
         //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
         // Update data queue based on the timing model.
@@ -63,6 +68,9 @@
     // If the write index passed the read index then consider it an overrun.
     if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
         mXRunCount++;
+        if (ATRACE_ENABLED()) {
+            ATRACE_INT("aaOverRuns", mXRunCount);
+        }
     }
 
     // Read some data from the buffer.
@@ -70,6 +78,9 @@
     int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
     //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
     //    numFrames, framesProcessed);
+    if (ATRACE_ENABLED()) {
+        ATRACE_INT("aaRead", framesProcessed);
+    }
 
     // Calculate an ideal time to wake up.
     if (wakeTimePtr != nullptr && framesProcessed >= 0) {
@@ -82,14 +93,14 @@
             case AAUDIO_STREAM_STATE_OPEN:
             case AAUDIO_STREAM_STATE_STARTING:
                 break;
-            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
+            case AAUDIO_STREAM_STATE_STARTED:
             {
-                uint32_t burstSize = mFramesPerBurst;
-                if (burstSize < 32) {
-                    burstSize = 32; // TODO review
-                }
+                // When do we expect the next write burst to occur?
 
-                uint64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + burstSize;
+                // Calculate the frame position based on the readCounter because
+                // the writeCounter might have just advanced in the background,
+                // causing us to sleep until a later burst.
+                int64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                 wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
             }
                 break;
@@ -99,10 +110,8 @@
         *wakeTimePtr = wakeTime;
 
     }
-//    ALOGD("AudioStreamInternalCapture::readNow finished: now = %llu, read# = %llu, wrote# = %llu",
-//         (unsigned long long)currentNanoTime,
-//         (unsigned long long)mAudioEndpoint.getDataReadCounter(),
-//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+
+    ATRACE_END();
     return framesProcessed;
 }
 
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 1b18577..31e0a40 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -18,6 +18,10 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
+#include <utils/Trace.h>
+
 #include "client/AudioStreamInternalPlay.h"
 #include "utility/AudioClock.h"
 
@@ -99,6 +103,10 @@
         return result;
     }
 
+    const char *traceName = "aaWrNow";
+    ATRACE_BEGIN(traceName);
+
+    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
     if (mAudioEndpoint.isFreeRunning()) {
         // Update data queue based on the timing model.
         int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
@@ -108,10 +116,10 @@
 
     // If the read index passed the write index then consider it an underrun.
     if (mAudioEndpoint.getFullFramesAvailable() < 0) {
-        ALOGV("AudioStreamInternal::processDataNow() - XRun! write = %d, read = %d",
-              (int)mAudioEndpoint.getDataWriteCounter(),
-              (int)mAudioEndpoint.getDataReadCounter());
         mXRunCount++;
+        if (ATRACE_ENABLED()) {
+            ATRACE_INT("aaUnderRuns", mXRunCount);
+        }
     }
 
     // Write some data to the buffer.
@@ -119,6 +127,9 @@
     int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
     //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
     //    numFrames, framesWritten);
+    if (ATRACE_ENABLED()) {
+        ATRACE_INT("aaWrote", framesWritten);
+    }
 
     // Calculate an ideal time to wake up.
     if (wakeTimePtr != nullptr && framesWritten >= 0) {
@@ -135,14 +146,15 @@
                     wakeTime = currentNanoTime;
                 }
                 break;
-            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
+            case AAUDIO_STREAM_STATE_STARTED:
             {
-                uint32_t burstSize = mFramesPerBurst;
-                if (burstSize < 32) {
-                    burstSize = 32; // TODO review
-                }
+                // When do we expect the next read burst to occur?
 
-                uint64_t nextReadPosition = mAudioEndpoint.getDataReadCounter() + burstSize;
+                // Calculate the frame position based on the writeCounter because
+                // the readCounter might have just advanced in the background,
+                // causing us to sleep until a later burst.
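+                // Subtracting the buffer size converts the write position to the matching read position of a full FIFO.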
+                int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
+                        - mAudioEndpoint.getBufferSizeInFrames();
                 wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
             }
                 break;
@@ -152,10 +164,8 @@
         *wakeTimePtr = wakeTime;
 
     }
-//    ALOGD("AudioStreamInternal::processDataNow finished: now = %llu, read# = %llu, wrote# = %llu",
-//         (unsigned long long)currentNanoTime,
-//         (unsigned long long)mAudioEndpoint.getDataReadCounter(),
-//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
+
+    ATRACE_END();
     return framesWritten;
 }
 
@@ -170,7 +180,7 @@
 
     mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);
 
-    // Read data in one or two parts.
+    // Write data in one or two parts.
     int partIndex = 0;
     while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
         int32_t framesToWrite = framesLeft;
@@ -291,6 +301,7 @@
     aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
     AAudioStream_dataCallback appCallback = getDataCallbackProc();
     if (appCallback == nullptr) return NULL;
+    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
 
     // result might be a frame count
     while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
@@ -302,10 +313,7 @@
                 mCallbackFrames);
 
         if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
-            // Write audio data to stream.
-            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
-
-            // This is a BLOCKING WRITE!
+            // Write audio data to stream. This is a BLOCKING WRITE!
             result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
             if ((result != mCallbackFrames)) {
                 ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 6ea1172..c06c8a9 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -149,7 +149,7 @@
     int64_t nextBurstPosition = mFramesPerBurst * nextBurstIndex;
     int64_t framesDelta = nextBurstPosition - mMarkerFramePosition;
     int64_t nanosDelta = convertDeltaPositionToTime(framesDelta);
-    int64_t time = (int64_t) (mMarkerNanoTime + nanosDelta);
+    int64_t time = mMarkerNanoTime + nanosDelta;
 //    ALOGD("IsochronousClockModel::convertPositionToTime: pos = %llu --> time = %llu",
 //         (unsigned long long)framePosition,
 //         (unsigned long long)time);
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 19b08c4..4859c69 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -82,9 +82,10 @@
           mSampleRate, mSamplesPerFrame, mFormat,
           AudioStream_convertSharingModeToShortText(mSharingMode),
           (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "OUTPUT" : "INPUT");
-    ALOGI("AudioStream::open() device = %d, perfMode = %d, callbackFrames = %d",
-          mDeviceId, mPerformanceMode, mFramesPerDataCallback);
-
+    ALOGI("AudioStream::open() device = %d, perfMode = %d, callback: %s with frames = %d",
+          mDeviceId, mPerformanceMode,
+          (mDataCallbackProc == nullptr ? "OFF" : "ON"),
+          mFramesPerDataCallback);
 
     return AAUDIO_OK;
 }
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index 6b4a772..8d2c62d 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -105,16 +105,18 @@
 
 }
 
-void FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
+fifo_frames_t FifoBuffer::getFullDataAvailable(WrappingBuffer *wrappingBuffer) {
     fifo_frames_t framesAvailable = mFifo->getFullFramesAvailable();
     fifo_frames_t startIndex = mFifo->getReadIndex();
     fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+    return framesAvailable;
 }
 
-void FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
+fifo_frames_t FifoBuffer::getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer) {
     fifo_frames_t framesAvailable = mFifo->getEmptyFramesAvailable();
     fifo_frames_t startIndex = mFifo->getWriteIndex();
     fillWrappingBuffer(wrappingBuffer, framesAvailable, startIndex);
+    return framesAvailable;
 }
 
 fifo_frames_t FifoBuffer::read(void *buffer, fifo_frames_t numFrames) {
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 2b262a1..a94e9b0 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -64,16 +64,18 @@
      * if the data is split across the end of the FIFO then set data2 and numFrames2.
      * Other wise set them to null
      * @param wrappingBuffer
+     * @return total full frames available
      */
-    void getFullDataAvailable(WrappingBuffer *wrappingBuffer);
+    fifo_frames_t getFullDataAvailable(WrappingBuffer *wrappingBuffer);
 
     /**
      * Return pointer to available empty frames in data1 and set size in numFrames1.
      * if the room is split across the end of the FIFO then set data2 and numFrames2.
      * Other wise set them to null
      * @param wrappingBuffer
+     * @return total empty frames available
      */
-    void getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
+    fifo_frames_t getEmptyRoomAvailable(WrappingBuffer *wrappingBuffer);
 
     /**
      * Copy data from the FIFO into the buffer.
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index a3198d7..2450920 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -369,12 +369,43 @@
     return prop;
 }
 
+int32_t AAudioProperty_getWakeupDelayMicros() {
+    const int32_t minMicros = 0; // arbitrary
+    const int32_t defaultMicros = 200; // arbitrary, based on some observed jitter
+    const int32_t maxMicros = 5000; // arbitrary, probably don't want more than 500
+    int32_t prop = property_get_int32(AAUDIO_PROP_WAKEUP_DELAY_USEC, defaultMicros);
+    if (prop < minMicros) {
+        ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, minMicros);
+        prop = minMicros;
+    } else if (prop > maxMicros) {
+        ALOGW("AAudioProperty_getWakeupDelayMicros: clipped %d to %d", prop, maxMicros);
+        prop = maxMicros;
+    }
+    return prop;
+}
+
+int32_t AAudioProperty_getMinimumSleepMicros() {
+    const int32_t minMicros = 20; // arbitrary
+    const int32_t defaultMicros = 200; // arbitrary
+    const int32_t maxMicros = 2000; // arbitrary
+    int32_t prop = property_get_int32(AAUDIO_PROP_MINIMUM_SLEEP_USEC, defaultMicros);
+    if (prop < minMicros) {
+        ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, minMicros);
+        prop = minMicros;
+    } else if (prop > maxMicros) {
+        ALOGW("AAudioProperty_getMinimumSleepMicros: clipped %d to %d", prop, maxMicros);
+        prop = maxMicros;
+    }
+    return prop;
+}
+
 int32_t AAudioProperty_getHardwareBurstMinMicros() {
     const int32_t defaultMicros = 1000; // arbitrary
     const int32_t maxMicros = 1000 * 1000; // arbitrary
     int32_t prop = property_get_int32(AAUDIO_PROP_HW_BURST_MIN_USEC, defaultMicros);
     if (prop < 1 || prop > maxMicros) {
-        ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d", prop);
+        ALOGE("AAudioProperty_getHardwareBurstMinMicros: invalid = %d, use %d",
+              prop, defaultMicros);
         prop = defaultMicros;
     }
     return prop;
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index efd663d..acd319b 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -195,13 +195,35 @@
 
 /**
  * Read system property.
- * @return number of bursts per mixer cycle
+ * @return number of bursts per AAudio service mixer cycle
  */
 int32_t AAudioProperty_getMixerBursts();
 
 #define AAUDIO_PROP_HW_BURST_MIN_USEC      "aaudio.hw_burst_min_usec"
 
 /**
+ * Read a system property that specifies the number of extra microseconds that a thread
+ * should sleep when waiting for another thread to service a FIFO. This is used
+ * to keep the waking thread from being overly optimistic about the other thread's
+ * wakeup timing. This value should be set high enough to cover typical scheduling jitter
+ * for a real-time thread.
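+ * For example, "adb shell setprop aaudio.wakeup_delay_usec 300" requests a 300 microsecond delay.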
+ *
+ * @return number of microseconds to delay the wakeup.
+ */
+int32_t AAudioProperty_getWakeupDelayMicros();
+
+#define AAUDIO_PROP_WAKEUP_DELAY_USEC      "aaudio.wakeup_delay_usec"
+
+/**
+ * Read a system property that specifies the minimum sleep time when polling the FIFO.
+ *
+ * @return minimum number of microseconds to sleep.
+ */
+int32_t AAudioProperty_getMinimumSleepMicros();
+
+#define AAUDIO_PROP_MINIMUM_SLEEP_USEC      "aaudio.minimum_sleep_usec"
+
+/**
  * Read system property.
  * This is handy in case the DMA is bursting too quickly for the CPU to keep up.
  * For example, there may be a DMA burst every 100 usec but you only
diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk
index 28b96d5..a5fbf14 100644
--- a/media/libeffects/downmix/Android.mk
+++ b/media/libeffects/downmix/Android.mk
@@ -20,7 +20,8 @@
 	$(call include-path-for, audio-effects) \
 	$(call include-path-for, audio-utils)
 
-LOCAL_CFLAGS += -fvisibility=hidden -DBUILD_FLOAT
+#-DBUILD_FLOAT
+LOCAL_CFLAGS += -fvisibility=hidden
 LOCAL_CFLAGS += -Wall -Werror
 
 LOCAL_HEADER_LIBRARIES += libhardware_headers
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 521dc5f..b180853 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -2,6 +2,8 @@
     name: "libmedia_headers",
     vendor_available: true,
     export_include_dirs: ["include", "omx/1.0/include"],
+    header_libs: ["libstagefright_headers"],
+    export_header_lib_headers: ["libstagefright_headers"],
 }
 
 cc_library {
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 77f0cab..942a8fc 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -2318,6 +2318,33 @@
             }
         }
     } else {
+        // VolumeShapers are not affected when a track moves between players for
+        // gapless playback (setNextMediaPlayer).
+        // We forward VolumeShaper operations that do not change configuration
+        // to the new player so that unducking may occur as expected.
+        // Unducking is idempotent; applying it twice in a row has the same effect as applying it once.
+        if (configuration->getType() == VolumeShaper::Configuration::TYPE_ID
+                && mNextOutput != nullptr) {
+            ALOGV("applyVolumeShaper: Attempting to forward missed operation: %s %s",
+                    configuration->toString().c_str(), operation->toString().c_str());
+            Mutex::Autolock nextLock(mNextOutput->mLock);
+
+            // recycled track should be forwarded from this AudioSink by switchToNextOutput
+            sp<AudioTrack> track = mNextOutput->mRecycledTrack;
+            if (track != nullptr) {
+                ALOGD("Forward VolumeShaper operation to recycled track %p", track.get());
+                (void)track->applyVolumeShaper(configuration, operation);
+            } else {
+                // There is a small chance that the unduck occurs after the next
+                // player has already started, but before it is registered to receive
+                // the unduck command.
+                track = mNextOutput->mTrack;
+                if (track != nullptr) {
+                    ALOGD("Forward VolumeShaper operation to track %p", track.get());
+                    (void)track->applyVolumeShaper(configuration, operation);
+                }
+            }
+        }
         status = mVolumeHandler->applyVolumeShaper(configuration, operation);
     }
     return status;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 55c364f..1a7db26 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -933,7 +933,6 @@
     if (stream == AUDIO_STREAM_TTS) {
         flags = AUDIO_OUTPUT_FLAG_TTS;
     } else if (stream == AUDIO_STREAM_VOICE_CALL &&
-               getPhoneState() == AUDIO_MODE_IN_COMMUNICATION &&
                audio_is_linear_pcm(format)) {
         flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
                                        AUDIO_OUTPUT_FLAG_DIRECT);
@@ -1647,7 +1646,6 @@
             halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
         }
     } else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
-               getPhoneState() == AUDIO_MODE_IN_COMMUNICATION &&
                audio_is_linear_pcm(format)) {
         flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
     }
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 2bf73a0..e8fc080 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -85,7 +85,7 @@
 
     virtual binder::Status endConfigure(int operatingMode) override;
 
-    // Returns -EBUSY if device is not idle
+    // Returns -EBUSY if device is not idle or in error state
     virtual binder::Status deleteStream(int streamId) override;
 
     virtual binder::Status createStream(
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 49423a0..6421695 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -240,6 +240,7 @@
 
     status_t res = OK;
     std::vector<wp<Camera3StreamInterface>> streams;
+    nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
     {
         Mutex::Autolock l(mLock);
         if (mStatus == STATUS_UNINITIALIZED) return res;
@@ -251,9 +252,10 @@
                 SET_ERR_L("Can't stop streaming");
                 // Continue to close device even in case of error
             } else {
-                res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
+                res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
                 if (res != OK) {
-                    SET_ERR_L("Timeout waiting for HAL to drain");
+                    SET_ERR_L("Timeout waiting for HAL to drain (%" PRIi64 " ns)",
+                            maxExpectedDuration);
                     // Continue to close device even in case of error
                 }
             }
@@ -312,6 +314,7 @@
         mInterface->clear();
         mOutputStreams.clear();
         mInputStream.clear();
+        mDeletedStreams.clear();
         mBufferManager.clear();
         internalUpdateStatusLocked(STATUS_UNINITIALIZED);
     }
@@ -1138,6 +1141,7 @@
         uint32_t width, uint32_t height, int format, int *id) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
+    nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
     Mutex::Autolock l(mLock);
     ALOGV("Camera %s: Creating new input stream %d: %d x %d, format %d",
             mId.string(), mNextStreamId, width, height, format);
@@ -1158,7 +1162,7 @@
             break;
         case STATUS_ACTIVE:
             ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__);
-            res = internalPauseAndWaitLocked();
+            res = internalPauseAndWaitLocked(maxExpectedDuration);
             if (res != OK) {
                 SET_ERR_L("Can't pause captures to reconfigure streams!");
                 return res;
@@ -1225,6 +1229,7 @@
         int streamSetId, bool isShared, uint32_t consumerUsage) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
+    nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
     Mutex::Autolock l(mLock);
     ALOGV("Camera %s: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d"
             " consumer usage 0x%x, isShared %d", mId.string(), mNextStreamId, width, height, format,
@@ -1246,7 +1251,7 @@
             break;
         case STATUS_ACTIVE:
             ALOGV("%s: Stopping activity to reconfigure streams", __FUNCTION__);
-            res = internalPauseAndWaitLocked();
+            res = internalPauseAndWaitLocked(maxExpectedDuration);
             if (res != OK) {
                 SET_ERR_L("Can't pause captures to reconfigure streams!");
                 return res;
@@ -1425,6 +1430,12 @@
         return -EBUSY;
     }
 
+    if (mStatus == STATUS_ERROR) {
+        ALOGW("%s: Camera %s: deleteStream not allowed in ERROR state",
+                __FUNCTION__, mId.string());
+        return -EBUSY;
+    }
+
     sp<Camera3StreamInterface> deletedStream;
     ssize_t outputStreamIdx = mOutputStreams.indexOfKey(id);
     if (mInputStream != NULL && id == mInputStream->getId()) {
@@ -1491,59 +1502,66 @@
     }
 
     Mutex::Autolock il(mInterfaceLock);
-    Mutex::Autolock l(mLock);
 
-    switch (mStatus) {
-        case STATUS_ERROR:
-            CLOGE("Device has encountered a serious error");
-            return INVALID_OPERATION;
-        case STATUS_UNINITIALIZED:
-            CLOGE("Device is not initialized!");
-            return INVALID_OPERATION;
-        case STATUS_UNCONFIGURED:
-        case STATUS_CONFIGURED:
-        case STATUS_ACTIVE:
-            // OK
-            break;
-        default:
-            SET_ERR_L("Unexpected status: %d", mStatus);
-            return INVALID_OPERATION;
-    }
+    {
+        Mutex::Autolock l(mLock);
+        switch (mStatus) {
+            case STATUS_ERROR:
+                CLOGE("Device has encountered a serious error");
+                return INVALID_OPERATION;
+            case STATUS_UNINITIALIZED:
+                CLOGE("Device is not initialized!");
+                return INVALID_OPERATION;
+            case STATUS_UNCONFIGURED:
+            case STATUS_CONFIGURED:
+            case STATUS_ACTIVE:
+                // OK
+                break;
+            default:
+                SET_ERR_L("Unexpected status: %d", mStatus);
+                return INVALID_OPERATION;
+        }
 
-    if (!mRequestTemplateCache[templateId].isEmpty()) {
-        *request = mRequestTemplateCache[templateId];
-        return OK;
+        if (!mRequestTemplateCache[templateId].isEmpty()) {
+            *request = mRequestTemplateCache[templateId];
+            return OK;
+        }
     }
 
     camera_metadata_t *rawRequest;
     status_t res = mInterface->constructDefaultRequestSettings(
             (camera3_request_template_t) templateId, &rawRequest);
-    if (res == BAD_VALUE) {
-        ALOGI("%s: template %d is not supported on this camera device",
-              __FUNCTION__, templateId);
-        return res;
-    } else if (res != OK) {
-        CLOGE("Unable to construct request template %d: %s (%d)",
-                templateId, strerror(-res), res);
-        return res;
+
+    {
+        Mutex::Autolock l(mLock);
+        if (res == BAD_VALUE) {
+            ALOGI("%s: template %d is not supported on this camera device",
+                  __FUNCTION__, templateId);
+            return res;
+        } else if (res != OK) {
+            CLOGE("Unable to construct request template %d: %s (%d)",
+                    templateId, strerror(-res), res);
+            return res;
+        }
+
+        set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
+        mRequestTemplateCache[templateId].acquire(rawRequest);
+
+        *request = mRequestTemplateCache[templateId];
     }
-
-    set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
-    mRequestTemplateCache[templateId].acquire(rawRequest);
-
-    *request = mRequestTemplateCache[templateId];
     return OK;
 }
 
 status_t Camera3Device::waitUntilDrained() {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
+    nsecs_t maxExpectedDuration = getExpectedInFlightDuration();
     Mutex::Autolock l(mLock);
 
-    return waitUntilDrainedLocked();
+    return waitUntilDrainedLocked(maxExpectedDuration);
 }
 
-status_t Camera3Device::waitUntilDrainedLocked() {
+status_t Camera3Device::waitUntilDrainedLocked(nsecs_t maxExpectedDuration) {
     switch (mStatus) {
         case STATUS_UNINITIALIZED:
         case STATUS_UNCONFIGURED:
@@ -1559,9 +1577,9 @@
             SET_ERR_L("Unexpected status: %d",mStatus);
             return INVALID_OPERATION;
     }
-
-    ALOGV("%s: Camera %s: Waiting until idle", __FUNCTION__, mId.string());
-    status_t res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
+    ALOGV("%s: Camera %s: Waiting until idle (%" PRIi64 " ns)", __FUNCTION__, mId.string(),
+            maxExpectedDuration);
+    status_t res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
     if (res != OK) {
         SET_ERR_L("Error waiting for HAL to drain: %s (%d)", strerror(-res),
                 res);
@@ -1577,15 +1595,16 @@
 }
 
 // Pause to reconfigure
-status_t Camera3Device::internalPauseAndWaitLocked() {
+status_t Camera3Device::internalPauseAndWaitLocked(nsecs_t maxExpectedDuration) {
     mRequestThread->setPaused(true);
     mPauseStateNotify = true;
 
-    ALOGV("%s: Camera %s: Internal wait until idle", __FUNCTION__, mId.string());
-    status_t res = waitUntilStateThenRelock(/*active*/ false, kShutdownTimeout);
+    ALOGV("%s: Camera %s: Internal wait until idle (%" PRIi64 " ns)", __FUNCTION__, mId.string(),
+          maxExpectedDuration);
+    status_t res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
     if (res != OK) {
         SET_ERR_L("Can't idle device in %f seconds!",
-                kShutdownTimeout/1e9);
+                maxExpectedDuration/1e9);
     }
 
     return res;
@@ -2352,13 +2371,13 @@
 
 status_t Camera3Device::registerInFlight(uint32_t frameNumber,
         int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
-        bool hasAppCallback) {
+        bool hasAppCallback, nsecs_t maxExpectedDuration) {
     ATRACE_CALL();
     Mutex::Autolock l(mInFlightLock);
 
     ssize_t res;
     res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
-            hasAppCallback));
+            hasAppCallback, maxExpectedDuration));
     if (res < 0) return res;
 
     if (mInFlightMap.size() == 1) {
@@ -2369,6 +2388,7 @@
         }
     }
 
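+    // Keep a running total so getExpectedInFlightDuration() can size drain and reconfigure timeouts.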
+    mExpectedInflightDuration += maxExpectedDuration;
     return OK;
 }
 
@@ -2389,6 +2409,7 @@
 }
 
 void Camera3Device::removeInFlightMapEntryLocked(int idx) {
+    nsecs_t duration = mInFlightMap.valueAt(idx).maxExpectedDuration;
     mInFlightMap.removeItemsAt(idx, 1);
 
     // Indicate idle inFlightMap to the status tracker
@@ -2399,6 +2420,7 @@
             mStatusTracker->markComponentIdle(mInFlightStatusId, Fence::NO_FENCE);
         }
     }
+    mExpectedInflightDuration -= duration;
 }
 
 void Camera3Device::removeInFlightRequestIfReadyLocked(int idx) {
@@ -2479,6 +2501,7 @@
                 request.pendingOutputBuffers.size(), 0);
         }
         mInFlightMap.clear();
+        mExpectedInflightDuration = 0;
     }
 
     // Then return all inflight buffers not returned by HAL
@@ -2502,16 +2525,60 @@
         streamBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
         streamBuffer.acquire_fence = -1;
         streamBuffer.release_fence = -1;
+
+        // First check if the buffer belongs to deleted stream
+        bool streamDeleted = false;
+        for (auto& stream : mDeletedStreams) {
+            if (streamId == stream->getId()) {
+                streamDeleted = true;
+                // Return buffer to deleted stream
+                camera3_stream* halStream = stream->asHalStream();
+                streamBuffer.stream = halStream;
+                switch (halStream->stream_type) {
+                    case CAMERA3_STREAM_OUTPUT:
+                        res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0);
+                        if (res != OK) {
+                            ALOGE("%s: Can't return output buffer for frame %d to"
+                                  " stream %d: %s (%d)",  __FUNCTION__,
+                                  frameNumber, streamId, strerror(-res), res);
+                        }
+                        break;
+                    case CAMERA3_STREAM_INPUT:
+                        res = stream->returnInputBuffer(streamBuffer);
+                        if (res != OK) {
+                            ALOGE("%s: Can't return input buffer for frame %d to"
+                                  " stream %d: %s (%d)",  __FUNCTION__,
+                                  frameNumber, streamId, strerror(-res), res);
+                        }
+                        break;
+                    default: // Bi-directional stream is deprecated
+                        ALOGE("%s: stream %d has unknown stream type %d",
+                                __FUNCTION__, streamId, halStream->stream_type);
+                        break;
+                }
+                break;
+            }
+        }
+        if (streamDeleted) {
+            continue;
+        }
+
+        // Then check against configured streams
         if (streamId == inputStreamId) {
             streamBuffer.stream = mInputStream->asHalStream();
             res = mInputStream->returnInputBuffer(streamBuffer);
             if (res != OK) {
                 ALOGE("%s: Can't return input buffer for frame %d to"
-                      "  its stream:%s (%d)",  __FUNCTION__,
-                      frameNumber, strerror(-res), res);
+                      " stream %d: %s (%d)",  __FUNCTION__,
+                      frameNumber, streamId, strerror(-res), res);
             }
         } else {
-            streamBuffer.stream = mOutputStreams.valueFor(streamId)->asHalStream();
+            ssize_t idx = mOutputStreams.indexOfKey(streamId);
+            if (idx == NAME_NOT_FOUND) {
+                ALOGE("%s: Output stream id %d not found!", __FUNCTION__, streamId);
+                continue;
+            }
+            streamBuffer.stream = mOutputStreams.valueAt(idx)->asHalStream();
             returnOutputBuffers(&streamBuffer, /*size*/1, /*timestamp*/ 0);
         }
     }
@@ -3918,6 +3985,42 @@
     return true;
 }
 
+nsecs_t Camera3Device::RequestThread::calculateMaxExpectedDuration(const camera_metadata_t *request) {
+    nsecs_t maxExpectedDuration = kDefaultExpectedDuration;
+    camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
+    find_camera_metadata_ro_entry(request,
+            ANDROID_CONTROL_AE_MODE,
+            &e);
+    if (e.count == 0) return maxExpectedDuration;
+
+    switch (e.data.u8[0]) {
+        case ANDROID_CONTROL_AE_MODE_OFF:
+            find_camera_metadata_ro_entry(request,
+                    ANDROID_SENSOR_EXPOSURE_TIME,
+                    &e);
+            if (e.count > 0) {
+                maxExpectedDuration = e.data.i64[0];
+            }
+            find_camera_metadata_ro_entry(request,
+                    ANDROID_SENSOR_FRAME_DURATION,
+                    &e);
+            if (e.count > 0) {
+                maxExpectedDuration = std::max(e.data.i64[0], maxExpectedDuration);
+            }
+            break;
+        default:
+            find_camera_metadata_ro_entry(request,
+                    ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+                    &e);
+            if (e.count > 1) {
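+                // e.g. a target range of [15, 30] fps gives 1e9 / 15 ns, roughly 66.7 ms per frame.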
+                maxExpectedDuration = 1e9 / e.data.i32[0];
+            }
+            break;
+    }
+
+    return maxExpectedDuration;
+}
+
 bool Camera3Device::RequestThread::threadLoop() {
     ATRACE_CALL();
     status_t res;
@@ -4138,7 +4241,8 @@
         res = parent->registerInFlight(halRequest->frame_number,
                 totalNumBuffers, captureRequest->mResultExtras,
                 /*hasInput*/halRequest->input_buffer != NULL,
-                hasCallback);
+                hasCallback,
+                calculateMaxExpectedDuration(halRequest->settings));
         ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
                ", burstId = %" PRId32 ".",
                 __FUNCTION__,
@@ -4192,6 +4296,12 @@
     return false;
 }
 
+nsecs_t Camera3Device::getExpectedInFlightDuration() {
+    Mutex::Autolock al(mInFlightLock);
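+    // Never report less than kMinInflightDuration so drain and shutdown waits keep a reasonable lower bound.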
+    return mExpectedInflightDuration > kMinInflightDuration ?
+            mExpectedInflightDuration : kMinInflightDuration;
+}
+
 void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
     if (mNextRequests.empty()) {
         return;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 0251d62..d700e03 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -187,10 +187,11 @@
 
     static const size_t        kDumpLockAttempts  = 10;
     static const size_t        kDumpSleepDuration = 100000; // 0.10 sec
-    static const nsecs_t       kShutdownTimeout   = 5000000000; // 5 sec
     static const nsecs_t       kActiveTimeout     = 500000000;  // 500 ms
     static const size_t        kInFlightWarnLimit = 30;
     static const size_t        kInFlightWarnLimitHighSpeed = 256; // batch size 32 * pipe depth 8
+    static const nsecs_t       kDefaultExpectedDuration = 100000000; // 100 ms
+    static const nsecs_t       kMinInflightDuration = 5000000000; // 5 s
     // SCHED_FIFO priority for request submission thread in HFR mode
     static const int           kRequestThreadPriority = 1;
 
@@ -485,7 +486,7 @@
      * CameraDeviceBase interface we shouldn't need to.
      * Must be called with mLock and mInterfaceLock both held.
      */
-    status_t internalPauseAndWaitLocked();
+    status_t internalPauseAndWaitLocked(nsecs_t maxExpectedDuration);
 
     /**
      * Resume work after internalPauseAndWaitLocked()
@@ -511,7 +512,7 @@
      *
      * Need to be called with mLock and mInterfaceLock held.
      */
-    status_t waitUntilDrainedLocked();
+    status_t waitUntilDrainedLocked(nsecs_t maxExpectedDuration);
 
     /**
      * Do common work for setting up a streaming or single capture request.
@@ -776,6 +777,9 @@
         // send requests in mNextRequests to HAL in a batch. Return true = success
         bool sendRequestsBatch();
 
+        // Calculate the expected maximum duration for a request
+        nsecs_t calculateMaxExpectedDuration(const camera_metadata_t *request);
+
         wp<Camera3Device>  mParent;
         wp<camera3::StatusTracker>  mStatusTracker;
         sp<HalInterface>   mInterface;
@@ -877,6 +881,11 @@
         // is not for constrained high speed recording, this flag will also be true.
         bool hasCallback;
 
+        // Maximum expected frame duration for this request.
+        // For manual captures, equal to the max of requested exposure time and frame duration
+        // For auto-exposure modes, equal to 1/(lower end of target FPS range)
+        nsecs_t maxExpectedDuration;
+
         // Default constructor needed by KeyedVector
         InFlightRequest() :
                 shutterTimestamp(0),
@@ -885,11 +894,12 @@
                 haveResultMetadata(false),
                 numBuffersLeft(0),
                 hasInputBuffer(false),
-                hasCallback(true) {
+                hasCallback(true),
+                maxExpectedDuration(kDefaultExpectedDuration) {
         }
 
         InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
-                bool hasAppCallback) :
+                bool hasAppCallback, nsecs_t maxDuration) :
                 shutterTimestamp(0),
                 sensorTimestamp(0),
                 requestStatus(OK),
@@ -897,20 +907,31 @@
                 numBuffersLeft(numBuffers),
                 resultExtras(extras),
                 hasInputBuffer(hasInput),
-                hasCallback(hasAppCallback) {
+                hasCallback(hasAppCallback),
+                maxExpectedDuration(maxDuration) {
         }
     };
 
     // Map from frame number to the in-flight request state
     typedef KeyedVector<uint32_t, InFlightRequest> InFlightMap;
 
-    Mutex                  mInFlightLock; // Protects mInFlightMap
+
+    Mutex                  mInFlightLock; // Protects mInFlightMap and
+                                          // mExpectedInflightDuration
     InFlightMap            mInFlightMap;
+    nsecs_t                mExpectedInflightDuration = 0;
     int                    mInFlightStatusId;
 
+
     status_t registerInFlight(uint32_t frameNumber,
             int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
-            bool callback);
+            bool callback, nsecs_t maxExpectedDuration);
+
+    /**
+     * Returns the maximum expected time it will take for all currently in-flight
+     * requests to complete, based on their settings.
+     */
+    nsecs_t getExpectedInFlightDuration();
 
     /**
      * Tracking for idle detection
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 35096eb..ff2dcef 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -293,6 +293,15 @@
 void Camera3InputStream::onBufferFreed(const wp<GraphicBuffer>& gb) {
     const sp<GraphicBuffer> buffer = gb.promote();
     if (buffer != nullptr) {
+        camera3_stream_buffer streamBuffer =
+                {nullptr, &buffer->handle, 0, -1, -1};
+        // Check if this buffer is outstanding.
+        if (isOutstandingBuffer(streamBuffer)) {
+            ALOGV("%s: Stream %d: Trying to free a buffer that is still being "
+                    "processed.", __FUNCTION__, mId);
+            return;
+        }
+
         sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
         if (callback != nullptr) {
             callback->onBufferFreed(mId, buffer->handle);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index e77421a..9e6ac79 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -479,6 +479,7 @@
     if (res == OK) {
         fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
         if (buffer->buffer) {
+            Mutex::Autolock l(mOutstandingBuffersLock);
             mOutstandingBuffers.push_back(*buffer->buffer);
         }
     }
@@ -486,11 +487,13 @@
     return res;
 }
 
-bool Camera3Stream::isOutstandingBuffer(const camera3_stream_buffer &buffer) {
+bool Camera3Stream::isOutstandingBuffer(const camera3_stream_buffer &buffer) const{
     if (buffer.buffer == nullptr) {
         return false;
     }
 
+    Mutex::Autolock l(mOutstandingBuffersLock);
+
     for (auto b : mOutstandingBuffers) {
         if (b == *buffer.buffer) {
             return true;
@@ -504,6 +507,8 @@
         return;
     }
 
+    Mutex::Autolock l(mOutstandingBuffersLock);
+
     for (auto b = mOutstandingBuffers.begin(); b != mOutstandingBuffers.end(); b++) {
         if (*b == *buffer.buffer) {
             mOutstandingBuffers.erase(b);
@@ -575,6 +580,7 @@
     if (res == OK) {
         fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/false);
         if (buffer->buffer) {
+            Mutex::Autolock l(mOutstandingBuffersLock);
             mOutstandingBuffers.push_back(*buffer->buffer);
         }
     }
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 0940d62..44fe6b6 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -461,6 +461,9 @@
     // INVALID_OPERATION if they cannot be obtained.
     virtual status_t getEndpointUsage(uint32_t *usage) const = 0;
 
+    // Return whether the buffer is in the list of outstanding buffers.
+    bool isOutstandingBuffer(const camera3_stream_buffer& buffer) const;
+
     // Tracking for idle state
     wp<StatusTracker> mStatusTracker;
     // Status tracker component ID
@@ -483,9 +486,6 @@
 
     status_t        cancelPrepareLocked();
 
-    // Return whether the buffer is in the list of outstanding buffers.
-    bool isOutstandingBuffer(const camera3_stream_buffer& buffer);
-
     // Remove the buffer from the list of outstanding buffers.
     void removeOutstandingBuffer(const camera3_stream_buffer& buffer);
 
@@ -502,6 +502,7 @@
     // Number of buffers allocated on last prepare call.
     size_t mLastMaxCount;
 
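+    // Protects mOutstandingBuffers; mutable so that isOutstandingBuffer() can remain const.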
+    mutable Mutex mOutstandingBuffersLock;
     // Outstanding buffers dequeued from the stream's buffer queue.
     List<buffer_handle_t> mOutstandingBuffers;
 
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index 43203d4..8222734 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -18,9 +18,17 @@
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
+#define ATRACE_TAG ATRACE_TAG_AUDIO
+
 #include <cstring>
+#include <utils/Trace.h>
+
 #include "AAudioMixer.h"
 
+#ifndef AAUDIO_MIXER_ATRACE_ENABLED
+#define AAUDIO_MIXER_ATRACE_ENABLED    1
+#endif
+
 using android::WrappingBuffer;
 using android::FifoBuffer;
 using android::fifo_frames_t;
@@ -41,13 +49,28 @@
     memset(mOutputBuffer, 0, mBufferSizeInBytes);
 }
 
-bool AAudioMixer::mix(FifoBuffer *fifo, float volume) {
+bool AAudioMixer::mix(int trackIndex, FifoBuffer *fifo, float volume) {
     WrappingBuffer wrappingBuffer;
     float *destination = mOutputBuffer;
     fifo_frames_t framesLeft = mFramesPerBurst;
 
+#if AAUDIO_MIXER_ATRACE_ENABLED
+    ATRACE_BEGIN("aaMix");
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
+
     // Gather the data from the client. May be in two parts.
-    fifo->getFullDataAvailable(&wrappingBuffer);
+    fifo_frames_t fullFrames = fifo->getFullDataAvailable(&wrappingBuffer);
+#if AAUDIO_MIXER_ATRACE_ENABLED
+    if (ATRACE_ENABLED()) {
+        char rdyText[] = "aaMixRdy#";
+        char letter = 'A' + (trackIndex % 26);
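+        // Overwrite the trailing '#' with a per-track letter so each track gets its own trace counter.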
+        rdyText[sizeof(rdyText) - 2] = letter;
+        ATRACE_INT(rdyText, fullFrames);
+    }
+#else /* AAUDIO_MIXER_ATRACE_ENABLED */
+    (void) trackIndex;
+    (void) fullFrames;
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
 
     // Mix data in one or two parts.
     int partIndex = 0;
@@ -70,6 +93,10 @@
         //ALOGW("AAudioMixer::mix() UNDERFLOW by %d / %d frames ----- UNDERFLOW !!!!!!!!!!",
         //      framesLeft, mFramesPerBurst);
     }
+#if AAUDIO_MIXER_ATRACE_ENABLED
+    ATRACE_END();
+#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
+
     return (framesLeft > 0); // did not get all the frames we needed, ie. "underflow"
 }
 
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index 9155fec..a8090bc 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -37,7 +37,7 @@
      * @param volume
      * @return true if underflowed
      */
-    bool mix(android::FifoBuffer *fifo, float volume);
+    bool mix(int trackIndex, android::FifoBuffer *fifo, float volume);
 
     void mixPart(float *destination, float *source, int32_t numFrames, float volume);
 
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index e609ab5..b83b918 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -74,16 +74,18 @@
         // Mix data from each active stream.
         mMixer.clear();
         { // use lock guard
+            int index = 0;
             std::lock_guard <std::mutex> lock(mLockStreams);
             for (sp<AAudioServiceStreamShared> sharedStream : mRegisteredStreams) {
                 if (sharedStream->isRunning()) {
                     FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
                     float volume = 1.0; // to match legacy volume
-                    bool underflowed = mMixer.mix(fifo, volume);
+                    bool underflowed = mMixer.mix(index, fifo, volume);
                     underflowCount += underflowed ? 1 : 0;
                     // TODO log underflows in each stream
                     sharedStream->markTransferTime(AudioClock::getNanoseconds());
                 }
+                index++;
             }
         }
 
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index afa2ff0..ff02c0f 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -154,7 +154,7 @@
               status);
         return AAUDIO_ERROR_UNAVAILABLE;
     } else {
-        ALOGD("createMmapBuffer status %d, buffer_size, %d burst_size %d"
+        ALOGD("createMmapBuffer status = %d, buffer_size = %d, burst_size = %d"
                 ", Sharable FD: %s",
               status,
               abs(mMmapBufferinfo.buffer_size_frames),