Merge "m4v_h263: Fix heap buffer overflow issue in BitstreamFillCache" into rvc-qpr-dev
diff --git a/camera/ICameraClient.cpp b/camera/ICameraClient.cpp
index c02c81b..bef2ea0 100644
--- a/camera/ICameraClient.cpp
+++ b/camera/ICameraClient.cpp
@@ -142,7 +142,8 @@
             camera_frame_metadata_t metadata;
             if (data.dataAvail() > 0) {
                 metadata.number_of_faces = data.readInt32();
-                if (metadata.number_of_faces <= 0 ||
+                // Zero faces is a valid case, to notify clients that no faces are now visible
+                if (metadata.number_of_faces < 0 ||
                         metadata.number_of_faces > (int32_t)(INT32_MAX / sizeof(camera_face_t))) {
                     ALOGE("%s: Too large face count: %d", __FUNCTION__, metadata.number_of_faces);
                     return BAD_VALUE;
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 7d78571..1354fce 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1890,10 +1890,8 @@
      *   <li>ACaptureRequest</li>
      * </ul></p>
      *
-     * <p>Instead of using ACAMERA_SCALER_CROP_REGION with dual purposes of crop and zoom, the
-     * application can now choose to use this tag to specify the desired zoom level. The
-     * ACAMERA_SCALER_CROP_REGION can still be used to specify the horizontal or vertical
-     * crop to achieve aspect ratios different than the native camera sensor.</p>
+     * <p>Instead of using ACAMERA_SCALER_CROP_REGION for zoom, the application can now choose to
+     * use this tag to specify the desired zoom level.</p>
      * <p>By using this control, the application gains a simpler way to control zoom, which can
      * be a combination of optical and digital zoom. For example, a multi-camera system may
      * contain more than one lens with different focal lengths, and the user can use optical
@@ -3413,16 +3411,24 @@
      * respectively.</p>
      * <p>The camera device may adjust the crop region to account for rounding and other hardware
      * requirements; the final crop region used will be included in the output capture result.</p>
+     * <p>The camera sensor output aspect ratio depends on factors such as output stream
+     * combination and ACAMERA_CONTROL_AE_TARGET_FPS_RANGE, and shouldn't be adjusted by using
+     * this control. In addition, the camera device will treat different camera sensor output sizes
+     * (potentially with in-sensor crop) as the same crop of
+     * ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE. As a result, the application shouldn't assume the
+     * maximum crop region always maps to the same aspect ratio or field of view for the
+     * sensor output.</p>
      * <p>Starting from API level 30, it's strongly recommended to use ACAMERA_CONTROL_ZOOM_RATIO
      * to take advantage of better support for zoom with logical multi-camera. The benefits
      * include better precision with optical-digital zoom combination, and ability to do
      * zoom-out from 1.0x. When using ACAMERA_CONTROL_ZOOM_RATIO for zoom, the crop region in
-     * the capture request must be either letterboxing or pillarboxing (but not both). The
+     * the capture request should be left as the default activeArray size. The
      * coordinate system is post-zoom, meaning that the activeArraySize or
      * preCorrectionActiveArraySize covers the camera device's field of view "after" zoom.  See
      * ACAMERA_CONTROL_ZOOM_RATIO for details.</p>
      * <p>The data representation is int[4], which maps to (left, top, width, height).</p>
      *
+     * @see ACAMERA_CONTROL_AE_TARGET_FPS_RANGE
      * @see ACAMERA_CONTROL_ZOOM_RATIO
      * @see ACAMERA_DISTORTION_CORRECTION_MODE
      * @see ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM
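
For illustration only (not part of this change): the updated documentation recommends driving zoom through ACAMERA_CONTROL_ZOOM_RATIO and leaving the crop region at its default. A minimal sketch follows; the helper name setZoomRatio is hypothetical, and it assumes `request` was obtained from ACameraDevice_createCaptureRequest() on an API 30+ device.

    #include <camera/NdkCameraMetadataTags.h>
    #include <camera/NdkCaptureRequest.h>

    // Hypothetical helper: express zoom as a ratio instead of shrinking
    // ACAMERA_SCALER_CROP_REGION. The crop region stays at the default
    // active-array size, as the updated docs recommend.
    static camera_status_t setZoomRatio(ACaptureRequest* request, float zoomRatio) {
        return ACaptureRequest_setEntry_float(
                request, ACAMERA_CONTROL_ZOOM_RATIO, /*count=*/1, &zoomRatio);
    }
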
diff --git a/drm/libmediadrm/DrmMetricsConsumer.cpp b/drm/libmediadrm/DrmMetricsConsumer.cpp
index b47b4ff..5f0b26e 100644
--- a/drm/libmediadrm/DrmMetricsConsumer.cpp
+++ b/drm/libmediadrm/DrmMetricsConsumer.cpp
@@ -37,8 +37,8 @@
 template <> std::string GetAttributeName<KeyStatusType>(KeyStatusType type) {
     static const char *type_names[] = {"USABLE", "EXPIRED",
                                        "OUTPUT_NOT_ALLOWED", "STATUS_PENDING",
-                                       "INTERNAL_ERROR"};
-    if (((size_t)type) > arraysize(type_names)) {
+                                       "INTERNAL_ERROR", "USABLE_IN_FUTURE"};
+    if (((size_t)type) >= arraysize(type_names)) {
         return "UNKNOWN_TYPE";
     }
     return type_names[(size_t)type];
@@ -48,7 +48,7 @@
     static const char *type_names[] = {"PROVISION_REQUIRED", "KEY_NEEDED",
                                        "KEY_EXPIRED", "VENDOR_DEFINED",
                                        "SESSION_RECLAIMED"};
-    if (((size_t)type) > arraysize(type_names)) {
+    if (((size_t)type) >= arraysize(type_names)) {
         return "UNKNOWN_TYPE";
     }
     return type_names[(size_t)type];
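
The two `>` to `>=` changes above are off-by-one fixes: an array of N names only has valid indices 0..N-1, so an index equal to N must also map to "UNKNOWN_TYPE". Below is a standalone sketch of the corrected lookup pattern, using a toy enum and table rather than the real KeyStatusType/EventType names.

    #include <cstddef>
    #include <cstdio>

    enum class ToyStatus { kUsable, kExpired, kInternalError };

    static const char* toyStatusName(ToyStatus type) {
        static const char* kNames[] = {"USABLE", "EXPIRED", "INTERNAL_ERROR"};
        constexpr size_t kCount = sizeof(kNames) / sizeof(kNames[0]);
        // '>=' rejects index == kCount; '>' would read one element past the end.
        if (static_cast<size_t>(type) >= kCount) {
            return "UNKNOWN_TYPE";
        }
        return kNames[static_cast<size_t>(type)];
    }

    int main() {
        // Index 3 is out of range for a 3-element table.
        printf("%s\n", toyStatusName(static_cast<ToyStatus>(3)));
        return 0;
    }
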
diff --git a/media/codec2/components/gav1/Android.bp b/media/codec2/components/gav1/Android.bp
index 5c4abb7..f374089 100644
--- a/media/codec2/components/gav1/Android.bp
+++ b/media/codec2/components/gav1/Android.bp
@@ -13,8 +13,4 @@
 
     srcs: ["C2SoftGav1Dec.cpp"],
     static_libs: ["libgav1"],
-
-    include_dirs: [
-        "external/libgav1/libgav1/",
-    ],
 }
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index ec5f549..120ba7a 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -288,9 +288,7 @@
 void C2SoftGav1Dec::onRelease() { destroyDecoder(); }
 
 c2_status_t C2SoftGav1Dec::onFlush_sm() {
-  Libgav1StatusCode status =
-      mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
-                              /*user_private_data=*/0);
+  Libgav1StatusCode status = mCodecCtx->SignalEOS();
   if (status != kLibgav1StatusOk) {
     ALOGE("Failed to flush av1 decoder. status: %d.", status);
     return C2_CORRUPTED;
@@ -299,7 +297,7 @@
   // Dequeue frame (if any) that was enqueued previously.
   const libgav1::DecoderBuffer *buffer;
   status = mCodecCtx->DequeueFrame(&buffer);
-  if (status != kLibgav1StatusOk) {
+  if (status != kLibgav1StatusOk && status != kLibgav1StatusNothingToDequeue) {
     ALOGE("Failed to dequeue frame after flushing the av1 decoder. status: %d",
           status);
     return C2_CORRUPTED;
@@ -433,7 +431,8 @@
     TIME_DIFF(mTimeEnd, mTimeStart, delay);
 
     const Libgav1StatusCode status =
-        mCodecCtx->EnqueueFrame(bitstream, inSize, frameIndex);
+        mCodecCtx->EnqueueFrame(bitstream, inSize, frameIndex,
+                                /*buffer_private_data=*/nullptr);
 
     GETTIME(&mTimeEnd, nullptr);
     TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
@@ -447,17 +446,6 @@
       return;
     }
 
-  } else {
-    const Libgav1StatusCode status =
-        mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
-                                /*user_private_data=*/0);
-    if (status != kLibgav1StatusOk) {
-      ALOGE("Failed to flush av1 decoder. status: %d.", status);
-      work->result = C2_CORRUPTED;
-      work->workletsProcessed = 1u;
-      mSignalledError = true;
-      return;
-    }
   }
 
   (void)outputBuffer(pool, work);
@@ -470,13 +458,12 @@
   }
 }
 
-static void copyOutputBufferToYV12Frame(uint8_t *dst, const uint8_t *srcY,
-                                        const uint8_t *srcU,
-                                        const uint8_t *srcV, size_t srcYStride,
-                                        size_t srcUStride, size_t srcVStride,
-                                        uint32_t width, uint32_t height) {
-  const size_t dstYStride = align(width, 16);
-  const size_t dstUVStride = align(dstYStride / 2, 16);
+static void copyOutputBufferToYuvPlanarFrame(uint8_t *dst, const uint8_t *srcY,
+                                             const uint8_t *srcU,
+                                             const uint8_t *srcV, size_t srcYStride,
+                                             size_t srcUStride, size_t srcVStride,
+                                             size_t dstYStride, size_t dstUVStride,
+                                             uint32_t width, uint32_t height) {
   uint8_t *const dstStart = dst;
 
   for (size_t i = 0; i < height; ++i) {
@@ -570,10 +557,10 @@
 static void convertYUV420Planar16ToYUV420Planar(
     uint8_t *dst, const uint16_t *srcY, const uint16_t *srcU,
     const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
-    size_t srcVStride, size_t dstStride, size_t width, size_t height) {
+    size_t srcVStride, size_t dstYStride, size_t dstUVStride,
+    size_t width, size_t height) {
   uint8_t *dstY = (uint8_t *)dst;
-  size_t dstYSize = dstStride * height;
-  size_t dstUVStride = align(dstStride / 2, 16);
+  size_t dstYSize = dstYStride * height;
   size_t dstUVSize = dstUVStride * height / 2;
   uint8_t *dstV = dstY + dstYSize;
   uint8_t *dstU = dstV + dstUVSize;
@@ -584,7 +571,7 @@
     }
 
     srcY += srcYStride;
-    dstY += dstStride;
+    dstY += dstYStride;
   }
 
   for (size_t y = 0; y < (height + 1) / 2; ++y) {
@@ -607,13 +594,14 @@
   const libgav1::DecoderBuffer *buffer;
   const Libgav1StatusCode status = mCodecCtx->DequeueFrame(&buffer);
 
-  if (status != kLibgav1StatusOk) {
+  if (status != kLibgav1StatusOk && status != kLibgav1StatusNothingToDequeue) {
     ALOGE("av1 decoder DequeueFrame failed. status: %d.", status);
     return false;
   }
 
-  // |buffer| can be NULL if status was equal to kLibgav1StatusOk. This is not
-  // an error. This could mean one of two things:
+  // |buffer| can be NULL if status was equal to kLibgav1StatusOk or
+  // kLibgav1StatusNothingToDequeue. This is not an error. This could mean one
+  // of two things:
   //  - The EnqueueFrame() call was either a flush (called with nullptr).
   //  - The enqueued frame did not have any displayable frames.
   if (!buffer) {
@@ -683,6 +671,9 @@
   size_t srcYStride = buffer->stride[0];
   size_t srcUStride = buffer->stride[1];
   size_t srcVStride = buffer->stride[2];
+  C2PlanarLayout layout = wView.layout();
+  size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
+  size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
 
   if (buffer->bitdepth == 10) {
     const uint16_t *srcY = (const uint16_t *)buffer->plane[0];
@@ -692,18 +683,19 @@
     if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
       convertYUV420Planar16ToY410(
           (uint32_t *)dst, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
-          srcVStride / 2, align(mWidth, 16), mWidth, mHeight);
+          srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight);
     } else {
       convertYUV420Planar16ToYUV420Planar(dst, srcY, srcU, srcV, srcYStride / 2,
                                           srcUStride / 2, srcVStride / 2,
-                                          align(mWidth, 16), mWidth, mHeight);
+                                          dstYStride, dstUVStride, mWidth, mHeight);
     }
   } else {
     const uint8_t *srcY = (const uint8_t *)buffer->plane[0];
     const uint8_t *srcU = (const uint8_t *)buffer->plane[1];
     const uint8_t *srcV = (const uint8_t *)buffer->plane[2];
-    copyOutputBufferToYV12Frame(dst, srcY, srcU, srcV, srcYStride, srcUStride,
-                                srcVStride, mWidth, mHeight);
+    copyOutputBufferToYuvPlanarFrame(dst, srcY, srcU, srcV, srcYStride, srcUStride,
+                                     srcVStride, dstYStride, dstUVStride,
+                                     mWidth, mHeight);
   }
   finishWork(buffer->user_private_data, work, std::move(block));
   block = nullptr;
@@ -722,9 +714,7 @@
     return C2_OMITTED;
   }
 
-  Libgav1StatusCode status =
-      mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
-                              /*user_private_data=*/0);
+  const Libgav1StatusCode status = mCodecCtx->SignalEOS();
   if (status != kLibgav1StatusOk) {
     ALOGE("Failed to flush av1 decoder. status: %d.", status);
     return C2_CORRUPTED;
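
The decoder changes above stop assuming a 16-pixel-aligned destination stride and instead use the row increments reported by the output block's C2PlanarLayout (wView.layout().planes[...].rowInc). The following is a generic, self-contained sketch of the stride-aware row copy this enables; the function name copyPlane and its parameters are illustrative only.

    #include <cstdint>
    #include <cstring>

    // Copy one plane row by row. Both source and destination may pad each row
    // beyond the visible width, so neither stride can be derived from `width`.
    static void copyPlane(uint8_t* dst, size_t dstStride,
                          const uint8_t* src, size_t srcStride,
                          size_t width, size_t height) {
        for (size_t row = 0; row < height; ++row) {
            std::memcpy(dst, src, width);
            dst += dstStride;
            src += srcStride;
        }
    }
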
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index a7c08bb..555adc9 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -18,8 +18,8 @@
 #define ANDROID_C2_SOFT_GAV1_DEC_H_
 
 #include <SimpleC2Component.h>
-#include "libgav1/src/decoder.h"
-#include "libgav1/src/decoder_settings.h"
+#include "libgav1/src/gav1/decoder.h"
+#include "libgav1/src/gav1/decoder_settings.h"
 
 #define GETTIME(a, b) gettimeofday(a, b);
 #define TIME_DIFF(start, end, diff)     \
diff --git a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
index 15564d9..a8b5377 100644
--- a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
+++ b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
@@ -279,6 +279,8 @@
         // skip 7 <type + "vorbis"> bytes
         makeBitReader((const uint8_t *)data + 7, inSize - 7, &buf, &ref, &bits);
         if (data[0] == 1) {
+            // release any memory that vorbis_info_init will blindly overwrite
+            vorbis_info_clear(mVi);
             vorbis_info_init(mVi);
             if (0 != _vorbis_unpack_info(mVi, &bits)) {
                 ALOGE("Encountered error while unpacking info");
@@ -323,6 +325,8 @@
                 work->result = C2_CORRUPTED;
                 return;
             }
+            // release any memory that vorbis_dsp_init will blindly overwrite
+            vorbis_dsp_clear(mState);
             if (0 != vorbis_dsp_init(mState, mVi)) {
                 ALOGE("Encountered error while dsp init");
                 mSignalledError = true;
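
The added clear calls matter because vorbis_info_init() and vorbis_dsp_init() overwrite their target structs without freeing anything, so a stream carrying a second set of headers would otherwise leak the earlier allocations. A minimal sketch of the clear-before-reinit pattern, assuming `vi` was initialized earlier (the helper name reinitVorbisInfo is hypothetical):

    #include <vorbis/codec.h>

    // Re-initialize a vorbis_info that may already hold allocations
    // (e.g. when a repeated identification header arrives).
    static void reinitVorbisInfo(vorbis_info* vi) {
        vorbis_info_clear(vi);  // free anything the previous init/unpack allocated
        vorbis_info_init(vi);   // start clean for the new header
    }
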
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 1972d3f..54107bd 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1331,8 +1331,6 @@
         mCallback->onError(err2, ACTION_CODE_FATAL);
         return;
     }
-    // We're not starting after flush.
-    (void)mSentConfigAfterResume.test_and_set();
     err2 = mChannel->start(inputFormat, outputFormat, buffersBoundToCodec);
     if (err2 != OK) {
         mCallback->onError(err2, ACTION_CODE_FATAL);
@@ -1580,7 +1578,6 @@
         return;
     }
 
-    mSentConfigAfterResume.clear();
     {
         Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
         const std::unique_ptr<Config> &config = *configLocked;
@@ -1797,7 +1794,7 @@
             // handle configuration changes in work done
             Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
             const std::unique_ptr<Config> &config = *configLocked;
-            bool changed = !mSentConfigAfterResume.test_and_set();
+            bool changed = false;
             Config::Watcher<C2StreamInitDataInfo::output> initData =
                 config->watch<C2StreamInitDataInfo::output>();
             if (!work->worklets.empty()
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index e58a1e4..c49a16c 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -272,8 +272,6 @@
 
     // The output format can be processed without a registered slot.
     if (outputFormat) {
-        ALOGD("[%s] popFromStashAndRegister: output format changed to %s",
-                mName, outputFormat->debugString().c_str());
         updateSkipCutBuffer(outputFormat, entry.notify);
     }
 
@@ -301,6 +299,10 @@
     }
 
     if (!entry.notify) {
+        if (outputFormat) {
+            ALOGD("[%s] popFromStashAndRegister: output format changed to %s",
+                    mName, outputFormat->debugString().c_str());
+        }
         mPending.pop_front();
         return DISCARD;
     }
@@ -317,6 +319,10 @@
     // Append information from the front stash entry to outBuffer.
     (*outBuffer)->meta()->setInt64("timeUs", entry.timestamp);
     (*outBuffer)->meta()->setInt32("flags", entry.flags);
+    if (outputFormat) {
+        ALOGD("[%s] popFromStashAndRegister: output format changed to %s",
+                mName, outputFormat->debugString().c_str());
+    }
     ALOGV("[%s] popFromStashAndRegister: "
           "out buffer index = %zu [%p] => %p + %zu (%lld)",
           mName, *index, outBuffer->get(),
diff --git a/media/codec2/sfplugin/include/media/stagefright/CCodec.h b/media/codec2/sfplugin/include/media/stagefright/CCodec.h
index ecb2506..dbbb5d5 100644
--- a/media/codec2/sfplugin/include/media/stagefright/CCodec.h
+++ b/media/codec2/sfplugin/include/media/stagefright/CCodec.h
@@ -193,7 +193,6 @@
 
     Mutexed<std::unique_ptr<CCodecConfig>> mConfig;
     Mutexed<std::list<std::unique_ptr<C2Work>>> mWorkDoneQueue;
-    std::atomic_flag mSentConfigAfterResume;
 
     friend class CCodecCallbackImpl;
 
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index b88e4e8..27bd357 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -840,7 +840,7 @@
         }
 
         if (err != OK) {
-            mPendingFrames.clear();
+            clearPendingFrames();
 
             mBlockIter.advance();
             mbuf->release();
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index a47f189..cab8929 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -1035,6 +1035,11 @@
  * but still allow queries to the stream to occur from other threads. This often
  * happens if you are monitoring stream progress from a UI thread.
  *
+ * NOTE: This function is only fully implemented for MMAP streams,
+ * which are low latency streams supported by some devices.
+ * On other "Legacy" streams some audio resources will still be in use
+ * and some callbacks may still be in process after this call.
+ *
  * @param stream reference provided by AAudioStreamBuilder_openStream()
  * @return {@link #AAUDIO_OK} or a negative error.
  */
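
A usage sketch of the release/close split this note documents (illustrative, not part of the patch; the helper name finishWithStream is hypothetical): release the hardware as soon as the stream is done, keep the handle around for queries, and close it later. On non-MMAP "Legacy" streams, per the note above, some resources remain in use until close.

    #include <aaudio/AAudio.h>

    static void finishWithStream(AAudioStream* stream) {
        AAudioStream_requestStop(stream);
        AAudioStream_release(stream);        // free hardware resources (API 30+)
        // Queries are still allowed on a released stream, e.g. from a UI thread.
        int64_t framesWritten = AAudioStream_getFramesWritten(stream);
        (void) framesWritten;
        AAudioStream_close(stream);          // releases if needed, then deletes
    }
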
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 9fa2e40..fce322b 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -14,8 +14,6 @@
  * limitations under the License.
  */
 
-#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
-                          : "AudioStreamInternalCapture_Client")
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
@@ -29,6 +27,14 @@
 #define ATRACE_TAG ATRACE_TAG_AUDIO
 #include <utils/Trace.h>
 
+// We do this after the #includes because if a header uses ALOG,
+// it would fail on the reference to mInService.
+#undef LOG_TAG
+// This file is used in both client and server processes.
+// This is needed to make sense of the logs more easily.
+#define LOG_TAG (mInService ? "AudioStreamInternalCapture_Service" \
+                          : "AudioStreamInternalCapture_Client")
+
 using android::WrappingBuffer;
 
 using namespace aaudio;
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 1303daf..d6b73b4 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -14,8 +14,6 @@
  * limitations under the License.
  */
 
-#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
-                          : "AudioStreamInternalPlay_Client")
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
 
@@ -26,6 +24,14 @@
 #include "client/AudioStreamInternalPlay.h"
 #include "utility/AudioClock.h"
 
+// We do this after the #includes because if a header uses ALOG,
+// it would fail on the reference to mInService.
+#undef LOG_TAG
+// This file is used in both client and server processes.
+// This is needed to make sense of the logs more easily.
+#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
+                            : "AudioStreamInternalPlay_Client")
+
 using android::WrappingBuffer;
 
 using namespace aaudio;
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 8965875..cfa7221 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -255,17 +255,16 @@
     if (audioStream != nullptr) {
         aaudio_stream_id_t id = audioStream->getId();
         ALOGD("%s(s#%u) called ---------------", __func__, id);
-        result = audioStream->safeRelease();
-        // safeRelease will only fail if called illegally, for example, from a callback.
+        result = audioStream->safeReleaseClose();
+        // safeReleaseClose will only fail if called illegally, for example, from a callback.
         // That would result in deleting an active stream, which would cause a crash.
         if (result != AAUDIO_OK) {
             ALOGW("%s(s#%u) failed. Close it from another thread.",
                   __func__, id);
         } else {
             audioStream->unregisterPlayerBase();
-             // Mark CLOSED to keep destructors from asserting.
-            audioStream->closeFinal();
-            delete audioStream;
+            // Allow the stream to be deleted.
+            AudioStreamBuilder::stopUsingStream(audioStream);
         }
         ALOGD("%s(s#%u) returned %d ---------", __func__, id, result);
     }
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index f5c75ca..43240ec 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -39,7 +39,7 @@
 }
 
 AudioStream::AudioStream()
-        : mPlayerBase(new MyPlayerBase(this))
+        : mPlayerBase(new MyPlayerBase())
         , mStreamId(AAudio_getNextStreamId())
         {
     // mThread is a pthread_t of unknown size so we need memset.
@@ -48,6 +48,10 @@
 }
 
 AudioStream::~AudioStream() {
+    // Please preserve this log because there have been several bugs related to
+    // AudioStream deletion and late callbacks.
+    ALOGD("%s(s#%u) mPlayerBase strongCount = %d",
+            __func__, getId(), mPlayerBase->getStrongCount());
     // If the stream is deleted when OPEN or in use then audio resources will leak.
     // This would indicate an internal error. So we want to find this ASAP.
     LOG_ALWAYS_FATAL_IF(!(getState() == AAUDIO_STREAM_STATE_CLOSED
@@ -55,8 +59,6 @@
                           || getState() == AAUDIO_STREAM_STATE_DISCONNECTED),
                         "~AudioStream() - still in use, state = %s",
                         AudioGlobal_convertStreamStateToText(getState()));
-
-    mPlayerBase->clearParentReference(); // remove reference to this AudioStream
 }
 
 aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
@@ -301,18 +303,29 @@
 }
 
 aaudio_result_t AudioStream::safeRelease() {
-    // This get temporarily unlocked in the release() when joining callback threads.
+    // This gets temporarily unlocked in the MMAP release() when joining callback threads.
     std::lock_guard<std::mutex> lock(mStreamLock);
     if (collidesWithCallback()) {
         ALOGE("%s cannot be called from a callback!", __func__);
         return AAUDIO_ERROR_INVALID_STATE;
     }
-    if (getState() == AAUDIO_STREAM_STATE_CLOSING) {
+    if (getState() == AAUDIO_STREAM_STATE_CLOSING) { // already released?
         return AAUDIO_OK;
     }
     return release_l();
 }
 
+aaudio_result_t AudioStream::safeReleaseClose() {
+    // This gets temporarily unlocked in the MMAP release() when joining callback threads.
+    std::lock_guard<std::mutex> lock(mStreamLock);
+    if (collidesWithCallback()) {
+        ALOGE("%s cannot be called from a callback!", __func__);
+        return AAUDIO_ERROR_INVALID_STATE;
+    }
+    releaseCloseFinal();
+    return AAUDIO_OK;
+}
+
 void AudioStream::setState(aaudio_stream_state_t state) {
     ALOGD("%s(s#%d) from %d to %d", __func__, getId(), mState, state);
     // Track transition to DISCONNECTED state.
@@ -520,11 +533,18 @@
 }
 
 #if AAUDIO_USE_VOLUME_SHAPER
-android::media::VolumeShaper::Status AudioStream::applyVolumeShaper(
-        const android::media::VolumeShaper::Configuration& configuration __unused,
-        const android::media::VolumeShaper::Operation& operation __unused) {
-    ALOGW("applyVolumeShaper() is not supported");
-    return android::media::VolumeShaper::Status::ok();
+::android::binder::Status AudioStream::MyPlayerBase::applyVolumeShaper(
+        const ::android::media::VolumeShaper::Configuration& configuration,
+        const ::android::media::VolumeShaper::Operation& operation) {
+    android::sp<AudioStream> audioStream;
+    {
+        std::lock_guard<std::mutex> lock(mParentLock);
+        audioStream = mParent.promote();
+    }
+    if (audioStream) {
+        return audioStream->applyVolumeShaper(configuration, operation);
+    }
+    return android::NO_ERROR;
 }
 #endif
 
@@ -534,26 +554,36 @@
     doSetVolume(); // apply this change
 }
 
-AudioStream::MyPlayerBase::MyPlayerBase(AudioStream *parent) : mParent(parent) {
-}
-
-AudioStream::MyPlayerBase::~MyPlayerBase() {
-}
-
-void AudioStream::MyPlayerBase::registerWithAudioManager() {
+void AudioStream::MyPlayerBase::registerWithAudioManager(const android::sp<AudioStream>& parent) {
+    std::lock_guard<std::mutex> lock(mParentLock);
+    mParent = parent;
     if (!mRegistered) {
-        init(android::PLAYER_TYPE_AAUDIO, AAudioConvert_usageToInternal(mParent->getUsage()));
+        init(android::PLAYER_TYPE_AAUDIO, AAudioConvert_usageToInternal(parent->getUsage()));
         mRegistered = true;
     }
 }
 
 void AudioStream::MyPlayerBase::unregisterWithAudioManager() {
+    std::lock_guard<std::mutex> lock(mParentLock);
     if (mRegistered) {
         baseDestroy();
         mRegistered = false;
     }
 }
 
+android::status_t AudioStream::MyPlayerBase::playerSetVolume() {
+    android::sp<AudioStream> audioStream;
+    {
+        std::lock_guard<std::mutex> lock(mParentLock);
+        audioStream = mParent.promote();
+    }
+    if (audioStream) {
+        // No pan and only the left volume is taken into account from the IPlayer interface
+        audioStream->setDuckAndMuteVolume(mVolumeMultiplierL  /* * mPanMultiplierL */);
+    }
+    return android::NO_ERROR;
+}
+
 void AudioStream::MyPlayerBase::destroy() {
     unregisterWithAudioManager();
 }
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index fb71c36..e0bd9d8 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -25,8 +25,10 @@
 #include <binder/Status.h>
 #include <utils/StrongPointer.h>
 
-#include "media/VolumeShaper.h"
-#include "media/PlayerBase.h"
+#include <media/AudioSystem.h>
+#include <media/PlayerBase.h>
+#include <media/VolumeShaper.h>
+
 #include "utility/AAudioUtilities.h"
 #include "utility/MonotonicCounter.h"
 
@@ -45,7 +47,8 @@
 /**
  * AAudio audio stream.
  */
-class AudioStream {
+// By extending AudioDeviceCallback, we also inherit from RefBase.
+class AudioStream : public android::AudioSystem::AudioDeviceCallback {
 public:
 
     AudioStream();
@@ -117,6 +120,17 @@
     virtual void logOpen();
     void logReleaseBufferState();
 
+    /* Note about naming for "release" and "close" related methods.
+     *
+     * These names are intended to match the public AAudio API.
+     * The original AAudio API had an AAudioStream_close() function that
+     * released the hardware and deleted the stream. That made it difficult
+     * because apps want to release the HW ASAP but are not in a rush to delete
+     * the stream object. So in R we added an AAudioStream_release() function
+     * that just releases the hardware.
+     * The AAudioStream_close() method releases if needed and then closes.
+     */
+
     /**
      * Free any hardware or system resources from the open() call.
      * It is safe to call release_l() multiple times.
@@ -126,22 +140,27 @@
         return AAUDIO_OK;
     }
 
-    aaudio_result_t closeFinal() {
+    /**
+     * Free any resources not already freed by release_l().
+     * Assume release_l() already called.
+     */
+    virtual void close_l() {
+        // Releasing the stream will set the state to CLOSING.
+        assert(getState() == AAUDIO_STREAM_STATE_CLOSING);
+        // setState() prevents a transition from CLOSING to any state other than CLOSED.
         // State is checked by destructor.
         setState(AAUDIO_STREAM_STATE_CLOSED);
-        return AAUDIO_OK;
     }
 
     /**
      * Release then close the stream.
-     * @return AAUDIO_OK or negative error.
      */
-    aaudio_result_t releaseCloseFinal() {
-        aaudio_result_t result = release_l(); // TODO review locking
-        if (result == AAUDIO_OK) {
-          result = closeFinal();
+    void releaseCloseFinal() {
+        if (getState() != AAUDIO_STREAM_STATE_CLOSING) { // not already released?
+            // Ignore result and keep closing.
+            (void) release_l();
         }
-        return result;
+        close_l();
     }
 
     // This is only used to identify a stream in the logs without
@@ -328,6 +347,10 @@
      */
     bool collidesWithCallback() const;
 
+    // Implement AudioDeviceCallback
+    void onAudioDeviceUpdate(audio_io_handle_t audioIo,
+            audio_port_handle_t deviceId) override {};
+
     // ============== I/O ===========================
     // A Stream will only implement read() or write() depending on its direction.
     virtual aaudio_result_t write(const void *buffer __unused,
@@ -366,7 +389,7 @@
      */
     void registerPlayerBase() {
         if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
-            mPlayerBase->registerWithAudioManager();
+            mPlayerBase->registerWithAudioManager(this);
         }
     }
 
@@ -395,21 +418,33 @@
      */
     aaudio_result_t systemStopFromCallback();
 
+    /**
+     * Safely RELEASE a stream after taking mStreamLock and checking
+     * to make sure we are not being called from a callback.
+     * @return AAUDIO_OK or a negative error
+     */
     aaudio_result_t safeRelease();
 
+    /**
+     * Safely RELEASE and CLOSE a stream after taking mStreamLock and checking
+     * to make sure we are not being called from a callback.
+     * @return AAUDIO_OK or a negative error
+     */
+    aaudio_result_t safeReleaseClose();
+
 protected:
 
     // PlayerBase allows the system to control the stream volume.
     class MyPlayerBase : public android::PlayerBase {
     public:
-        explicit MyPlayerBase(AudioStream *parent);
+        MyPlayerBase() {};
 
-        virtual ~MyPlayerBase();
+        virtual ~MyPlayerBase() = default;
 
         /**
          * Register for volume changes and remote control.
          */
-        void registerWithAudioManager();
+        void registerWithAudioManager(const android::sp<AudioStream>& parent);
 
         /**
          * UnRegister.
@@ -421,8 +456,6 @@
          */
         void destroy() override;
 
-        void clearParentReference() { mParent = nullptr; }
-
         // Just a stub. The ability to start audio through PlayerBase is being deprecated.
         android::status_t playerStart() override {
             return android::NO_ERROR;
@@ -438,18 +471,10 @@
             return android::NO_ERROR;
         }
 
-        android::status_t playerSetVolume() override {
-            // No pan and only left volume is taken into account from IPLayer interface
-            mParent->setDuckAndMuteVolume(mVolumeMultiplierL  /* * mPanMultiplierL */);
-            return android::NO_ERROR;
-        }
+        android::status_t playerSetVolume() override;
 
 #if AAUDIO_USE_VOLUME_SHAPER
-        ::android::binder::Status applyVolumeShaper(
-                const ::android::media::VolumeShaper::Configuration& configuration,
-                const ::android::media::VolumeShaper::Operation& operation) {
-            return mParent->applyVolumeShaper(configuration, operation);
-        }
+        ::android::binder::Status applyVolumeShaper();
 #endif
 
         aaudio_result_t getResult() {
@@ -457,9 +482,12 @@
         }
 
     private:
-        AudioStream          *mParent;
-        aaudio_result_t       mResult = AAUDIO_OK;
-        bool                  mRegistered = false;
+        // Use a weak pointer so the AudioStream can be deleted.
+
+        std::mutex               mParentLock;
+        android::wp<AudioStream> mParent;
+        aaudio_result_t          mResult = AAUDIO_OK;
+        bool                     mRegistered = false;
     };
 
     /**
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 60dad84..630b289 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -63,27 +63,26 @@
 static aaudio_result_t builder_createStream(aaudio_direction_t direction,
                                          aaudio_sharing_mode_t sharingMode,
                                          bool tryMMap,
-                                         AudioStream **audioStreamPtr) {
-    *audioStreamPtr = nullptr;
+                                         android::sp<AudioStream> &stream) {
     aaudio_result_t result = AAUDIO_OK;
 
     switch (direction) {
 
         case AAUDIO_DIRECTION_INPUT:
             if (tryMMap) {
-                *audioStreamPtr = new AudioStreamInternalCapture(AAudioBinderClient::getInstance(),
+                stream = new AudioStreamInternalCapture(AAudioBinderClient::getInstance(),
                                                                  false);
             } else {
-                *audioStreamPtr = new AudioStreamRecord();
+                stream = new AudioStreamRecord();
             }
             break;
 
         case AAUDIO_DIRECTION_OUTPUT:
             if (tryMMap) {
-                *audioStreamPtr = new AudioStreamInternalPlay(AAudioBinderClient::getInstance(),
+                stream = new AudioStreamInternalPlay(AAudioBinderClient::getInstance(),
                                                               false);
             } else {
-                *audioStreamPtr = new AudioStreamTrack();
+                stream = new AudioStreamTrack();
             }
             break;
 
@@ -98,7 +97,7 @@
 // Fall back to Legacy path if MMAP not available.
 // Exact behavior is controlled by MMapPolicy.
 aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
-    AudioStream *audioStream = nullptr;
+
     if (streamPtr == nullptr) {
         ALOGE("%s() streamPtr is null", __func__);
         return AAUDIO_ERROR_NULL;
@@ -171,41 +170,48 @@
         setPrivacySensitive(true);
     }
 
-    result = builder_createStream(getDirection(), sharingMode, allowMMap, &audioStream);
+    android::sp<AudioStream> audioStream;
+    result = builder_createStream(getDirection(), sharingMode, allowMMap, audioStream);
     if (result == AAUDIO_OK) {
         // Open the stream using the parameters from the builder.
         result = audioStream->open(*this);
-        if (result == AAUDIO_OK) {
-            *streamPtr = audioStream;
-        } else {
+        if (result != AAUDIO_OK) {
             bool isMMap = audioStream->isMMap();
-            delete audioStream;
-            audioStream = nullptr;
-
             if (isMMap && allowLegacy) {
                 ALOGV("%s() MMAP stream did not open so try Legacy path", __func__);
                 // If MMAP stream failed to open then TRY using a legacy stream.
                 result = builder_createStream(getDirection(), sharingMode,
-                                              false, &audioStream);
+                                              false, audioStream);
                 if (result == AAUDIO_OK) {
                     result = audioStream->open(*this);
-                    if (result == AAUDIO_OK) {
-                        *streamPtr = audioStream;
-                    } else {
-                        delete audioStream;
-                        audioStream = nullptr;
-                    }
                 }
             }
         }
-        if (audioStream != nullptr) {
+        if (result == AAUDIO_OK) {
             audioStream->logOpen();
-        }
+            *streamPtr = startUsingStream(audioStream);
+        } // else audioStream will go out of scope and be deleted
     }
 
     return result;
 }
 
+AudioStream *AudioStreamBuilder::startUsingStream(android::sp<AudioStream> &audioStream) {
+    // Increment the smart pointer so it will not get deleted when
+    // we pass it to the C caller and it goes out of scope.
+    // The C code cannot hold a smart pointer so we increment the reference
+    // count to indicate that the C app owns a reference.
+    audioStream->incStrong(nullptr);
+    return audioStream.get();
+}
+
+void AudioStreamBuilder::stopUsingStream(AudioStream *stream) {
+    // Undo the effect of startUsingStream()
+    android::sp<AudioStream> spAudioStream(stream);
+    ALOGV("%s() strongCount = %d", __func__, spAudioStream->getStrongCount());
+    spAudioStream->decStrong(nullptr);
+}
+
 aaudio_result_t AudioStreamBuilder::validate() const {
 
     // Check for values that are ridiculously out of range to prevent math overflow exploits.
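
startUsingStream()/stopUsingStream() above hand a strong reference across the C API boundary by bumping the reference count before exposing a raw pointer and dropping it when the app closes the stream. Below is a toy, self-contained sketch of that handover; ToyStream, startUsing and stopUsing are invented names, and the hand-rolled count stands in for the real android::sp/RefBase machinery.

    #include <atomic>
    #include <cstdio>

    class ToyStream {
    public:
        void acquire() { mRefs.fetch_add(1, std::memory_order_relaxed); }
        void release() {
            if (mRefs.fetch_sub(1, std::memory_order_acq_rel) == 1) delete this;
        }
    private:
        ~ToyStream() = default;        // only release() may destroy the object
        std::atomic<int> mRefs{1};     // the creator starts with one reference
    };

    // Equivalent of startUsingStream(): the C app now owns one reference.
    static ToyStream* startUsing(ToyStream* s) { s->acquire(); return s; }
    // Equivalent of stopUsingStream(): drop the app's reference.
    static void stopUsing(ToyStream* s) { s->release(); }

    int main() {
        ToyStream* stream = new ToyStream();     // refs == 1 (builder)
        ToyStream* handle = startUsing(stream);  // refs == 2 (handed to the app)
        stream->release();                       // builder scope ends, refs == 1
        stopUsing(handle);                       // refs == 0, stream is deleted
        printf("stream handed over and released\n");
        return 0;
    }
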
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index d5fb80d..9f93341 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -108,9 +108,16 @@
 
     virtual aaudio_result_t validate() const override;
 
+
     void logParameters() const;
 
+    // Mark the stream so it can be deleted.
+    static void stopUsingStream(AudioStream *stream);
+
 private:
+    // Extract a raw pointer that we can pass to a 'C' app.
+    static AudioStream *startUsingStream(android::sp<AudioStream> &spAudioStream);
+
     bool                       mSharingModeMatchRequired = false; // must match sharing mode requested
     aaudio_performance_mode_t  mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
 
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index c062882..33c1bf5 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -34,8 +34,7 @@
 using namespace aaudio;
 
 AudioStreamLegacy::AudioStreamLegacy()
-        : AudioStream()
-        , mDeviceCallback(new StreamDeviceCallback(this)) {
+        : AudioStream() {
 }
 
 AudioStreamLegacy::~AudioStreamLegacy() {
@@ -163,7 +162,11 @@
 }
 
 void AudioStreamLegacy::forceDisconnect(bool errorCallbackEnabled) {
-    if (getState() != AAUDIO_STREAM_STATE_DISCONNECTED) {
+    // There is no need to disconnect if already in these states.
+    if (getState() != AAUDIO_STREAM_STATE_DISCONNECTED
+            && getState() != AAUDIO_STREAM_STATE_CLOSING
+            && getState() != AAUDIO_STREAM_STATE_CLOSED
+            ) {
         setState(AAUDIO_STREAM_STATE_DISCONNECTED);
         if (errorCallbackEnabled) {
             maybeCallErrorCallback(AAUDIO_ERROR_DISCONNECTED);
@@ -205,24 +208,30 @@
     return AAudioConvert_androidToAAudioResult(status);
 }
 
-void AudioStreamLegacy::onAudioDeviceUpdate(audio_port_handle_t deviceId)
-{
+void AudioStreamLegacy::onAudioDeviceUpdate(audio_io_handle_t /* audioIo */,
+            audio_port_handle_t deviceId) {
     // Device routing is a common source of errors and DISCONNECTS.
-    // Please leave this log in place.
-    ALOGD("%s() devId %d => %d", __func__, (int) getDeviceId(), (int)deviceId);
-    if (getDeviceId() != AAUDIO_UNSPECIFIED && getDeviceId() != deviceId &&
-            getState() != AAUDIO_STREAM_STATE_DISCONNECTED) {
+    // Please leave this log in place. If there is a bug then this might
+    // get called after the stream has been deleted so log before we
+    // touch the stream object.
+    ALOGD("%s(deviceId = %d)", __func__, (int)deviceId);
+    if (getDeviceId() != AAUDIO_UNSPECIFIED
+            && getDeviceId() != deviceId
+            && getState() != AAUDIO_STREAM_STATE_DISCONNECTED
+            ) {
         // Note that isDataCallbackActive() is affected by state so call it before DISCONNECTING.
         // If we have a data callback and the stream is active, then ask the data callback
         // to DISCONNECT and call the error callback.
         if (isDataCallbackActive()) {
-            ALOGD("onAudioDeviceUpdate() request DISCONNECT in data callback due to device change");
+            ALOGD("%s() request DISCONNECT in data callback, device %d => %d",
+                  __func__, (int) getDeviceId(), (int) deviceId);
             // If the stream is stopped before the data callback has a chance to handle the
             // request then the requestStop() and requestPause() methods will handle it after
             // the callback has stopped.
             mRequestDisconnect.request();
         } else {
-            ALOGD("onAudioDeviceUpdate() DISCONNECT the stream now");
+            ALOGD("%s() DISCONNECT the stream now, device %d => %d",
+                  __func__, (int) getDeviceId(), (int) deviceId);
             forceDisconnect();
         }
     }
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index 9c24b2b..fefe6e0 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -87,29 +87,13 @@
 
 protected:
 
-    class StreamDeviceCallback : public android::AudioSystem::AudioDeviceCallback
-    {
-    public:
-
-        StreamDeviceCallback(AudioStreamLegacy *parent) : mParent(parent) {}
-        virtual ~StreamDeviceCallback() {}
-
-        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo __unused,
-                                         audio_port_handle_t deviceId) {
-            if (mParent != nullptr) {
-                mParent->onAudioDeviceUpdate(deviceId);
-            }
-        }
-
-        AudioStreamLegacy *mParent;
-    };
-
     aaudio_result_t getBestTimestamp(clockid_t clockId,
                                      int64_t *framePosition,
                                      int64_t *timeNanoseconds,
                                      android::ExtendedTimestamp *extendedTimestamp);
 
-    void onAudioDeviceUpdate(audio_port_handle_t deviceId);
+    void onAudioDeviceUpdate(audio_io_handle_t audioIo,
+            audio_port_handle_t deviceId) override;
 
     /*
      * Check to see whether a callback thread has requested a disconnect.
@@ -140,7 +124,6 @@
     int32_t                    mBlockAdapterBytesPerFrame = 0;
     aaudio_wrapping_frames_t   mPositionWhenStarting = 0;
     int32_t                    mCallbackBufferSize = 0;
-    const android::sp<StreamDeviceCallback>   mDeviceCallback;
 
     AtomicRequestor            mRequestDisconnect;
 
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index b0dc59e..d62951e 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -282,7 +282,7 @@
             : (aaudio_session_id_t) mAudioRecord->getSessionId();
     setSessionId(actualSessionId);
 
-    mAudioRecord->addAudioDeviceCallback(mDeviceCallback);
+    mAudioRecord->addAudioDeviceCallback(this);
 
     return AAUDIO_OK;
 }
@@ -291,16 +291,24 @@
     // TODO add close() or release() to AudioFlinger's AudioRecord API.
     //  Then call it from here
     if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
-        mAudioRecord->removeAudioDeviceCallback(mDeviceCallback);
+        mAudioRecord->removeAudioDeviceCallback(this);
         logReleaseBufferState();
-        mAudioRecord.clear();
-        mFixedBlockWriter.close();
+        // Data callbacks may still be running!
         return AudioStream::release_l();
     } else {
         return AAUDIO_OK; // already released
     }
 }
 
+void AudioStreamRecord::close_l() {
+    mAudioRecord.clear();
+    // Do not close mFixedBlockWriter because a data callback
+    // thread might still be running if someone else has a reference
+    // to mAudioRecord.
+    // It has a unique_ptr to its buffer so it will clean up by itself.
+    AudioStream::close_l();
+}
+
 const void * AudioStreamRecord::maybeConvertDeviceData(const void *audioData, int32_t numFrames) {
     if (mFormatConversionBufferFloat.get() != nullptr) {
         LOG_ALWAYS_FATAL_IF(numFrames > mFormatConversionBufferSizeInFrames,
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index c5944c7..e4ef1c0 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -39,6 +39,7 @@
 
     aaudio_result_t open(const AudioStreamBuilder & builder) override;
     aaudio_result_t release_l() override;
+    void close_l() override;
 
     aaudio_result_t requestStart() override;
     aaudio_result_t requestStop() override;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 4869480..3831046 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -224,7 +224,7 @@
     mInitialBufferCapacity = getBufferCapacity();
     mInitialFramesPerBurst = getFramesPerBurst();
 
-    mAudioTrack->addAudioDeviceCallback(mDeviceCallback);
+    mAudioTrack->addAudioDeviceCallback(this);
 
     // Update performance mode based on the actual stream flags.
     // For example, if the sample rate is not allowed then you won't get a FAST track.
@@ -253,19 +253,26 @@
 
 aaudio_result_t AudioStreamTrack::release_l() {
     if (getState() != AAUDIO_STREAM_STATE_CLOSING) {
-        mAudioTrack->removeAudioDeviceCallback(mDeviceCallback);
+        status_t err = mAudioTrack->removeAudioDeviceCallback(this);
+        ALOGE_IF(err, "%s() removeAudioDeviceCallback returned %d", __func__, err);
         logReleaseBufferState();
-        // TODO Investigate why clear() causes a hang in test_various.cpp
-        // if I call close() from a data callback.
-        // But the same thing in AudioRecord is OK!
-        // mAudioTrack.clear();
-        mFixedBlockReader.close();
+        // Data callbacks may still be running!
         return AudioStream::release_l();
     } else {
         return AAUDIO_OK; // already released
     }
 }
 
+void AudioStreamTrack::close_l() {
+    // Stop callbacks before deleting mFixedBlockReader memory.
+    mAudioTrack.clear();
+    // Do not close mFixedBlockReader because a data callback
+    // thread might still be running if someone else has a reference
+    // to mAudioTrack.
+    // It has a unique_ptr to its buffer so it will clean up by itself.
+    AudioStream::close_l();
+}
+
 void AudioStreamTrack::processCallback(int event, void *info) {
 
     switch (event) {
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 93a1ff4..6334f66 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -42,6 +42,7 @@
 
     aaudio_result_t open(const AudioStreamBuilder & builder) override;
     aaudio_result_t release_l() override;
+    void close_l() override;
 
     aaudio_result_t requestStart() override;
     aaudio_result_t requestPause() override;
diff --git a/media/libaaudio/tests/test_various.cpp b/media/libaaudio/tests/test_various.cpp
index a20c799..cbf863f 100644
--- a/media/libaaudio/tests/test_various.cpp
+++ b/media/libaaudio/tests/test_various.cpp
@@ -33,6 +33,11 @@
         void *audioData,
         int32_t numFrames
 ) {
+    aaudio_direction_t direction = AAudioStream_getDirection(stream);
+    if (direction == AAUDIO_DIRECTION_INPUT) {
+        return AAUDIO_CALLBACK_RESULT_CONTINUE;
+    }
+    // Check to make sure the buffer is initialized to all zeros.
     int channels = AAudioStream_getChannelCount(stream);
     int numSamples = channels * numFrames;
     bool allZeros = true;
@@ -48,7 +53,8 @@
 constexpr int64_t NANOS_PER_MILLISECOND = 1000 * 1000;
 
 void checkReleaseThenClose(aaudio_performance_mode_t perfMode,
-        aaudio_sharing_mode_t sharingMode) {
+        aaudio_sharing_mode_t sharingMode,
+        aaudio_direction_t direction = AAUDIO_DIRECTION_OUTPUT) {
     AAudioStreamBuilder* aaudioBuilder = nullptr;
     AAudioStream* aaudioStream = nullptr;
 
@@ -61,6 +67,7 @@
                                         nullptr);
     AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, perfMode);
     AAudioStreamBuilder_setSharingMode(aaudioBuilder, sharingMode);
+    AAudioStreamBuilder_setDirection(aaudioBuilder, direction);
     AAudioStreamBuilder_setFormat(aaudioBuilder, AAUDIO_FORMAT_PCM_FLOAT);
 
     // Create an AAudioStream using the Builder.
@@ -88,14 +95,28 @@
     // We should NOT be able to start or change a stream after it has been released.
     EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, AAudioStream_requestStart(aaudioStream));
     EXPECT_EQ(AAUDIO_STREAM_STATE_CLOSING, AAudioStream_getState(aaudioStream));
-    EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, AAudioStream_requestPause(aaudioStream));
+    // Pause is only implemented for OUTPUT.
+    if (direction == AAUDIO_DIRECTION_OUTPUT) {
+        EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE,
+                  AAudioStream_requestPause(aaudioStream));
+    }
     EXPECT_EQ(AAUDIO_STREAM_STATE_CLOSING, AAudioStream_getState(aaudioStream));
     EXPECT_EQ(AAUDIO_ERROR_INVALID_STATE, AAudioStream_requestStop(aaudioStream));
     EXPECT_EQ(AAUDIO_STREAM_STATE_CLOSING, AAudioStream_getState(aaudioStream));
 
     // Does this crash?
-    EXPECT_LT(0, AAudioStream_getFramesRead(aaudioStream));
-    EXPECT_LT(0, AAudioStream_getFramesWritten(aaudioStream));
+    EXPECT_GT(AAudioStream_getFramesRead(aaudioStream), 0);
+    EXPECT_GT(AAudioStream_getFramesWritten(aaudioStream), 0);
+    EXPECT_GT(AAudioStream_getFramesPerBurst(aaudioStream), 0);
+    EXPECT_GE(AAudioStream_getXRunCount(aaudioStream), 0);
+    EXPECT_GT(AAudioStream_getBufferCapacityInFrames(aaudioStream), 0);
+    EXPECT_GT(AAudioStream_getBufferSizeInFrames(aaudioStream), 0);
+
+    int64_t timestampFrames = 0;
+    int64_t timestampNanos = 0;
+    aaudio_result_t result = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+            &timestampFrames, &timestampNanos);
+    EXPECT_TRUE(result == AAUDIO_ERROR_INVALID_STATE || result == AAUDIO_ERROR_UNIMPLEMENTED);
 
     // Verify Closing State. Does this crash?
     aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
@@ -107,20 +128,42 @@
     EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
 }
 
-TEST(test_various, aaudio_release_close_none) {
+TEST(test_various, aaudio_release_close_none_output) {
     checkReleaseThenClose(AAUDIO_PERFORMANCE_MODE_NONE,
-            AAUDIO_SHARING_MODE_SHARED);
+            AAUDIO_SHARING_MODE_SHARED,
+            AAUDIO_DIRECTION_OUTPUT);
     // No EXCLUSIVE streams with MODE_NONE.
 }
 
-TEST(test_various, aaudio_release_close_low_shared) {
-    checkReleaseThenClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
-            AAUDIO_SHARING_MODE_SHARED);
+TEST(test_various, aaudio_release_close_none_input) {
+    checkReleaseThenClose(AAUDIO_PERFORMANCE_MODE_NONE,
+            AAUDIO_SHARING_MODE_SHARED,
+            AAUDIO_DIRECTION_INPUT);
+    // No EXCLUSIVE streams with MODE_NONE.
 }
 
-TEST(test_various, aaudio_release_close_low_exclusive) {
+TEST(test_various, aaudio_release_close_low_shared_output) {
     checkReleaseThenClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
-            AAUDIO_SHARING_MODE_EXCLUSIVE);
+            AAUDIO_SHARING_MODE_SHARED,
+            AAUDIO_DIRECTION_OUTPUT);
+}
+
+TEST(test_various, aaudio_release_close_low_shared_input) {
+    checkReleaseThenClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+            AAUDIO_SHARING_MODE_SHARED,
+            AAUDIO_DIRECTION_INPUT);
+}
+
+TEST(test_various, aaudio_release_close_low_exclusive_output) {
+    checkReleaseThenClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+            AAUDIO_SHARING_MODE_EXCLUSIVE,
+            AAUDIO_DIRECTION_OUTPUT);
+}
+
+TEST(test_various, aaudio_release_close_low_exclusive_input) {
+    checkReleaseThenClose(AAUDIO_PERFORMANCE_MODE_LOW_LATENCY,
+            AAUDIO_SHARING_MODE_EXCLUSIVE,
+            AAUDIO_DIRECTION_INPUT);
 }
 
 enum FunctionToCall {
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index df47def..9568e83 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -742,6 +742,8 @@
     void *iMemPointer;
     audio_track_cblk_t* cblk;
     status_t status;
+    static const int32_t kMaxCreateAttempts = 3;
+    int32_t remainingAttempts = kMaxCreateAttempts;
 
     if (audioFlinger == 0) {
         ALOGE("%s(%d): Could not get audioflinger", __func__, mPortId);
@@ -803,15 +805,24 @@
     input.sessionId = mSessionId;
     originalSessionId = mSessionId;
 
-    record = audioFlinger->createRecord(input,
-                                                              output,
-                                                              &status);
+    do {
+        record = audioFlinger->createRecord(input, output, &status);
+        if (status == NO_ERROR) {
+            break;
+        }
+        if (status != FAILED_TRANSACTION || --remainingAttempts <= 0) {
+            ALOGE("%s(%d): AudioFlinger could not create record track, status: %d",
+                  __func__, mPortId, status);
+            goto exit;
+        }
+        // FAILED_TRANSACTION happens under very specific conditions causing a state mismatch
+        // between audio policy manager and audio flinger during the input stream open sequence
+        // and can be recovered by retrying.
+        // Leave time for the race condition to clear before retrying, and randomize the delay
+        // to reduce the probability of concurrent retries in lockstep.
+        usleep((20 + rand() % 30) * 10000);
+    } while (1);
 
-    if (status != NO_ERROR) {
-        ALOGE("%s(%d): AudioFlinger could not create record track, status: %d",
-              __func__, mPortId, status);
-        goto exit;
-    }
     ALOG_ASSERT(record != 0);
 
     // AudioFlinger now owns the reference to the I/O handle,
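
The new loop above retries audioFlinger->createRecord() at most three times, and only for FAILED_TRANSACTION, sleeping a randomized 200-490 ms between attempts so that concurrent clients do not retry in lockstep. Below is a standalone sketch of the same retry-with-jitter shape; the Status enum, tryCreate() and createWithRetry() are toy stand-ins, and std::mt19937 replaces rand().

    #include <chrono>
    #include <cstdio>
    #include <random>
    #include <thread>

    enum class Status { kOk, kFailedTransaction, kOtherError };

    // Stand-in for the service call; always succeeds here.
    static Status tryCreate() { return Status::kOk; }

    static Status createWithRetry() {
        const int kMaxAttempts = 3;
        std::mt19937 rng{std::random_device{}()};
        std::uniform_int_distribution<int> jitterMs(200, 490);
        int remaining = kMaxAttempts;
        while (true) {
            Status status = tryCreate();
            // Return on success, on a non-retryable error, or when out of attempts.
            if (status != Status::kFailedTransaction || --remaining <= 0) {
                return status;
            }
            // The transient failure can clear once the racing state settles;
            // randomize the delay to avoid synchronized retries.
            std::this_thread::sleep_for(std::chrono::milliseconds(jitterMs(rng)));
        }
    }

    int main() {
        printf("status = %d\n", static_cast<int>(createWithRetry()));
        return 0;
    }
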
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 6357da4..e26a831 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -47,8 +47,9 @@
 record_config_callback AudioSystem::gRecordConfigCallback = NULL;
 
 // Required to be held while calling into gSoundTriggerCaptureStateListener.
+class CaptureStateListenerImpl;
 Mutex gSoundTriggerCaptureStateListenerLock;
-sp<AudioSystem::CaptureStateListener> gSoundTriggerCaptureStateListener = nullptr;
+sp<CaptureStateListenerImpl> gSoundTriggerCaptureStateListener = nullptr;
 
 // establish binder interface to AudioFlinger service
 const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
@@ -1634,42 +1635,54 @@
 class CaptureStateListenerImpl : public media::BnCaptureStateListener,
                                  public IBinder::DeathRecipient {
 public:
+    CaptureStateListenerImpl(
+            const sp<IAudioPolicyService>& aps,
+            const sp<AudioSystem::CaptureStateListener>& listener)
+            : mAps(aps), mListener(listener) {}
+
+    void init() {
+        bool active;
+        status_t status = mAps->registerSoundTriggerCaptureStateListener(this, &active);
+        if (status != NO_ERROR) {
+            mListener->onServiceDied();
+            return;
+        }
+        mListener->onStateChanged(active);
+        IInterface::asBinder(mAps)->linkToDeath(this);
+    }
+
     binder::Status setCaptureState(bool active) override {
         Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
-        gSoundTriggerCaptureStateListener->onStateChanged(active);
+        mListener->onStateChanged(active);
         return binder::Status::ok();
     }
 
     void binderDied(const wp<IBinder>&) override {
         Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
-        gSoundTriggerCaptureStateListener->onServiceDied();
+        mListener->onServiceDied();
         gSoundTriggerCaptureStateListener = nullptr;
     }
+
+private:
+    // Need this in order to keep the death recipient alive.
+    sp<IAudioPolicyService> mAps;
+    sp<AudioSystem::CaptureStateListener> mListener;
 };
 
 status_t AudioSystem::registerSoundTriggerCaptureStateListener(
     const sp<CaptureStateListener>& listener) {
+    LOG_ALWAYS_FATAL_IF(listener == nullptr);
+
     const sp<IAudioPolicyService>& aps =
             AudioSystem::get_audio_policy_service();
     if (aps == 0) {
         return PERMISSION_DENIED;
     }
 
-    sp<CaptureStateListenerImpl> wrapper = new CaptureStateListenerImpl();
-
     Mutex::Autolock _l(gSoundTriggerCaptureStateListenerLock);
+    gSoundTriggerCaptureStateListener = new CaptureStateListenerImpl(aps, listener);
+    gSoundTriggerCaptureStateListener->init();
 
-    bool active;
-    status_t status =
-        aps->registerSoundTriggerCaptureStateListener(wrapper, &active);
-    if (status != NO_ERROR) {
-        listener->onServiceDied();
-        return NO_ERROR;
-    }
-    gSoundTriggerCaptureStateListener = listener;
-    listener->onStateChanged(active);
-    sp<IBinder> binder = IInterface::asBinder(aps);
-    binder->linkToDeath(wrapper);
     return NO_ERROR;
 }
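
The CaptureStateListenerImpl refactor above registers in a separate init() step after construction and keeps strong references to both the policy service and the wrapped listener, so the callback and death-notification targets remain valid for the lifetime of the registration. A generic, self-contained sketch of that shape (plain C++ stand-ins, not the Android binder types):

    #include <memory>
    #include <utility>

    struct Listener {
        virtual ~Listener() = default;
        virtual void onStateChanged(bool active) = 0;
        virtual void onServiceDied() = 0;
    };

    struct ServiceProxy {
        virtual ~ServiceProxy() = default;
        // Returns false if registration fails; on success fills the initial state.
        virtual bool registerObserver(void* observer, bool* initialState) = 0;
    };

    class ObserverImpl {
    public:
        ObserverImpl(std::shared_ptr<ServiceProxy> service,
                     std::shared_ptr<Listener> listener)
            : mService(std::move(service)), mListener(std::move(listener)) {}

        // Register only after construction is complete, since registration may
        // synchronously call back into this object.
        void init() {
            bool active = false;
            if (!mService->registerObserver(this, &active)) {
                mListener->onServiceDied();
                return;
            }
            mListener->onStateChanged(active);
        }

        void onStateChanged(bool active) { mListener->onStateChanged(active); }
        void onServiceDied() { mListener->onServiceDied(); }

    private:
        std::shared_ptr<ServiceProxy> mService;  // keeps the service connection alive
        std::shared_ptr<Listener> mListener;     // callback target kept alive explicitly
    };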
 
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 32129f0..68d11d4 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -210,7 +210,11 @@
     return NO_ERROR;
 }
 
-AudioTrack::AudioTrack()
+AudioTrack::AudioTrack() : AudioTrack("" /*opPackageName*/)
+{
+}
+
+AudioTrack::AudioTrack(const std::string& opPackageName)
     : mStatus(NO_INIT),
       mState(STATE_STOPPED),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -218,6 +222,7 @@
       mPausedPosition(0),
       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
       mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mOpPackageName(opPackageName),
       mAudioTrackCallback(new AudioTrackCallback())
 {
     mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
@@ -244,12 +249,14 @@
         const audio_attributes_t* pAttributes,
         bool doNotReconnect,
         float maxRequiredSpeed,
-        audio_port_handle_t selectedDeviceId)
+        audio_port_handle_t selectedDeviceId,
+        const std::string& opPackageName)
     : mStatus(NO_INIT),
       mState(STATE_STOPPED),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
+      mOpPackageName(opPackageName),
       mAudioTrackCallback(new AudioTrackCallback())
 {
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
@@ -277,13 +284,15 @@
         pid_t pid,
         const audio_attributes_t* pAttributes,
         bool doNotReconnect,
-        float maxRequiredSpeed)
+        float maxRequiredSpeed,
+        const std::string& opPackageName)
     : mStatus(NO_INIT),
       mState(STATE_STOPPED),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0),
       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
+      mOpPackageName(opPackageName),
       mAudioTrackCallback(new AudioTrackCallback())
 {
     mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
@@ -1555,6 +1564,7 @@
     input.selectedDeviceId = mSelectedDeviceId;
     input.sessionId = mSessionId;
     input.audioTrackCallback = mAudioTrackCallback;
+    input.opPackageName = mOpPackageName;
 
     IAudioFlinger::CreateTrackOutput output;
 
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 0dbd842..a9946da 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -26,6 +26,8 @@
 #include <media/Modulo.h>
 #include <utils/threads.h>
 
+#include <string>
+
 #include "android/media/BnAudioTrackCallback.h"
 #include "android/media/IAudioTrackCallback.h"
 
@@ -177,6 +179,8 @@
      */
                         AudioTrack();
 
+                        AudioTrack(const std::string& opPackageName);
+
     /* Creates an AudioTrack object and registers it with AudioFlinger.
      * Once created, the track needs to be started before it can be used.
      * Unspecified values are set to appropriate default values.
@@ -258,7 +262,8 @@
                                     const audio_attributes_t* pAttributes = NULL,
                                     bool doNotReconnect = false,
                                     float maxRequiredSpeed = 1.0f,
-                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                                    const std::string& opPackageName = "");
 
     /* Creates an audio track and registers it with AudioFlinger.
      * With this constructor, the track is configured for static buffer mode.
@@ -288,7 +293,8 @@
                                     pid_t pid = -1,
                                     const audio_attributes_t* pAttributes = NULL,
                                     bool doNotReconnect = false,
-                                    float maxRequiredSpeed = 1.0f);
+                                    float maxRequiredSpeed = 1.0f,
+                                    const std::string& opPackageName = "");
 
     /* Terminates the AudioTrack and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioTrack.
@@ -1236,6 +1242,8 @@
 
     sp<media::VolumeHandler>       mVolumeHandler;
 
+    const std::string      mOpPackageName;
+
 private:
     class DeathNotifier : public IBinder::DeathRecipient {
     public:
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 612ce7a..bcc11f4 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -37,6 +37,7 @@
 #include <media/IEffectClient.h>
 #include <utils/String8.h>
 #include <media/MicrophoneInfo.h>
+#include <string>
 #include <vector>
 
 #include "android/media/IAudioRecord.h"
@@ -85,6 +86,11 @@
             speed = parcel->readFloat();
             audioTrackCallback = interface_cast<media::IAudioTrackCallback>(
                     parcel->readStrongBinder());
+            const char* opPackageNamePtr = parcel->readCString();
+            if (opPackageNamePtr == nullptr) {
+                return FAILED_TRANSACTION;
+            }
+            opPackageName = opPackageNamePtr;
 
             /* input/output arguments*/
             (void)parcel->read(&flags, sizeof(audio_output_flags_t));
@@ -109,6 +115,7 @@
             (void)parcel->writeInt32(notificationsPerBuffer);
             (void)parcel->writeFloat(speed);
             (void)parcel->writeStrongBinder(IInterface::asBinder(audioTrackCallback));
+            (void)parcel->writeCString(opPackageName.c_str());
 
             /* input/output arguments*/
             (void)parcel->write(&flags, sizeof(audio_output_flags_t));
@@ -127,6 +134,7 @@
         uint32_t notificationsPerBuffer;
         float speed;
         sp<media::IAudioTrackCallback> audioTrackCallback;
+        std::string opPackageName;
 
         /* input/output */
         audio_output_flags_t flags;
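
Parcel::readCString() may return nullptr on truncated or malformed input, and constructing a std::string from a null pointer is undefined behaviour, which is why the read above rejects the transaction with FAILED_TRANSACTION before the assignment. A small sketch of the same defensive read (the reader callback and names are illustrative):

    #include <string>

    enum class ReadStatus { Ok, FailedTransaction };

    // readCString is assumed to behave like Parcel::readCString(): it returns a
    // pointer into the parcel, or nullptr if no well-formed string is present.
    ReadStatus readPackageName(const char* (*readCString)(), std::string* out) {
        const char* raw = readCString();
        if (raw == nullptr) {
            return ReadStatus::FailedTransaction;  // reject the whole transaction
        }
        *out = raw;
        return ReadStatus::Ok;
    }
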
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index 80bd093..8d374c9 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -234,17 +234,20 @@
     static_assert(NCHAN > 0 && NCHAN <= 8);
     static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
             || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
-            || MIXTYPE == MIXTYPE_STEREOEXPAND);
+            || MIXTYPE == MIXTYPE_STEREOEXPAND
+            || MIXTYPE == MIXTYPE_MONOEXPAND);
     auto proc = [](auto& a, const auto& b) {
         if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
-                || MIXTYPE == MIXTYPE_STEREOEXPAND) {
+                || MIXTYPE == MIXTYPE_STEREOEXPAND
+                || MIXTYPE == MIXTYPE_MONOEXPAND) {
             a += b;
         } else {
             a = b;
         }
     };
     auto inp = [&in]() -> const TI& {
-        if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) {
+        if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND
+                || MIXTYPE == MIXTYPE_MONOEXPAND) {
             return *in;
         } else {
             return *in++;
@@ -312,6 +315,8 @@
  *   TV/TAV: int32_t (U4.28) or int16_t (U4.12) or float
  *   Input channel count is 1.
  *   vol: represents volume array.
+ *   This uses stereo balanced volume vol[0] and vol[1].
+ *   Before R, this was a full volume array but was called only for channels <= 2.
  *
  *   This accumulates into the out pointer.
  *
@@ -356,17 +361,13 @@
         do {
             TA auxaccum = 0;
             if constexpr (MIXTYPE == MIXTYPE_MULTI) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                     vol[i] += volinc[i];
                 }
-            } else if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) {
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
-                    vol[i] += volinc[i];
-                }
-                in++;
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_SAVEONLY) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                     vol[i] += volinc[i];
@@ -383,11 +384,13 @@
                 vol[0] += volinc[0];
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                     || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+                    || MIXTYPE == MIXTYPE_MONOEXPAND
                     || MIXTYPE == MIXTYPE_STEREOEXPAND) {
                 stereoVolumeHelper<MIXTYPE, NCHAN>(
                         out, in, vol, [&auxaccum] (auto &a, const auto &b) {
                     return MixMulAux<TO, TI, TV, TA>(a, b, &auxaccum);
                 });
+                if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) in += 1;
                 if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) in += 2;
                 vol[0] += volinc[0];
                 vol[1] += volinc[1];
@@ -401,17 +404,13 @@
     } else {
         do {
             if constexpr (MIXTYPE == MIXTYPE_MULTI) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMul<TO, TI, TV>(*in++, vol[i]);
                     vol[i] += volinc[i];
                 }
-            } else if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) {
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
-                    vol[i] += volinc[i];
-                }
-                in++;
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_SAVEONLY) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
                     vol[i] += volinc[i];
@@ -428,10 +427,12 @@
                 vol[0] += volinc[0];
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                     || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+                    || MIXTYPE == MIXTYPE_MONOEXPAND
                     || MIXTYPE == MIXTYPE_STEREOEXPAND) {
                 stereoVolumeHelper<MIXTYPE, NCHAN>(out, in, vol, [] (auto &a, const auto &b) {
                     return MixMul<TO, TI, TV>(a, b);
                 });
+                if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) in += 1;
                 if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) in += 2;
                 vol[0] += volinc[0];
                 vol[1] += volinc[1];
@@ -454,15 +455,12 @@
         do {
             TA auxaccum = 0;
             if constexpr (MIXTYPE == MIXTYPE_MULTI) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                 }
-            } else if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) {
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ += MixMulAux<TO, TI, TV, TA>(*in, vol[i], &auxaccum);
-                }
-                in++;
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_SAVEONLY) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ = MixMulAux<TO, TI, TV, TA>(*in++, vol[i], &auxaccum);
                 }
@@ -476,11 +474,13 @@
                 }
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                     || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+                    || MIXTYPE == MIXTYPE_MONOEXPAND
                     || MIXTYPE == MIXTYPE_STEREOEXPAND) {
                 stereoVolumeHelper<MIXTYPE, NCHAN>(
                         out, in, vol, [&auxaccum] (auto &a, const auto &b) {
                     return MixMulAux<TO, TI, TV, TA>(a, b, &auxaccum);
                 });
+                if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) in += 1;
                 if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) in += 2;
             } else /* constexpr */ {
                 static_assert(dependent_false<MIXTYPE>, "invalid mixtype");
@@ -490,16 +490,14 @@
         } while (--frameCount);
     } else {
         do {
+            // ALOGD("Mixtype:%d NCHAN:%d", MIXTYPE, NCHAN);
             if constexpr (MIXTYPE == MIXTYPE_MULTI) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ += MixMul<TO, TI, TV>(*in++, vol[i]);
                 }
-            } else if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) {
-                for (int i = 0; i < NCHAN; ++i) {
-                    *out++ += MixMul<TO, TI, TV>(*in, vol[i]);
-                }
-                in++;
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_SAVEONLY) {
+                static_assert(NCHAN <= 2);
                 for (int i = 0; i < NCHAN; ++i) {
                     *out++ = MixMul<TO, TI, TV>(*in++, vol[i]);
                 }
@@ -513,10 +511,12 @@
                 }
             } else if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
                     || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+                    || MIXTYPE == MIXTYPE_MONOEXPAND
                     || MIXTYPE == MIXTYPE_STEREOEXPAND) {
                 stereoVolumeHelper<MIXTYPE, NCHAN>(out, in, vol, [] (auto &a, const auto &b) {
                     return MixMul<TO, TI, TV>(a, b);
                 });
+                if constexpr (MIXTYPE == MIXTYPE_MONOEXPAND) in += 1;
                 if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND) in += 2;
             } else /* constexpr */ {
                 static_assert(dependent_false<MIXTYPE>, "invalid mixtype");
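
With this change MIXTYPE_MONOEXPAND shares the stereo-balanced-volume path used by the STEREOVOL and STEREOEXPAND cases, and the input pointer advances by only one sample per frame (in += 1) because the source is mono. A rough, self-contained sketch of the two-channel case (float samples, illustrative names; the real stereoVolumeHelper also spreads vol[0]/vol[1] across more than two output channels):

    #include <cstddef>

    // Accumulate one mono input stream into an interleaved stereo output,
    // applying a stereo-balanced volume pair.
    void monoExpandAccumulateStereo(float* out, const float* in,
                                    size_t frameCount, const float vol[2]) {
        for (size_t f = 0; f < frameCount; ++f) {
            const float sample = *in++;  // mono source: advance one sample per frame
            *out++ += sample * vol[0];   // left
            *out++ += sample * vol[1];   // right
        }
    }
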
diff --git a/media/libaudioprocessing/tests/mixerops_benchmark.cpp b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
index 86f5429..7a4c5c7 100644
--- a/media/libaudioprocessing/tests/mixerops_benchmark.cpp
+++ b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
@@ -74,28 +74,32 @@
     }
 }
 
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI, 2);
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY, 2);
+// MULTI mode and MULTI_SAVEONLY mode are not used by AudioMixer for channels > 2,
+// which is ensured by a static_assert (won't compile for those configurations).
+// So we benchmark MIXTYPE_MULTI_MONOVOL and MIXTYPE_MULTI_SAVEONLY_MONOVOL compared
+// with MIXTYPE_MULTI_STEREOVOL and MIXTYPE_MULTI_SAVEONLY_STEREOVOL.
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_MONOVOL, 2);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 2);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_STEREOVOL, 2);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 2);
 
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI, 4);
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY, 4);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_MONOVOL, 4);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 4);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_STEREOVOL, 4);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 4);
 
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI, 5);
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY, 5);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_MONOVOL, 5);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 5);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_STEREOVOL, 5);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 5);
 
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI, 8);
-BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY, 8);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_MONOVOL, 8);
+BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 8);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_STEREOVOL, 8);
 BENCHMARK_TEMPLATE(BM_VolumeRampMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 8);
 
-BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI, 8);
-BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_SAVEONLY, 8);
+BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_MONOVOL, 8);
+BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_SAVEONLY_MONOVOL, 8);
 BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_STEREOVOL, 8);
 BENCHMARK_TEMPLATE(BM_VolumeMulti, MIXTYPE_MULTI_SAVEONLY_STEREOVOL, 8);
 
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index bd18a40..11005c6 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -62,11 +62,13 @@
     }
 
     virtual sp<IMediaPlayer> create(
-            const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId) {
+            const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId,
+            const std::string opPackageName) {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
         data.writeStrongBinder(IInterface::asBinder(client));
         data.writeInt32(audioSessionId);
+        data.writeCString(opPackageName.c_str());
 
         remote()->transact(CREATE, data, &reply);
         return interface_cast<IMediaPlayer>(reply.readStrongBinder());
@@ -127,7 +129,12 @@
             sp<IMediaPlayerClient> client =
                 interface_cast<IMediaPlayerClient>(data.readStrongBinder());
             audio_session_t audioSessionId = (audio_session_t) data.readInt32();
-            sp<IMediaPlayer> player = create(client, audioSessionId);
+            const char* opPackageName = data.readCString();
+            if (opPackageName == nullptr) {
+                return FAILED_TRANSACTION;
+            }
+            std::string opPackageNameStr(opPackageName);
+            sp<IMediaPlayer> player = create(client, audioSessionId, opPackageNameStr);
             reply->writeStrongBinder(IInterface::asBinder(player));
             return NO_ERROR;
         } break;
diff --git a/media/libmedia/include/media/IMediaPlayerService.h b/media/libmedia/include/media/IMediaPlayerService.h
index f2e2060..a4207eb 100644
--- a/media/libmedia/include/media/IMediaPlayerService.h
+++ b/media/libmedia/include/media/IMediaPlayerService.h
@@ -28,6 +28,8 @@
 #include <media/IMediaPlayerClient.h>
 #include <media/IMediaMetadataRetriever.h>
 
+#include <string>
+
 namespace android {
 
 class IMediaPlayer;
@@ -47,7 +49,8 @@
     virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName) = 0;
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
     virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
-            audio_session_t audioSessionId = AUDIO_SESSION_ALLOCATE) = 0;
+            audio_session_t audioSessionId = AUDIO_SESSION_ALLOCATE,
+            const std::string opPackage = "") = 0;
     virtual sp<IMediaCodecList> getCodecList() const = 0;
 
     // Connects to a remote display.
diff --git a/media/libmedia/include/media/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
index 2335c5a..7c29e50 100644
--- a/media/libmedia/include/media/mediaplayer.h
+++ b/media/libmedia/include/media/mediaplayer.h
@@ -33,6 +33,8 @@
 #include <utils/KeyedVector.h>
 #include <utils/String8.h>
 
+#include <string>
+
 struct ANativeWindow;
 
 namespace android {
@@ -205,7 +207,7 @@
                     public virtual IMediaDeathNotifier
 {
 public:
-    MediaPlayer();
+    MediaPlayer(const std::string opPackageName = "");
     ~MediaPlayer();
             void            died();
             void            disconnect();
@@ -308,6 +310,7 @@
     float                       mSendLevel;
     struct sockaddr_in          mRetransmitEndpoint;
     bool                        mRetransmitEndpointValid;
+    const std::string           mOpPackageName;
 };
 
 }; // namespace android
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 1fadc94..6079a2d 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -41,7 +41,7 @@
 
 using media::VolumeShaper;
 
-MediaPlayer::MediaPlayer()
+MediaPlayer::MediaPlayer(const std::string opPackageName) : mOpPackageName(opPackageName)
 {
     ALOGV("constructor");
     mListener = NULL;
@@ -152,7 +152,7 @@
     if (url != NULL) {
         const sp<IMediaPlayerService> service(getMediaPlayerService());
         if (service != 0) {
-            sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
+            sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mOpPackageName));
             if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
                 (NO_ERROR != player->setDataSource(httpService, url, headers))) {
                 player.clear();
@@ -169,7 +169,7 @@
     status_t err = UNKNOWN_ERROR;
     const sp<IMediaPlayerService> service(getMediaPlayerService());
     if (service != 0) {
-        sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
+        sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mOpPackageName));
         if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
             (NO_ERROR != player->setDataSource(fd, offset, length))) {
             player.clear();
@@ -185,7 +185,7 @@
     status_t err = UNKNOWN_ERROR;
     const sp<IMediaPlayerService> service(getMediaPlayerService());
     if (service != 0) {
-        sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
+        sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mOpPackageName));
         if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
             (NO_ERROR != player->setDataSource(source))) {
             player.clear();
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index c0da0ce..016f622 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -480,14 +480,14 @@
 }
 
 sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client,
-        audio_session_t audioSessionId)
+        audio_session_t audioSessionId, std::string opPackageName)
 {
     pid_t pid = IPCThreadState::self()->getCallingPid();
     int32_t connId = android_atomic_inc(&mNextConnId);
 
     sp<Client> c = new Client(
             this, pid, connId, client, audioSessionId,
-            IPCThreadState::self()->getCallingUid());
+            IPCThreadState::self()->getCallingUid(), opPackageName);
 
     ALOGV("Create new client(%d) from pid %d, uid %d, ", connId, pid,
          IPCThreadState::self()->getCallingUid());
@@ -733,7 +733,8 @@
 MediaPlayerService::Client::Client(
         const sp<MediaPlayerService>& service, pid_t pid,
         int32_t connId, const sp<IMediaPlayerClient>& client,
-        audio_session_t audioSessionId, uid_t uid)
+        audio_session_t audioSessionId, uid_t uid, const std::string& opPackageName)
+        : mOpPackageName(opPackageName)
 {
     ALOGV("Client(%d) constructor", connId);
     mPid = pid;
@@ -922,7 +923,7 @@
 
     if (!p->hardwareOutput()) {
         mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
-                mPid, mAudioAttributes, mAudioDeviceUpdatedListener);
+                mPid, mAudioAttributes, mAudioDeviceUpdatedListener, mOpPackageName);
         static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
     }
 
@@ -1761,7 +1762,8 @@
 #undef LOG_TAG
 #define LOG_TAG "AudioSink"
 MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
-        const audio_attributes_t* attr, const sp<AudioSystem::AudioDeviceCallback>& deviceCallback)
+        const audio_attributes_t* attr, const sp<AudioSystem::AudioDeviceCallback>& deviceCallback,
+        const std::string& opPackageName)
     : mCallback(NULL),
       mCallbackCookie(NULL),
       mCallbackData(NULL),
@@ -1782,7 +1784,8 @@
       mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
       mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
       mDeviceCallbackEnabled(false),
-      mDeviceCallback(deviceCallback)
+      mDeviceCallback(deviceCallback),
+      mOpPackageName(opPackageName)
 {
     ALOGV("AudioOutput(%d)", sessionId);
     if (attr != NULL) {
@@ -2176,7 +2179,8 @@
                     mAttributes,
                     doNotReconnect,
                     1.0f,  // default value for maxRequiredSpeed
-                    mSelectedDeviceId);
+                    mSelectedDeviceId,
+                    mOpPackageName);
         } else {
             // TODO: Due to buffer memory concerns, we use a max target playback speed
             // based on mPlaybackRate at the time of open (instead of kMaxRequiredSpeed),
@@ -2204,7 +2208,8 @@
                     mAttributes,
                     doNotReconnect,
                     targetSpeed,
-                    mSelectedDeviceId);
+                    mSelectedDeviceId,
+                    mOpPackageName);
         }
         // Set caller name so it can be logged in destructor.
         // MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_MEDIA
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 6431ca1..a7de3f3 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -19,6 +19,7 @@
 #define ANDROID_MEDIAPLAYERSERVICE_H
 
 #include <arpa/inet.h>
+#include <string>
 
 #include <utils/threads.h>
 #include <utils/Errors.h>
@@ -81,7 +82,8 @@
                                         uid_t uid,
                                         int pid,
                                         const audio_attributes_t * attr,
-                                        const sp<AudioSystem::AudioDeviceCallback>& deviceCallback);
+                                        const sp<AudioSystem::AudioDeviceCallback>& deviceCallback,
+                                        const std::string& opPackageName);
         virtual                 ~AudioOutput();
 
         virtual bool            ready() const { return mTrack != 0; }
@@ -178,6 +180,7 @@
         bool                    mDeviceCallbackEnabled;
         wp<AudioSystem::AudioDeviceCallback>        mDeviceCallback;
         mutable Mutex           mLock;
+        const std::string       mOpPackageName;
 
         // static variables below not protected by mutex
         static bool             mIsOnEmulator;
@@ -235,7 +238,8 @@
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever();
 
     virtual sp<IMediaPlayer>    create(const sp<IMediaPlayerClient>& client,
-                                       audio_session_t audioSessionId);
+                                       audio_session_t audioSessionId,
+                                       const std::string opPackageName);
 
     virtual sp<IMediaCodecList> getCodecList() const;
 
@@ -410,7 +414,8 @@
                                         int32_t connId,
                                         const sp<IMediaPlayerClient>& client,
                                         audio_session_t audioSessionId,
-                                        uid_t uid);
+                                        uid_t uid,
+                                        const std::string& opPackageName);
                                 Client();
         virtual                 ~Client();
 
@@ -467,6 +472,7 @@
                     bool                          mRetransmitEndpointValid;
                     sp<Client>                    mNextClient;
                     sp<MediaPlayerBase::Listener> mListener;
+                    const std::string             mOpPackageName;
 
         // Metadata filters.
         media::Metadata::Filter mMetadataAllow;  // protected by mLock
diff --git a/media/libstagefright/FrameCaptureProcessor.cpp b/media/libstagefright/FrameCaptureProcessor.cpp
index 96c1195..63238bc 100644
--- a/media/libstagefright/FrameCaptureProcessor.cpp
+++ b/media/libstagefright/FrameCaptureProcessor.cpp
@@ -171,6 +171,8 @@
     if (err != OK) {
         ALOGW("wait for fence returned err %d", err);
     }
+
+    mRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL);
     return OK;
 }
 
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 8f7d4bf..d99596e 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2774,7 +2774,7 @@
         // even if the file is well-formed and the primary picture is correct.
 
         // Reserve item ids for samples + grid
-        size_t numItemsToReserve = mNumTiles + (mNumTiles > 1);
+        size_t numItemsToReserve = mNumTiles + (mNumTiles > 0);
         status_t err = mOwner->reserveItemId_l(numItemsToReserve, &mItemIdBase);
         if (err != OK) {
             return err;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 5d17f97..31c800e 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -2043,20 +2043,25 @@
     } else if (mFlags & kFlagOutputBuffersChanged) {
         PostReplyWithError(replyID, INFO_OUTPUT_BUFFERS_CHANGED);
         mFlags &= ~kFlagOutputBuffersChanged;
-    } else if (mFlags & kFlagOutputFormatChanged) {
-        PostReplyWithError(replyID, INFO_FORMAT_CHANGED);
-        mFlags &= ~kFlagOutputFormatChanged;
     } else {
         sp<AMessage> response = new AMessage;
-        ssize_t index = dequeuePortBuffer(kPortIndexOutput);
-
-        if (index < 0) {
-            CHECK_EQ(index, -EAGAIN);
+        BufferInfo *info = peekNextPortBuffer(kPortIndexOutput);
+        if (!info) {
             return false;
         }
 
-        const sp<MediaCodecBuffer> &buffer =
-            mPortBuffers[kPortIndexOutput][index].mData;
+        // In synchronous mode, output format change should be handled
+        // at dequeue so the event is delivered in the correct order.
+
+        const sp<MediaCodecBuffer> &buffer = info->mData;
+        handleOutputFormatChangeIfNeeded(buffer);
+        if (mFlags & kFlagOutputFormatChanged) {
+            PostReplyWithError(replyID, INFO_FORMAT_CHANGED);
+            mFlags &= ~kFlagOutputFormatChanged;
+            return true;
+        }
+
+        ssize_t index = dequeuePortBuffer(kPortIndexOutput);
 
         response->setSize("index", index);
         response->setSize("offset", buffer->offset());
@@ -2540,107 +2545,13 @@
                         break;
                     }
 
-                    sp<RefBase> obj;
-                    CHECK(msg->findObject("buffer", &obj));
-                    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
-
-                    if (mOutputFormat != buffer->format()) {
-                        if (mFlags & kFlagUseBlockModel) {
-                            sp<AMessage> diff1 = mOutputFormat->changesFrom(buffer->format());
-                            sp<AMessage> diff2 = buffer->format()->changesFrom(mOutputFormat);
-                            std::set<std::string> keys;
-                            size_t numEntries = diff1->countEntries();
-                            AMessage::Type type;
-                            for (size_t i = 0; i < numEntries; ++i) {
-                                keys.emplace(diff1->getEntryNameAt(i, &type));
-                            }
-                            numEntries = diff2->countEntries();
-                            for (size_t i = 0; i < numEntries; ++i) {
-                                keys.emplace(diff2->getEntryNameAt(i, &type));
-                            }
-                            sp<WrapperObject<std::set<std::string>>> changedKeys{
-                                new WrapperObject<std::set<std::string>>{std::move(keys)}};
-                            buffer->meta()->setObject("changedKeys", changedKeys);
-                        }
-                        mOutputFormat = buffer->format();
-                        ALOGV("[%s] output format changed to: %s",
-                                mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
-
-                        if (mSoftRenderer == NULL &&
-                                mSurface != NULL &&
-                                (mFlags & kFlagUsesSoftwareRenderer)) {
-                            AString mime;
-                            CHECK(mOutputFormat->findString("mime", &mime));
-
-                            // TODO: propagate color aspects to software renderer to allow better
-                            // color conversion to RGB. For now, just mark dataspace for YUV
-                            // rendering.
-                            int32_t dataSpace;
-                            if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
-                                ALOGD("[%s] setting dataspace on output surface to #%x",
-                                        mComponentName.c_str(), dataSpace);
-                                int err = native_window_set_buffers_data_space(
-                                        mSurface.get(), (android_dataspace)dataSpace);
-                                ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
-                            }
-                            if (mOutputFormat->contains("hdr-static-info")) {
-                                HDRStaticInfo info;
-                                if (ColorUtils::getHDRStaticInfoFromFormat(mOutputFormat, &info)) {
-                                    setNativeWindowHdrMetadata(mSurface.get(), &info);
-                                }
-                            }
-
-                            sp<ABuffer> hdr10PlusInfo;
-                            if (mOutputFormat->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
-                                    && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
-                                native_window_set_buffers_hdr10_plus_metadata(mSurface.get(),
-                                        hdr10PlusInfo->size(), hdr10PlusInfo->data());
-                            }
-
-                            if (mime.startsWithIgnoreCase("video/")) {
-                                mSurface->setDequeueTimeout(-1);
-                                mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
-                            }
-                        }
-
-                        requestCpuBoostIfNeeded();
-
-                        if (mFlags & kFlagIsEncoder) {
-                            // Before we announce the format change we should
-                            // collect codec specific data and amend the output
-                            // format as necessary.
-                            int32_t flags = 0;
-                            (void) buffer->meta()->findInt32("flags", &flags);
-                            if ((flags & BUFFER_FLAG_CODECCONFIG) && !(mFlags & kFlagIsSecure)) {
-                                status_t err =
-                                    amendOutputFormatWithCodecSpecificData(buffer);
-
-                                if (err != OK) {
-                                    ALOGE("Codec spit out malformed codec "
-                                          "specific data!");
-                                }
-                            }
-                        }
-                        if (mFlags & kFlagIsAsync) {
-                            onOutputFormatChanged();
-                        } else {
-                            mFlags |= kFlagOutputFormatChanged;
-                            postActivityNotificationIfPossible();
-                        }
-
-                        // Notify mCrypto of video resolution changes
-                        if (mCrypto != NULL) {
-                            int32_t left, top, right, bottom, width, height;
-                            if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
-                                mCrypto->notifyResolution(right - left + 1, bottom - top + 1);
-                            } else if (mOutputFormat->findInt32("width", &width)
-                                    && mOutputFormat->findInt32("height", &height)) {
-                                mCrypto->notifyResolution(width, height);
-                            }
-                        }
-                    }
-
                     if (mFlags & kFlagIsAsync) {
+                        sp<RefBase> obj;
+                        CHECK(msg->findObject("buffer", &obj));
+                        sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
+
+                        // In asynchronous mode, output format change is processed immediately.
+                        handleOutputFormatChangeIfNeeded(buffer);
                         onOutputBufferAvailable();
                     } else if (mFlags & kFlagDequeueOutputPending) {
                         CHECK(handleDequeueOutputBuffer(mDequeueOutputReplyID));
@@ -3469,6 +3380,106 @@
     }
 }
 
+void MediaCodec::handleOutputFormatChangeIfNeeded(const sp<MediaCodecBuffer> &buffer) {
+    sp<AMessage> format = buffer->format();
+    if (mOutputFormat == format) {
+        return;
+    }
+    if (mFlags & kFlagUseBlockModel) {
+        sp<AMessage> diff1 = mOutputFormat->changesFrom(format);
+        sp<AMessage> diff2 = format->changesFrom(mOutputFormat);
+        std::set<std::string> keys;
+        size_t numEntries = diff1->countEntries();
+        AMessage::Type type;
+        for (size_t i = 0; i < numEntries; ++i) {
+            keys.emplace(diff1->getEntryNameAt(i, &type));
+        }
+        numEntries = diff2->countEntries();
+        for (size_t i = 0; i < numEntries; ++i) {
+            keys.emplace(diff2->getEntryNameAt(i, &type));
+        }
+        sp<WrapperObject<std::set<std::string>>> changedKeys{
+            new WrapperObject<std::set<std::string>>{std::move(keys)}};
+        buffer->meta()->setObject("changedKeys", changedKeys);
+    }
+    mOutputFormat = format;
+    ALOGV("[%s] output format changed to: %s",
+            mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
+
+    if (mSoftRenderer == NULL &&
+            mSurface != NULL &&
+            (mFlags & kFlagUsesSoftwareRenderer)) {
+        AString mime;
+        CHECK(mOutputFormat->findString("mime", &mime));
+
+        // TODO: propagate color aspects to software renderer to allow better
+        // color conversion to RGB. For now, just mark dataspace for YUV
+        // rendering.
+        int32_t dataSpace;
+        if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+            ALOGD("[%s] setting dataspace on output surface to #%x",
+                    mComponentName.c_str(), dataSpace);
+            int err = native_window_set_buffers_data_space(
+                    mSurface.get(), (android_dataspace)dataSpace);
+            ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
+        }
+        if (mOutputFormat->contains("hdr-static-info")) {
+            HDRStaticInfo info;
+            if (ColorUtils::getHDRStaticInfoFromFormat(mOutputFormat, &info)) {
+                setNativeWindowHdrMetadata(mSurface.get(), &info);
+            }
+        }
+
+        sp<ABuffer> hdr10PlusInfo;
+        if (mOutputFormat->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
+                && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
+            native_window_set_buffers_hdr10_plus_metadata(mSurface.get(),
+                    hdr10PlusInfo->size(), hdr10PlusInfo->data());
+        }
+
+        if (mime.startsWithIgnoreCase("video/")) {
+            mSurface->setDequeueTimeout(-1);
+            mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
+        }
+    }
+
+    requestCpuBoostIfNeeded();
+
+    if (mFlags & kFlagIsEncoder) {
+        // Before we announce the format change we should
+        // collect codec specific data and amend the output
+        // format as necessary.
+        int32_t flags = 0;
+        (void) buffer->meta()->findInt32("flags", &flags);
+        if ((flags & BUFFER_FLAG_CODECCONFIG) && !(mFlags & kFlagIsSecure)) {
+            status_t err =
+                amendOutputFormatWithCodecSpecificData(buffer);
+
+            if (err != OK) {
+                ALOGE("Codec spit out malformed codec "
+                      "specific data!");
+            }
+        }
+    }
+    if (mFlags & kFlagIsAsync) {
+        onOutputFormatChanged();
+    } else {
+        mFlags |= kFlagOutputFormatChanged;
+        postActivityNotificationIfPossible();
+    }
+
+    // Notify mCrypto of video resolution changes
+    if (mCrypto != NULL) {
+        int32_t left, top, right, bottom, width, height;
+        if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
+            mCrypto->notifyResolution(right - left + 1, bottom - top + 1);
+        } else if (mOutputFormat->findInt32("width", &width)
+                && mOutputFormat->findInt32("height", &height)) {
+            mCrypto->notifyResolution(width, height);
+        }
+    }
+}
+
 void MediaCodec::extractCSD(const sp<AMessage> &format) {
     mCSD.clear();
 
@@ -3934,19 +3945,31 @@
     return OK;
 }
 
-ssize_t MediaCodec::dequeuePortBuffer(int32_t portIndex) {
+MediaCodec::BufferInfo *MediaCodec::peekNextPortBuffer(int32_t portIndex) {
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
 
     List<size_t> *availBuffers = &mAvailPortBuffers[portIndex];
 
     if (availBuffers->empty()) {
+        return nullptr;
+    }
+
+    return &mPortBuffers[portIndex][*availBuffers->begin()];
+}
+
+ssize_t MediaCodec::dequeuePortBuffer(int32_t portIndex) {
+    CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+
+    BufferInfo *info = peekNextPortBuffer(portIndex);
+    if (!info) {
         return -EAGAIN;
     }
 
+    List<size_t> *availBuffers = &mAvailPortBuffers[portIndex];
     size_t index = *availBuffers->begin();
+    CHECK_EQ(info, &mPortBuffers[portIndex][index]);
     availBuffers->erase(availBuffers->begin());
 
-    BufferInfo *info = &mPortBuffers[portIndex][index];
     CHECK(!info->mOwnedByClient);
     {
         Mutex::Autolock al(mBufferLock);
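
The dequeuePortBuffer() split above adds a peek step so that, in synchronous mode, a pending output format change can be reported as INFO_FORMAT_CHANGED before the buffer is actually removed from the available list. A tiny, self-contained sketch of such a peek/dequeue pair over an index queue (illustrative, not the MediaCodec types):

    #include <cstddef>
    #include <deque>
    #include <optional>

    class IndexQueue {
    public:
        // Look at the next available index without removing it; nullptr if empty.
        const size_t* peekNext() const {
            return mAvail.empty() ? nullptr : &mAvail.front();
        }
        // Remove and return the next available index; nullopt if empty.
        std::optional<size_t> dequeue() {
            if (mAvail.empty()) return std::nullopt;
            const size_t index = mAvail.front();
            mAvail.pop_front();
            return index;
        }
        void enqueue(size_t index) { mAvail.push_back(index); }

    private:
        std::deque<size_t> mAvail;
    };
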
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index cab4ebd..8d9bc06 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -92,7 +92,9 @@
     }
 
     sp<MetaData> trackMeta = new MetaData;
-    convertMessageToMetaData(format, trackMeta);
+    if (convertMessageToMetaData(format, trackMeta) != OK) {
+        return BAD_VALUE;
+    }
 
     sp<MediaAdapter> newTrack = new MediaAdapter(trackMeta);
     status_t result = mWriter->addSource(newTrack);
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 4c94baa..c284ef7 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -132,37 +132,47 @@
 }
 
 void setNativeWindowHdrMetadata(ANativeWindow *nativeWindow, HDRStaticInfo *info) {
-    struct android_smpte2086_metadata smpte2086_meta = {
-            .displayPrimaryRed = {
-                    info->sType1.mR.x * 0.00002f,
-                    info->sType1.mR.y * 0.00002f
-            },
-            .displayPrimaryGreen = {
-                    info->sType1.mG.x * 0.00002f,
-                    info->sType1.mG.y * 0.00002f
-            },
-            .displayPrimaryBlue = {
-                    info->sType1.mB.x * 0.00002f,
-                    info->sType1.mB.y * 0.00002f
-            },
-            .whitePoint = {
-                    info->sType1.mW.x * 0.00002f,
-                    info->sType1.mW.y * 0.00002f
-            },
-            .maxLuminance = (float) info->sType1.mMaxDisplayLuminance,
-            .minLuminance = info->sType1.mMinDisplayLuminance * 0.0001f
-    };
+    // If the mastering max and min luminance fields are 0, do not use them;
+    // it indicates the values may not be present in the stream.
+    if ((float)info->sType1.mMaxDisplayLuminance > 0.0f &&
+        (info->sType1.mMinDisplayLuminance * 0.0001f) > 0.0f) {
+        struct android_smpte2086_metadata smpte2086_meta = {
+                .displayPrimaryRed = {
+                        info->sType1.mR.x * 0.00002f,
+                        info->sType1.mR.y * 0.00002f
+                },
+                .displayPrimaryGreen = {
+                        info->sType1.mG.x * 0.00002f,
+                        info->sType1.mG.y * 0.00002f
+                },
+                .displayPrimaryBlue = {
+                        info->sType1.mB.x * 0.00002f,
+                        info->sType1.mB.y * 0.00002f
+                },
+                .whitePoint = {
+                        info->sType1.mW.x * 0.00002f,
+                        info->sType1.mW.y * 0.00002f
+                },
+                .maxLuminance = (float) info->sType1.mMaxDisplayLuminance,
+                .minLuminance = info->sType1.mMinDisplayLuminance * 0.0001f
+        };
 
-    int err = native_window_set_buffers_smpte2086_metadata(nativeWindow, &smpte2086_meta);
-    ALOGW_IF(err != 0, "failed to set smpte2086 metadata on surface (%d)", err);
+        int err = native_window_set_buffers_smpte2086_metadata(nativeWindow, &smpte2086_meta);
+        ALOGW_IF(err != 0, "failed to set smpte2086 metadata on surface (%d)", err);
+    }
 
-    struct android_cta861_3_metadata cta861_meta = {
-            .maxContentLightLevel = (float) info->sType1.mMaxContentLightLevel,
-            .maxFrameAverageLightLevel = (float) info->sType1.mMaxFrameAverageLightLevel
-    };
+    // If the content light level fields are 0, do not use them; it
+    // indicates the values may not be present in the stream.
+    if ((float)info->sType1.mMaxContentLightLevel > 0.0f &&
+        (float)info->sType1.mMaxFrameAverageLightLevel > 0.0f) {
+        struct android_cta861_3_metadata cta861_meta = {
+                .maxContentLightLevel = (float) info->sType1.mMaxContentLightLevel,
+                .maxFrameAverageLightLevel = (float) info->sType1.mMaxFrameAverageLightLevel
+        };
 
-    err = native_window_set_buffers_cta861_3_metadata(nativeWindow, &cta861_meta);
-    ALOGW_IF(err != 0, "failed to set cta861_3 metadata on surface (%d)", err);
+        int err = native_window_set_buffers_cta861_3_metadata(nativeWindow, &cta861_meta);
+        ALOGW_IF(err != 0, "failed to set cta861_3 metadata on surface (%d)", err);
+    }
 }
 
 status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */) {
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index a1e4d43..ece79c4 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1663,13 +1663,16 @@
         meta->setInt32(kKeyColorMatrix, colorAspects.mMatrixCoeffs);
     }
 }
-
-void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
+/* Converts key and value pairs in AMessage format to MetaData format.
+ * Also checks for the presence of required keys.
+ */
+status_t convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
     AString mime;
     if (msg->findString("mime", &mime)) {
         meta->setCString(kKeyMIMEType, mime.c_str());
     } else {
-        ALOGW("did not find mime type");
+        ALOGE("did not find mime type");
+        return BAD_VALUE;
     }
 
     convertMessageToMetaDataFromMappings(msg, meta);
@@ -1718,7 +1721,8 @@
             meta->setInt32(kKeyWidth, width);
             meta->setInt32(kKeyHeight, height);
         } else {
-            ALOGV("did not find width and/or height");
+            ALOGE("did not find width and/or height");
+            return BAD_VALUE;
         }
 
         int32_t sarWidth, sarHeight;
@@ -1803,14 +1807,14 @@
             }
         }
     } else if (mime.startsWith("audio/")) {
-        int32_t numChannels;
-        if (msg->findInt32("channel-count", &numChannels)) {
-            meta->setInt32(kKeyChannelCount, numChannels);
+        int32_t numChannels, sampleRate;
+        if (!msg->findInt32("channel-count", &numChannels) ||
+                !msg->findInt32("sample-rate", &sampleRate)) {
+            ALOGE("did not find channel-count and/or sample-rate");
+            return BAD_VALUE;
         }
-        int32_t sampleRate;
-        if (msg->findInt32("sample-rate", &sampleRate)) {
-            meta->setInt32(kKeySampleRate, sampleRate);
-        }
+        meta->setInt32(kKeyChannelCount, numChannels);
+        meta->setInt32(kKeySampleRate, sampleRate);
         int32_t bitsPerSample;
         if (msg->findInt32("bits-per-sample", &bitsPerSample)) {
             meta->setInt32(kKeyBitsPerSample, bitsPerSample);
@@ -1925,7 +1929,8 @@
                     }
                 }
             } else {
-                ALOGW("We need csd-2!!. %s", msg->debugString().c_str());
+                ALOGE("We need csd-2!!. %s", msg->debugString().c_str());
+                return BAD_VALUE;
             }
         } else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
             meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
@@ -1991,6 +1996,7 @@
     ALOGI("converted %s to:", msg->debugString(0).c_str());
     meta->dumpToLog();
 #endif
+    return OK;
 }
 
 status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink,
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index f7e6c27..8057312 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -445,6 +445,7 @@
     size_t updateBuffers(int32_t portIndex, const sp<AMessage> &msg);
     status_t onQueueInputBuffer(const sp<AMessage> &msg);
     status_t onReleaseOutputBuffer(const sp<AMessage> &msg);
+    BufferInfo *peekNextPortBuffer(int32_t portIndex);
     ssize_t dequeuePortBuffer(int32_t portIndex);
 
     status_t getBufferAndFormat(
@@ -476,6 +477,7 @@
     status_t onSetParameters(const sp<AMessage> &params);
 
     status_t amendOutputFormatWithCodecSpecificData(const sp<MediaCodecBuffer> &buffer);
+    void handleOutputFormatChangeIfNeeded(const sp<MediaCodecBuffer> &buffer);
     bool isExecuting() const;
 
     uint64_t getGraphicBufferSize();
diff --git a/media/libstagefright/include/media/stagefright/Utils.h b/media/libstagefright/include/media/stagefright/Utils.h
index 2b9b759..1673120 100644
--- a/media/libstagefright/include/media/stagefright/Utils.h
+++ b/media/libstagefright/include/media/stagefright/Utils.h
@@ -33,7 +33,7 @@
         const MetaDataBase *meta, sp<AMessage> *format);
 status_t convertMetaDataToMessage(
         const sp<MetaData> &meta, sp<AMessage> *format);
-void convertMessageToMetaData(
+status_t convertMessageToMetaData(
         const sp<AMessage> &format, sp<MetaData> &meta);
 
 // Returns a pointer to the next NAL start code in buffer of size |length| starting at |data|, or
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 87ea084..7699700 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -62,7 +62,7 @@
 }
 
 static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
-        uid_t uid, bool start) {
+        uid_t uid, bool start, bool isHotwordSource) {
     // Okay to not track in app ops as audio server or media server is us and if
     // device is rooted security model is considered compromised.
     // system_server loses its RECORD_AUDIO permission when a secondary
@@ -87,16 +87,21 @@
     }
 
     AppOpsManager appOps;
-    const int32_t op = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
+    const int32_t opRecordAudio = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
+
     if (start) {
+        const int32_t op = isHotwordSource ?
+                AppOpsManager::OP_RECORD_AUDIO_HOTWORD : opRecordAudio;
         if (appOps.startOpNoThrow(op, uid, resolvedOpPackageName, /*startIfModeDefault*/ false)
                 != AppOpsManager::MODE_ALLOWED) {
             ALOGE("Request denied by app op: %d", op);
             return false;
         }
     } else {
-        if (appOps.checkOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
-            ALOGE("Request denied by app op: %d", op);
+        // Always use OP_RECORD_AUDIO for checks at creation time.
+        if (appOps.checkOp(opRecordAudio, uid, resolvedOpPackageName)
+                != AppOpsManager::MODE_ALLOWED) {
+            ALOGE("Request denied by app op: %d", opRecordAudio);
             return false;
         }
     }
@@ -105,14 +110,15 @@
 }
 
 bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid) {
-    return checkRecordingInternal(opPackageName, pid, uid, /*start*/ false);
+    return checkRecordingInternal(opPackageName, pid, uid, /*start*/ false,
+            /*is_hotword_source*/ false);
 }
 
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid) {
-     return checkRecordingInternal(opPackageName, pid, uid, /*start*/ true);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid, bool isHotwordSource) {
+     return checkRecordingInternal(opPackageName, pid, uid, /*start*/ true, isHotwordSource);
 }
 
-void finishRecording(const String16& opPackageName, uid_t uid) {
+void finishRecording(const String16& opPackageName, uid_t uid, bool isHotwordSource) {
     // Okay to not track in app ops as audio server is us and if
     // device is rooted security model is considered compromised.
     if (isAudioServerOrRootUid(uid)) return;
@@ -125,7 +131,8 @@
     }
 
     AppOpsManager appOps;
-    const int32_t op = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
+    const int32_t op = isHotwordSource ? AppOpsManager::OP_RECORD_AUDIO_HOTWORD
+            : appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
     appOps.finishOp(op, uid, resolvedOpPackageName);
 }
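
The app-op selection above is deliberately asymmetric: OP_RECORD_AUDIO_HOTWORD is used only when a hotword-source capture starts or finishes, while creation-time checks always use the plain record-audio op. A minimal sketch of that selection (the enum values are stand-ins, not the real AppOpsManager codes):

    enum class RecordOp { RecordAudio, RecordAudioHotword };

    // Op used when a capture actually starts (and, symmetrically, when it finishes).
    RecordOp opForStartOrFinish(bool isHotwordSource) {
        return isHotwordSource ? RecordOp::RecordAudioHotword : RecordOp::RecordAudio;
    }

    // Creation-time permission checks always use the plain record-audio op.
    RecordOp opForCreationCheck() {
        return RecordOp::RecordAudio;
    }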
 
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 212599a..431dd7a 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -79,8 +79,8 @@
 }
 
 bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
-void finishRecording(const String16& opPackageName, uid_t uid);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid, bool isHotwordSource);
+void finishRecording(const String16& opPackageName, uid_t uid, bool isHotwordSource);
 bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
 bool captureMediaOutputAllowed(pid_t pid, uid_t uid);
 bool captureVoiceCommunicationOutputAllowed(pid_t pid, uid_t uid);
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index f014209..764fdc3 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -852,7 +852,8 @@
                                       input.notificationsPerBuffer, input.speed,
                                       input.sharedBuffer, sessionId, &output.flags,
                                       callingPid, input.clientInfo.clientTid, clientUid,
-                                      &lStatus, portId, input.audioTrackCallback);
+                                      &lStatus, portId, input.audioTrackCallback,
+                                      input.opPackageName);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
         // we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
 
@@ -2068,8 +2069,8 @@
         Mutex::Autolock _l(mLock);
         RecordThread *thread = checkRecordThread_l(output.inputId);
         if (thread == NULL) {
-            ALOGE("createRecord() checkRecordThread_l failed, input handle %d", output.inputId);
-            lStatus = BAD_VALUE;
+            ALOGW("createRecord() checkRecordThread_l failed, input handle %d", output.inputId);
+            lStatus = FAILED_TRANSACTION;
             goto Exit;
         }
 
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index d8eebf3..a2df29b 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -26,10 +26,11 @@
     bool hasOpPlayAudio() const;
 
     static sp<OpPlayAudioMonitor> createIfNeeded(
-            uid_t uid, const audio_attributes_t& attr, int id, audio_stream_type_t streamType);
+            uid_t uid, const audio_attributes_t& attr, int id, audio_stream_type_t streamType,
+            const std::string& opPackageName);
 
 private:
-    OpPlayAudioMonitor(uid_t uid, audio_usage_t usage, int id);
+    OpPlayAudioMonitor(uid_t uid, audio_usage_t usage, int id, const String16& opPackageName);
     void onFirstRef() override;
     static void getPackagesForUid(uid_t uid, Vector<String16>& packages);
 
@@ -49,10 +50,10 @@
     void checkPlayAudioForUsage();
 
     std::atomic_bool mHasOpPlayAudio;
-    Vector<String16> mPackages;
     const uid_t mUid;
     const int32_t mUsage; // on purpose not audio_usage_t because always checked in appOps as int32_t
     const int mId; // for logging purposes only
+    const String16 mOpPackageName;
 };
 
 // playback track
@@ -77,7 +78,8 @@
                                 audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE,
                                 /** default behaviour is to start when there are as many frames
                                   * ready as possible (aka. Buffer is full). */
-                                size_t frameCountToBeReady = SIZE_MAX);
+                                size_t frameCountToBeReady = SIZE_MAX,
+                                const std::string opPackageName = "");
     virtual             ~Track();
     virtual status_t    initCheck() const;
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 2af27d8..1d0147d 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2067,7 +2067,8 @@
         uid_t uid,
         status_t *status,
         audio_port_handle_t portId,
-        const sp<media::IAudioTrackCallback>& callback)
+        const sp<media::IAudioTrackCallback>& callback,
+        const std::string& opPackageName)
 {
     size_t frameCount = *pFrameCount;
     size_t notificationFrameCount = *pNotificationFrameCount;
@@ -2348,7 +2349,8 @@
         track = new Track(this, client, streamType, attr, sampleRate, format,
                           channelMask, frameCount,
                           nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
-                          sessionId, creatorPid, uid, *flags, TrackBase::TYPE_DEFAULT, portId);
+                          sessionId, creatorPid, uid, *flags, TrackBase::TYPE_DEFAULT, portId,
+                          SIZE_MAX /*frameCountToBeReady*/, opPackageName);
 
         lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
         if (lStatus != NO_ERROR) {
@@ -2360,7 +2362,7 @@
         {
             Mutex::Autolock _atCbL(mAudioTrackCbLock);
             if (callback.get() != nullptr) {
-                mAudioTrackCallbacks.emplace(callback);
+                mAudioTrackCallbacks.emplace(track, callback);
             }
         }
 
@@ -2588,6 +2590,10 @@
     mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.string());
 
     mTracks.remove(track);
+    {
+        Mutex::Autolock _atCbL(mAudioTrackCbLock);
+        mAudioTrackCallbacks.erase(track);
+    }
     if (track->isFastTrack()) {
         int index = track->mFastIndex;
         ALOG_ASSERT(0 < index && index < (int)FastMixerState::sMaxFastTracks);
@@ -2683,8 +2689,8 @@
                     audio_utils::metadata::byteStringFromData(metadata);
             std::vector metadataVec(metaDataStr.begin(), metaDataStr.end());
             Mutex::Autolock _l(mAudioTrackCbLock);
-            for (const auto& callback : mAudioTrackCallbacks) {
-                callback->onCodecFormatChanged(metadataVec);
+            for (const auto& callbackPair : mAudioTrackCallbacks) {
+                callbackPair.second->onCodecFormatChanged(metadataVec);
             }
     }).detach();
 }
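
Keying the callback registry by track (a std::map instead of a std::set) lets removeTrack_l erase exactly the callback that belongs to the departing track. A simplified standalone sketch of that registry shape, using plain std types rather than the AudioFlinger classes:

    #include <cstdint>
    #include <functional>
    #include <map>
    #include <memory>
    #include <mutex>
    #include <vector>

    struct Track {};  // stand-in for the playback track
    using FormatCallback = std::function<void(const std::vector<uint8_t>&)>;

    class TrackCallbackRegistry {
    public:
        void add(const std::shared_ptr<Track>& track, FormatCallback cb) {
            std::lock_guard<std::mutex> lock(mLock);
            mCallbacks.emplace(track, std::move(cb));   // one callback per track
        }
        void removeTrack(const std::shared_ptr<Track>& track) {
            std::lock_guard<std::mutex> lock(mLock);
            mCallbacks.erase(track);                    // callback leaves with the track
        }
        void notifyCodecFormatChanged(const std::vector<uint8_t>& metadata) {
            std::lock_guard<std::mutex> lock(mLock);
            for (const auto& [track, cb] : mCallbacks) {
                (void)track;
                cb(metadata);
            }
        }
    private:
        std::mutex mLock;
        std::map<std::shared_ptr<Track>, FormatCallback> mCallbacks;
    };
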
@@ -7869,7 +7875,8 @@
         AutoMutex lock(mLock);
         if (recordTrack->isInvalid()) {
             recordTrack->clearSyncStartEvent();
-            return INVALID_OPERATION;
+            ALOGW("%s track %d: invalidated before startInput", __func__, recordTrack->portId());
+            return DEAD_OBJECT;
         }
         if (mActiveTracks.indexOf(recordTrack) >= 0) {
             if (recordTrack->mState == TrackBase::PAUSING) {
@@ -7899,7 +7906,8 @@
                     recordTrack->mState = TrackBase::STARTING_2;
                     // STARTING_2 forces destroy to call stopInput.
                 }
-                return INVALID_OPERATION;
+                ALOGW("%s track %d: invalidated after startInput", __func__, recordTrack->portId());
+                return DEAD_OBJECT;
             }
             if (recordTrack->mState != TrackBase::STARTING_1) {
                 ALOGW("%s(%d): unsynchronized mState:%d change",
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index c1ac2e4..6b33ad5 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -864,7 +864,8 @@
                                 uid_t uid,
                                 status_t *status /*non-NULL*/,
                                 audio_port_handle_t portId,
-                                const sp<media::IAudioTrackCallback>& callback);
+                                const sp<media::IAudioTrackCallback>& callback,
+                                const std::string& opPackageName);
 
                 AudioStreamOut* getOutput() const;
                 AudioStreamOut* clearOutput();
@@ -1186,7 +1187,7 @@
 
     Mutex                                    mAudioTrackCbLock;
     // Record of IAudioTrackCallback
-    std::set<sp<media::IAudioTrackCallback>> mAudioTrackCallbacks;
+    std::map<sp<Track>, sp<media::IAudioTrackCallback>> mAudioTrackCallbacks;
 
 private:
     // The HAL output sink is treated as non-blocking, but current implementation is blocking
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 126015f..f286d8a 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -386,11 +386,12 @@
 // static
 sp<AudioFlinger::PlaybackThread::OpPlayAudioMonitor>
 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::createIfNeeded(
-            uid_t uid, const audio_attributes_t& attr, int id, audio_stream_type_t streamType)
+            uid_t uid, const audio_attributes_t& attr, int id, audio_stream_type_t streamType,
+            const std::string& opPackageName)
 {
+    Vector <String16> packages;
+    getPackagesForUid(uid, packages);
     if (isServiceUid(uid)) {
-        Vector <String16> packages;
-        getPackagesForUid(uid, packages);
         if (packages.isEmpty()) {
             ALOGD("OpPlayAudio: not muting track:%d usage:%d for service UID %d",
                   id,
@@ -410,12 +411,32 @@
             id, attr.flags);
         return nullptr;
     }
-    return new OpPlayAudioMonitor(uid, attr.usage, id);
+
+    String16 opPackageNameStr(opPackageName.c_str());
+    if (opPackageName.empty()) {
+        // If no package name is provided by the client, use the first associated with the uid
+        if (!packages.isEmpty()) {
+            opPackageNameStr = packages[0];
+        }
+    } else {
+        // If the provided package name is invalid, we force app ops denial by clearing the package
+        // name passed to OpPlayAudioMonitor
+        if (std::find_if(packages.begin(), packages.end(),
+                [&opPackageNameStr](const auto& package) {
+                return opPackageNameStr == package; }) == packages.end()) {
+            ALOGW("The package name(%s) provided does not correspond to the uid %d, "
+                  "force muting the track", opPackageName.c_str(), uid);
+            // Set package name as an empty string so that hasOpPlayAudio will always return false.
+            opPackageNameStr = String16("");
+        }
+    }
+    return new OpPlayAudioMonitor(uid, attr.usage, id, opPackageNameStr);
 }
 
 AudioFlinger::PlaybackThread::OpPlayAudioMonitor::OpPlayAudioMonitor(
-        uid_t uid, audio_usage_t usage, int id)
-        : mHasOpPlayAudio(true), mUid(uid), mUsage((int32_t) usage), mId(id)
+        uid_t uid, audio_usage_t usage, int id, const String16& opPackageName)
+        : mHasOpPlayAudio(true), mUid(uid), mUsage((int32_t) usage), mId(id),
+          mOpPackageName(opPackageName)
 {
 }
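
A standalone sketch of the package-name resolution added above, with std::string and std::vector standing in for String16 and Vector<String16>: an empty client name falls back to the uid's first package, and a name that does not belong to the uid is cleared so the app-op check can never pass and the track stays muted.

    #include <algorithm>
    #include <string>
    #include <vector>

    std::string resolveOpPackageName(const std::string& provided,
                                     const std::vector<std::string>& packagesForUid) {
        if (provided.empty()) {
            // No name supplied by the client: use the first package of the uid, if any.
            return packagesForUid.empty() ? std::string() : packagesForUid.front();
        }
        const bool belongsToUid =
                std::find(packagesForUid.begin(), packagesForUid.end(), provided)
                        != packagesForUid.end();
        // An invalid name is replaced by an empty one, which forces app-op denial.
        return belongsToUid ? provided : std::string();
    }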
 
@@ -429,11 +450,10 @@
 
 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::onFirstRef()
 {
-    getPackagesForUid(mUid, mPackages);
     checkPlayAudioForUsage();
-    if (!mPackages.isEmpty()) {
+    if (mOpPackageName.size() != 0) {
         mOpCallback = new PlayAudioOpCallback(this);
-        mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO, mPackages[0], mOpCallback);
+        mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO, mOpPackageName, mOpCallback);
     }
 }
 
@@ -446,18 +466,11 @@
 // - not called from PlayAudioOpCallback because the callback is not installed in this case
 void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage()
 {
-    if (mPackages.isEmpty()) {
+    if (mOpPackageName.size() == 0) {
         mHasOpPlayAudio.store(false);
     } else {
-        bool hasIt = true;
-        for (const String16& packageName : mPackages) {
-            const int32_t mode = mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO,
-                    mUsage, mUid, packageName);
-            if (mode != AppOpsManager::MODE_ALLOWED) {
-                hasIt = false;
-                break;
-            }
-        }
+        bool hasIt = mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO,
+                    mUsage, mUid, mOpPackageName) == AppOpsManager::MODE_ALLOWED;
         ALOGD("OpPlayAudio: track:%d usage:%d %smuted", mId, mUsage, hasIt ? "not " : "");
         mHasOpPlayAudio.store(hasIt);
     }
@@ -511,7 +524,8 @@
             audio_output_flags_t flags,
             track_type type,
             audio_port_handle_t portId,
-            size_t frameCountToBeReady)
+            size_t frameCountToBeReady,
+            const std::string opPackageName)
     :   TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
                   // TODO: Using unsecurePointer() has some associated security pitfalls
                   //       (see declaration for details).
@@ -534,7 +548,8 @@
     mPresentationCompleteFrames(0),
     mFrameMap(16 /* sink-frame-to-track-frame map memory */),
     mVolumeHandler(new media::VolumeHandler(sampleRate)),
-    mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(uid, attr, id(), streamType)),
+    mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(
+            uid, attr, id(), streamType, opPackageName)),
     // mSinkTimestamp
     mFrameCountToBeReady(frameCountToBeReady),
     mFastIndex(-1),
@@ -598,7 +613,7 @@
     if (channelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
         mAudioVibrationController = new AudioVibrationController(this);
         mExternalVibration = new os::ExternalVibration(
-                mUid, "" /* pkg */, mAttr, mAudioVibrationController);
+                mUid, opPackageName, mAttr, mAudioVibrationController);
     }
 
     // Once this item is logged by the server, the client can add properties.
@@ -2207,7 +2222,8 @@
         RecordThread *recordThread = (RecordThread *)thread.get();
         return recordThread->start(this, event, triggerSession);
     } else {
-        return BAD_VALUE;
+        ALOGW("%s track %d: thread was destroyed", __func__, portId());
+        return DEAD_OBJECT;
     }
 }
 
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index 981582e..1821140 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -136,7 +136,7 @@
     {"rerouting",
      {
          {"", AUDIO_STREAM_REROUTING, "AUDIO_STREAM_REROUTING",
-          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
+          {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VIRTUAL_SOURCE, AUDIO_SOURCE_DEFAULT, 0, ""}}
          }
      },
     },
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index c5c13e9..07e80d1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2256,7 +2256,7 @@
     sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId);
     if (inputDesc == 0) {
         ALOGW("%s no input for client %d", __FUNCTION__, portId);
-        return BAD_VALUE;
+        return DEAD_OBJECT;
     }
     audio_io_handle_t input = inputDesc->mIoHandle;
     sp<RecordClientDescriptor> client = inputDesc->getClient(portId);
@@ -3161,6 +3161,8 @@
     if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
         DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, true /*fromCache*/);
         waitMs = updateCallRouting(newDevices, delayMs);
+        // Only apply special touch sound delay once
+        delayMs = 0;
     }
     for (size_t i = 0; i < mOutputs.size(); i++) {
         sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
@@ -3170,6 +3172,8 @@
             // preventing the force re-routing in case of default dev that distinguishes on address.
             // Let's give back to engine full device choice decision however.
             waitMs = setOutputDevices(outputDesc, newDevices, !newDevices.isEmpty(), delayMs);
+            // Only apply special touch sound delay once
+            delayMs = 0;
         }
         if (forceVolumeReeval && !newDevices.isEmpty()) {
             applyStreamVolumes(outputDesc, newDevices.types(), waitMs, true);
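
A minimal sketch of the "apply the delay only once" change above: the first routing update consumes the touch-sound delay, and every later output in the same pass gets zero. setOutputDelayed is a hypothetical stand-in for the real routing call.

    #include <vector>

    void setOutputDelayed(int /*output*/, int delayMs) {
        // Stand-in for setOutputDevices(); would schedule the routing change after delayMs.
        (void)delayMs;
    }

    void reroute(const std::vector<int>& outputs, int delayMs) {
        for (int output : outputs) {
            setOutputDelayed(output, delayMs);
            delayMs = 0;  // special touch-sound delay is applied to the first update only
        }
    }
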
@@ -5648,7 +5652,7 @@
             sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
             setVolumeSourceMute(ttsVolumeSource, mute/*on*/, desc, 0 /*delay*/, DeviceTypeSet());
             const uint32_t latency = desc->latency() * 2;
-            if (latency > maxLatency) {
+            if (desc->isActive(latency * 2) && latency > maxLatency) {
                 maxLatency = latency;
             }
         }
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 34d07b6..df27f6e 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -572,7 +572,8 @@
     }
 
     // check calling permissions
-    if (!(startRecording(client->opPackageName, client->pid, client->uid)
+    if (!(startRecording(client->opPackageName, client->pid, client->uid,
+            client->attributes.source == AUDIO_SOURCE_HOTWORD)
             || client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
         ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
                 __func__, client->uid, client->pid);
@@ -660,7 +661,8 @@
         client->active = false;
         client->startTimeNs = 0;
         updateUidStates_l();
-        finishRecording(client->opPackageName, client->uid);
+        finishRecording(client->opPackageName, client->uid,
+                        client->attributes.source == AUDIO_SOURCE_HOTWORD);
     }
 
     return status;
@@ -686,7 +688,8 @@
     updateUidStates_l();
 
     // finish the recording app op
-    finishRecording(client->opPackageName, client->uid);
+    finishRecording(client->opPackageName, client->uid,
+                    client->attributes.source == AUDIO_SOURCE_HOTWORD);
     AutoCallerClear acc;
     return mAudioPolicyManager->stopInput(portId);
 }
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index e847f9f..a6e8989 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -488,9 +488,9 @@
         }
 
         bool isAccessibility = mUidPolicy->isA11yUid(current->uid);
-        // Clients capturing for Accessibility services are not considered
+        // Clients capturing for Accessibility services or virtual sources are not considered
         // for top or latest active to avoid masking regular clients started before
-        if (!isAccessibility) {
+        if (!isAccessibility && !isVirtualSource(current->attributes.source)) {
             bool isAssistant = mUidPolicy->isAssistantUid(current->uid);
             bool isPrivacySensitive =
                     (current->attributes.flags & AUDIO_FLAG_CAPTURE_PRIVATE) != 0;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index af1e01d..24288d6 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -3751,9 +3751,14 @@
                 __FUNCTION__, cameraId.string());
         return;
     }
+
+    // Collect the logical cameras without holding mStatusLock in updateStatus
+    // as that can lead to a deadlock (b/162192331).
+    auto logicalCameraIds = getLogicalCameras(cameraId);
     // Update the status for this camera state, then send the onStatusChangedCallbacks to each
     // of the listeners with both the mStatusLock and mStatusListenerLock held
-    state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind, &supportsHAL3]
+    state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind, &supportsHAL3,
+                        &logicalCameraIds]
             (const String8& cameraId, StatusInternal status) {
 
             if (status != StatusInternal::ENUMERATING) {
@@ -3773,8 +3778,8 @@
             }
 
             Mutex::Autolock lock(mStatusListenerLock);
-
-            notifyPhysicalCameraStatusLocked(mapToInterface(status), cameraId, deviceKind);
+            notifyPhysicalCameraStatusLocked(mapToInterface(status), String16(cameraId),
+                    logicalCameraIds, deviceKind);
 
             for (auto& listener : mListenerList) {
                 bool isVendorListener = listener->isVendorListener();
@@ -3892,8 +3897,9 @@
     return OK;
 }
 
-void CameraService::notifyPhysicalCameraStatusLocked(int32_t status, const String8& cameraId,
-        SystemCameraKind deviceKind) {
+std::list<String16> CameraService::getLogicalCameras(
+        const String8& physicalCameraId) {
+    std::list<String16> retList;
     Mutex::Autolock lock(mCameraStatesLock);
     for (const auto& state : mCameraStates) {
         std::vector<std::string> physicalCameraIds;
@@ -3901,26 +3907,39 @@
             // This is not a logical multi-camera.
             continue;
         }
-        if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(), cameraId.c_str())
+        if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(), physicalCameraId.c_str())
                 == physicalCameraIds.end()) {
             // cameraId is not a physical camera of this logical multi-camera.
             continue;
         }
 
-        String16 id16(state.first), physicalId16(cameraId);
+        retList.emplace_back(String16(state.first));
+    }
+    return retList;
+}
+
+void CameraService::notifyPhysicalCameraStatusLocked(int32_t status,
+        const String16& physicalCameraId, const std::list<String16>& logicalCameraIds,
+        SystemCameraKind deviceKind) {
+    // mStatusListenerLock is expected to be locked
+    for (const auto& logicalCameraId : logicalCameraIds) {
         for (auto& listener : mListenerList) {
+            // Note: we check only the deviceKind of the physical camera id
+            // since logical camera ids and their physical camera ids are
+            // guaranteed to have the same system camera kind.
             if (shouldSkipStatusUpdates(deviceKind, listener->isVendorListener(),
                     listener->getListenerPid(), listener->getListenerUid())) {
                 ALOGV("Skipping discovery callback for system-only camera device %s",
-                        cameraId.c_str());
+                        String8(physicalCameraId).c_str());
                 continue;
             }
             listener->getListener()->onPhysicalCameraStatusChanged(status,
-                    id16, physicalId16);
+                    logicalCameraId, physicalCameraId);
         }
     }
 }
 
+
 void CameraService::blockClientsForUid(uid_t uid) {
     const auto clients = mActiveClientManager.getAll();
     for (auto& current : clients) {
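
The deadlock fix above gathers the logical-camera list before entering updateStatus, so nothing running inside the status callback has to take mCameraStatesLock while mStatusLock is held. A generic standalone sketch of that "collect outside the lock, consume inside" ordering; the mutex and function names are illustrative only.

    #include <list>
    #include <mutex>
    #include <string>

    std::mutex gStatesLock;   // analogous to mCameraStatesLock
    std::mutex gStatusLock;   // analogous to mStatusLock held inside updateStatus

    std::list<std::string> collectLogicalIds(const std::string& physicalId) {
        std::lock_guard<std::mutex> lock(gStatesLock);   // taken on its own, first
        // ... walk the camera states and collect the logical ids backed by physicalId ...
        (void)physicalId;
        return {};
    }

    void updateStatusFor(const std::string& physicalId) {
        // Collect before taking the status lock; taking gStatesLock while already
        // holding gStatusLock is the lock-order inversion being fixed.
        auto logicalIds = collectLogicalIds(physicalId);
        std::lock_guard<std::mutex> lock(gStatusLock);
        for (const auto& id : logicalIds) {
            // notify listeners for each logical camera backed by physicalId
            (void)id;
        }
    }
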
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 4321201..685ed5e 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -49,6 +49,7 @@
 
 #include <set>
 #include <string>
+#include <list>
 #include <map>
 #include <memory>
 #include <utility>
@@ -1005,8 +1006,13 @@
             hardware::camera::common::V1_0::TorchModeStatus status);
 
     // notify physical camera status when the physical camera is public.
-    void notifyPhysicalCameraStatusLocked(int32_t status, const String8& cameraId,
-            SystemCameraKind deviceKind);
+    // Expects mStatusListenerLock to be locked.
+    void notifyPhysicalCameraStatusLocked(int32_t status, const String16& physicalCameraId,
+            const std::list<String16>& logicalCameraIds, SystemCameraKind deviceKind);
+
+    // get list of logical cameras which are backed by physicalCameraId
+    std::list<String16> getLogicalCameras(const String8& physicalCameraId);
+
 
     // IBinder::DeathRecipient implementation
     virtual void        binderDied(const wp<IBinder> &who);
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 32d118d..876d70d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -1344,6 +1344,20 @@
         }
     }
 
+    // cameraDeviceStatusChange callbacks may be called (causing new devices to be added)
+    // before setCallback returns
+    hardware::Return<Status> status = interface->setCallback(this);
+    if (!status.isOk()) {
+        ALOGE("%s: Transaction error setting up callbacks with camera provider '%s': %s",
+                __FUNCTION__, mProviderName.c_str(), status.description().c_str());
+        return DEAD_OBJECT;
+    }
+    if (status != Status::OK) {
+        ALOGE("%s: Unable to register callbacks with camera provider '%s'",
+                __FUNCTION__, mProviderName.c_str());
+        return mapToStatusT(status);
+    }
+
     hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
     if (!linked.isOk()) {
         ALOGE("%s: Transaction error in linking to camera provider '%s' death: %s",
@@ -1372,7 +1386,6 @@
         return res;
     }
 
-    Status status;
     // Get initial list of camera devices, if any
     std::vector<std::string> devices;
     hardware::Return<void> ret = interface->getCameraIdList([&status, this, &devices](
@@ -1437,26 +1450,43 @@
         }
     }
 
-    // cameraDeviceStatusChange callbacks may be called (and causing new devices added)
-    // before setCallback returns. setCallback must be called after addDevice so that
-    // the physical camera status callback can look up available regular
-    // cameras.
-    hardware::Return<Status> st = interface->setCallback(this);
-    if (!st.isOk()) {
-        ALOGE("%s: Transaction error setting up callbacks with camera provider '%s': %s",
-                __FUNCTION__, mProviderName.c_str(), st.description().c_str());
-        return DEAD_OBJECT;
-    }
-    if (st != Status::OK) {
-        ALOGE("%s: Unable to register callbacks with camera provider '%s'",
-                __FUNCTION__, mProviderName.c_str());
-        return mapToStatusT(st);
-    }
-
     ALOGI("Camera provider %s ready with %zu camera devices",
             mProviderName.c_str(), mDevices.size());
 
-    mInitialized = true;
+    // Process cached status callbacks
+    std::unique_ptr<std::vector<CameraStatusInfoT>> cachedStatus =
+            std::make_unique<std::vector<CameraStatusInfoT>>();
+    {
+        std::lock_guard<std::mutex> lock(mInitLock);
+
+        for (auto& statusInfo : mCachedStatus) {
+            std::string id, physicalId;
+            status_t res = OK;
+            if (statusInfo.isPhysicalCameraStatus) {
+                res = physicalCameraDeviceStatusChangeLocked(&id, &physicalId,
+                    statusInfo.cameraId, statusInfo.physicalCameraId, statusInfo.status);
+            } else {
+                res = cameraDeviceStatusChangeLocked(&id, statusInfo.cameraId, statusInfo.status);
+            }
+            if (res == OK) {
+                cachedStatus->emplace_back(statusInfo.isPhysicalCameraStatus,
+                        id.c_str(), physicalId.c_str(), statusInfo.status);
+            }
+        }
+        mCachedStatus.clear();
+
+        mInitialized = true;
+    }
+
+    // The cached status change callbacks cannot be fired directly from this
+    // function, due to same-thread deadlock trying to acquire mInterfaceMutex
+    // twice.
+    if (listener != nullptr) {
+        mInitialStatusCallbackFuture = std::async(std::launch::async,
+                &CameraProviderManager::ProviderInfo::notifyInitialStatusChange, this,
+                listener, std::move(cachedStatus));
+    }
+
     return OK;
 }
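
A simplified standalone model of the "cache until initialized, replay asynchronously" pattern used above: status events that arrive before initialize() finishes are queued under the init lock, and initialize() drains the queue from a separate std::async task so the notification never re-enters locks still held by the caller. All names here are stand-ins, not the CameraProviderManager types.

    #include <future>
    #include <mutex>
    #include <string>
    #include <vector>

    struct StatusEvent { std::string cameraId; int status; };

    class Provider {
    public:
        void onStatusChange(StatusEvent ev) {
            std::lock_guard<std::mutex> lock(mInitLock);
            if (!mInitialized) { mCached.push_back(std::move(ev)); return; }
            notify(ev);                       // normal path once initialized
        }
        void finishInit() {
            std::vector<StatusEvent> cached;
            {
                std::lock_guard<std::mutex> lock(mInitLock);
                cached.swap(mCached);
                mInitialized = true;
            }
            // Fire cached events off-thread; firing them inline could deadlock on
            // locks still held by the initialization path.
            mReplay = std::async(std::launch::async, [this, cached = std::move(cached)] {
                for (const auto& ev : cached) notify(ev);
            });
        }
        ~Provider() { if (mReplay.valid()) mReplay.wait(); }   // same idea as the dtor change below
    private:
        void notify(const StatusEvent&) { /* deliver to the status listener */ }
        std::mutex mInitLock;
        bool mInitialized = false;
        std::vector<StatusEvent> mCached;
        std::future<void> mReplay;
    };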
 
@@ -1734,104 +1764,139 @@
         CameraDeviceStatus newStatus) {
     sp<StatusListener> listener;
     std::string id;
-    bool initialized = false;
+    std::lock_guard<std::mutex> lock(mInitLock);
+
+    if (!mInitialized) {
+        mCachedStatus.emplace_back(false /*isPhysicalCameraStatus*/,
+                cameraDeviceName.c_str(), std::string().c_str(), newStatus);
+        return hardware::Void();
+    }
+
     {
         std::lock_guard<std::mutex> lock(mLock);
-        bool known = false;
-        for (auto& deviceInfo : mDevices) {
-            if (deviceInfo->mName == cameraDeviceName) {
-                ALOGI("Camera device %s status is now %s, was %s", cameraDeviceName.c_str(),
-                        deviceStatusToString(newStatus), deviceStatusToString(deviceInfo->mStatus));
-                deviceInfo->mStatus = newStatus;
-                // TODO: Handle device removal (NOT_PRESENT)
-                id = deviceInfo->mId;
-                known = true;
-                break;
-            }
-        }
-        // Previously unseen device; status must not be NOT_PRESENT
-        if (!known) {
-            if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
-                ALOGW("Camera provider %s says an unknown camera device %s is not present. Curious.",
-                    mProviderName.c_str(), cameraDeviceName.c_str());
-                return hardware::Void();
-            }
-            addDevice(cameraDeviceName, newStatus, &id);
-        } else if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
-            removeDevice(id);
+        if (OK != cameraDeviceStatusChangeLocked(&id, cameraDeviceName, newStatus)) {
+            return hardware::Void();
         }
         listener = mManager->getStatusListener();
-        initialized = mInitialized;
-        if (reCacheConcurrentStreamingCameraIdsLocked() != OK) {
-            ALOGE("%s: CameraProvider %s could not re-cache concurrent streaming camera id list ",
-                      __FUNCTION__, mProviderName.c_str());
-        }
     }
+
     // Call without lock held to allow reentrancy into provider manager
-    // Don't send the callback if providerInfo hasn't been initialized.
-    // CameraService will initialize device status after provider is
-    // initialized
-    if (listener != nullptr && initialized) {
+    if (listener != nullptr) {
         listener->onDeviceStatusChanged(String8(id.c_str()), newStatus);
     }
+
     return hardware::Void();
 }
 
+status_t CameraProviderManager::ProviderInfo::cameraDeviceStatusChangeLocked(
+        std::string* id, const hardware::hidl_string& cameraDeviceName,
+        CameraDeviceStatus newStatus) {
+    bool known = false;
+    std::string cameraId;
+    for (auto& deviceInfo : mDevices) {
+        if (deviceInfo->mName == cameraDeviceName) {
+            ALOGI("Camera device %s status is now %s, was %s", cameraDeviceName.c_str(),
+                    deviceStatusToString(newStatus), deviceStatusToString(deviceInfo->mStatus));
+            deviceInfo->mStatus = newStatus;
+            // TODO: Handle device removal (NOT_PRESENT)
+            cameraId = deviceInfo->mId;
+            known = true;
+            break;
+        }
+    }
+    // Previously unseen device; status must not be NOT_PRESENT
+    if (!known) {
+        if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
+            ALOGW("Camera provider %s says an unknown camera device %s is not present. Curious.",
+                mProviderName.c_str(), cameraDeviceName.c_str());
+            return BAD_VALUE;
+        }
+        addDevice(cameraDeviceName, newStatus, &cameraId);
+    } else if (newStatus == CameraDeviceStatus::NOT_PRESENT) {
+        removeDevice(cameraId);
+    }
+    if (reCacheConcurrentStreamingCameraIdsLocked() != OK) {
+        ALOGE("%s: CameraProvider %s could not re-cache concurrent streaming camera id list ",
+                  __FUNCTION__, mProviderName.c_str());
+    }
+    *id = cameraId;
+    return OK;
+}
+
 hardware::Return<void> CameraProviderManager::ProviderInfo::physicalCameraDeviceStatusChange(
         const hardware::hidl_string& cameraDeviceName,
         const hardware::hidl_string& physicalCameraDeviceName,
         CameraDeviceStatus newStatus) {
     sp<StatusListener> listener;
     std::string id;
-    bool initialized = false;
+    std::string physicalId;
+    std::lock_guard<std::mutex> lock(mInitLock);
+
+    if (!mInitialized) {
+        mCachedStatus.emplace_back(true /*isPhysicalCameraStatus*/, cameraDeviceName,
+                physicalCameraDeviceName, newStatus);
+        return hardware::Void();
+    }
+
     {
         std::lock_guard<std::mutex> lock(mLock);
-        bool known = false;
-        for (auto& deviceInfo : mDevices) {
-            if (deviceInfo->mName == cameraDeviceName) {
-                id = deviceInfo->mId;
 
-                if (!deviceInfo->mIsLogicalCamera) {
-                    ALOGE("%s: Invalid combination of camera id %s, physical id %s",
-                            __FUNCTION__, id.c_str(), physicalCameraDeviceName.c_str());
-                    return hardware::Void();
-                }
-                if (std::find(deviceInfo->mPhysicalIds.begin(), deviceInfo->mPhysicalIds.end(),
-                        physicalCameraDeviceName) == deviceInfo->mPhysicalIds.end()) {
-                    ALOGE("%s: Invalid combination of camera id %s, physical id %s",
-                            __FUNCTION__, id.c_str(), physicalCameraDeviceName.c_str());
-                    return hardware::Void();
-                }
-                ALOGI("Camera device %s physical device %s status is now %s, was %s",
-                        cameraDeviceName.c_str(), physicalCameraDeviceName.c_str(),
-                        deviceStatusToString(newStatus), deviceStatusToString(
-                        deviceInfo->mPhysicalStatus[physicalCameraDeviceName]));
-                known = true;
-                break;
-            }
-        }
-        // Previously unseen device; status must not be NOT_PRESENT
-        if (!known) {
-            ALOGW("Camera provider %s says an unknown camera device %s-%s is not present. Curious.",
-                    mProviderName.c_str(), cameraDeviceName.c_str(),
-                    physicalCameraDeviceName.c_str());
+        if (OK != physicalCameraDeviceStatusChangeLocked(&id, &physicalId, cameraDeviceName,
+                physicalCameraDeviceName, newStatus)) {
             return hardware::Void();
         }
+
         listener = mManager->getStatusListener();
-        initialized = mInitialized;
     }
     // Call without lock held to allow reentrancy into provider manager
-    // Don't send the callback if providerInfo hasn't been initialized.
-    // CameraService will initialize device status after provider is
-    // initialized
-    if (listener != nullptr && initialized) {
-        String8 physicalId(physicalCameraDeviceName.c_str());
+    if (listener != nullptr) {
         listener->onDeviceStatusChanged(String8(id.c_str()),
-                physicalId, newStatus);
+                String8(physicalId.c_str()), newStatus);
     }
     return hardware::Void();
 }
 
+status_t CameraProviderManager::ProviderInfo::physicalCameraDeviceStatusChangeLocked(
+            std::string* id, std::string* physicalId,
+            const hardware::hidl_string& cameraDeviceName,
+            const hardware::hidl_string& physicalCameraDeviceName,
+            CameraDeviceStatus newStatus) {
+    bool known = false;
+    std::string cameraId;
+    for (auto& deviceInfo : mDevices) {
+        if (deviceInfo->mName == cameraDeviceName) {
+            cameraId = deviceInfo->mId;
+            if (!deviceInfo->mIsLogicalCamera) {
+                ALOGE("%s: Invalid combination of camera id %s, physical id %s",
+                        __FUNCTION__, cameraId.c_str(), physicalCameraDeviceName.c_str());
+                return BAD_VALUE;
+            }
+            if (std::find(deviceInfo->mPhysicalIds.begin(), deviceInfo->mPhysicalIds.end(),
+                    physicalCameraDeviceName) == deviceInfo->mPhysicalIds.end()) {
+                ALOGE("%s: Invalid combination of camera id %s, physical id %s",
+                        __FUNCTION__, cameraId.c_str(), physicalCameraDeviceName.c_str());
+                return BAD_VALUE;
+            }
+            ALOGI("Camera device %s physical device %s status is now %s",
+                    cameraDeviceName.c_str(), physicalCameraDeviceName.c_str(),
+                    deviceStatusToString(newStatus));
+            known = true;
+            break;
+        }
+    }
+    // Previously unseen device; status must not be NOT_PRESENT
+    if (!known) {
+        ALOGW("Camera provider %s says an unknown camera device %s-%s is not present. Curious.",
+                mProviderName.c_str(), cameraDeviceName.c_str(),
+                physicalCameraDeviceName.c_str());
+        return BAD_VALUE;
+    }
+
+    *id = cameraId;
+    *physicalId = physicalCameraDeviceName.c_str();
+    return OK;
+}
+
 hardware::Return<void> CameraProviderManager::ProviderInfo::torchModeStatusChange(
         const hardware::hidl_string& cameraDeviceName,
         TorchModeStatus newStatus) {
@@ -1986,6 +2051,20 @@
     return INVALID_OPERATION;
 }
 
+void CameraProviderManager::ProviderInfo::notifyInitialStatusChange(
+        sp<StatusListener> listener,
+        std::unique_ptr<std::vector<CameraStatusInfoT>> cachedStatus) {
+    for (auto& statusInfo : *cachedStatus) {
+        if (statusInfo.isPhysicalCameraStatus) {
+            listener->onDeviceStatusChanged(String8(statusInfo.cameraId.c_str()),
+                    String8(statusInfo.physicalCameraId.c_str()), statusInfo.status);
+        } else {
+            listener->onDeviceStatusChanged(
+                    String8(statusInfo.cameraId.c_str()), statusInfo.status);
+        }
+    }
+}
+
 template<class DeviceInfoT>
 std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
     CameraProviderManager::ProviderInfo::initializeDeviceInfo(
@@ -2689,9 +2768,11 @@
 
 
 CameraProviderManager::ProviderInfo::~ProviderInfo() {
+    if (mInitialStatusCallbackFuture.valid()) {
+        mInitialStatusCallbackFuture.wait();
+    }
     // Destruction of ProviderInfo is only supposed to happen when the respective
     // CameraProvider interface dies, so do not unregister callbacks.
-
 }
 
 status_t CameraProviderManager::mapToStatusT(const Status& s)  {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 25d3639..a0e5f8f 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -22,6 +22,7 @@
 #include <unordered_set>
 #include <string>
 #include <mutex>
+#include <future>
 
 #include <camera/camera2/ConcurrentCamera.h>
 #include <camera/CameraParameters2.h>
@@ -403,6 +404,15 @@
                 const hardware::hidl_string& physicalCameraDeviceName,
                 hardware::camera::common::V1_0::CameraDeviceStatus newStatus) override;
 
+        status_t cameraDeviceStatusChangeLocked(
+                std::string* id, const hardware::hidl_string& cameraDeviceName,
+                hardware::camera::common::V1_0::CameraDeviceStatus newStatus);
+        status_t physicalCameraDeviceStatusChangeLocked(
+                std::string* id, std::string* physicalId,
+                const hardware::hidl_string& cameraDeviceName,
+                const hardware::hidl_string& physicalCameraDeviceName,
+                hardware::camera::common::V1_0::CameraDeviceStatus newStatus);
+
         // hidl_death_recipient interface - this locks the parent mInterfaceMutex
         virtual void serviceDied(uint64_t cookie, const wp<hidl::base::V1_0::IBase>& who) override;
 
@@ -444,8 +454,6 @@
             const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
 
             hardware::camera::common::V1_0::CameraDeviceStatus mStatus;
-            std::map<std::string, hardware::camera::common::V1_0::CameraDeviceStatus>
-                    mPhysicalStatus;
 
             wp<ProviderInfo> mParentProvider;
 
@@ -600,7 +608,27 @@
 
         CameraProviderManager *mManager;
 
+        struct CameraStatusInfoT {
+            bool isPhysicalCameraStatus = false;
+            hardware::hidl_string cameraId;
+            hardware::hidl_string physicalCameraId;
+            hardware::camera::common::V1_0::CameraDeviceStatus status;
+            CameraStatusInfoT(bool isForPhysicalCamera, const hardware::hidl_string& id,
+                    const hardware::hidl_string& physicalId,
+                    hardware::camera::common::V1_0::CameraDeviceStatus s) :
+                    isPhysicalCameraStatus(isForPhysicalCamera), cameraId(id),
+                    physicalCameraId(physicalId), status(s) {}
+        };
+
+        // Lock to synchronize between initialize() and camera status callbacks
+        std::mutex mInitLock;
         bool mInitialized = false;
+        std::vector<CameraStatusInfoT> mCachedStatus;
+        // End of scope for mInitLock
+
+        std::future<void> mInitialStatusCallbackFuture;
+        void notifyInitialStatusChange(sp<StatusListener> listener,
+                std::unique_ptr<std::vector<CameraStatusInfoT>> cachedStatus);
 
         std::vector<std::unordered_set<std::string>> mConcurrentCameraIdCombinations;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 4a509aa..d5f136b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1833,10 +1833,12 @@
 
     mStatusWaiters++;
 
+    bool signalPipelineDrain = false;
     if (!active && mUseHalBufManager) {
         auto streamIds = mOutputStreams.getStreamIds();
         if (mStatus == STATUS_ACTIVE) {
             mRequestThread->signalPipelineDrain(streamIds);
+            signalPipelineDrain = true;
         }
         mRequestBufferSM.onWaitUntilIdle();
     }
@@ -1866,6 +1868,10 @@
         }
     } while (!stateSeen);
 
+    if (signalPipelineDrain) {
+        mRequestThread->resetPipelineDrain();
+    }
+
     mStatusWaiters--;
 
     return res;
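
A simplified sketch of the new signal/reset pairing above: the waiter raises the drain notification before blocking and clears it again once the wait completes, so a stale drain request cannot leak into later captures. The class and member names are illustrative.

    #include <mutex>
    #include <vector>

    class PipelineDrainSignal {
    public:
        void signal(std::vector<int> streamIds) {        // mirrors signalPipelineDrain()
            std::lock_guard<std::mutex> lock(mLock);
            mNotifyPipelineDrain = true;
            mStreamIdsToBeDrained = std::move(streamIds);
        }
        void reset() {                                   // mirrors resetPipelineDrain()
            std::lock_guard<std::mutex> lock(mLock);
            mNotifyPipelineDrain = false;
            mStreamIdsToBeDrained.clear();
        }
    private:
        std::mutex mLock;
        bool mNotifyPipelineDrain = false;
        std::vector<int> mStreamIdsToBeDrained;
    };

The waiter calls reset() only when it raised the drain request itself, which is what the signalPipelineDrain boolean in the hunk above tracks.
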
@@ -2306,6 +2312,15 @@
         newRequest->mRotateAndCropAuto = false;
     }
 
+    auto zoomRatioEntry =
+            newRequest->mSettingsList.begin()->metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
+    if (zoomRatioEntry.count > 0 &&
+            zoomRatioEntry.data.f[0] == 1.0f) {
+        newRequest->mZoomRatioIs1x = true;
+    } else {
+        newRequest->mZoomRatioIs1x = false;
+    }
+
     return newRequest;
 }
 
@@ -4426,13 +4441,17 @@
                                 parent->mDistortionMappers.end()) {
                             continue;
                         }
-                        res = parent->mDistortionMappers[it->cameraId].correctCaptureRequest(
-                            &(it->metadata));
-                        if (res != OK) {
-                            SET_ERR("RequestThread: Unable to correct capture requests "
-                                    "for lens distortion for request %d: %s (%d)",
-                                    halRequest->frame_number, strerror(-res), res);
-                            return INVALID_OPERATION;
+
+                        if (!captureRequest->mDistortionCorrectionUpdated) {
+                            res = parent->mDistortionMappers[it->cameraId].correctCaptureRequest(
+                                    &(it->metadata));
+                            if (res != OK) {
+                                SET_ERR("RequestThread: Unable to correct capture requests "
+                                        "for lens distortion for request %d: %s (%d)",
+                                        halRequest->frame_number, strerror(-res), res);
+                                return INVALID_OPERATION;
+                            }
+                            captureRequest->mDistortionCorrectionUpdated = true;
                         }
                     }
 
@@ -4443,21 +4462,24 @@
                             continue;
                         }
 
-                        camera_metadata_entry_t e = it->metadata.find(ANDROID_CONTROL_ZOOM_RATIO);
-                        if (e.count > 0 && e.data.f[0] != 1.0f) {
+                        if (!captureRequest->mZoomRatioIs1x) {
                             cameraIdsWithZoom.insert(it->cameraId);
                         }
 
-                        res = parent->mZoomRatioMappers[it->cameraId].updateCaptureRequest(
-                            &(it->metadata));
-                        if (res != OK) {
-                            SET_ERR("RequestThread: Unable to correct capture requests "
-                                    "for zoom ratio for request %d: %s (%d)",
-                                    halRequest->frame_number, strerror(-res), res);
-                            return INVALID_OPERATION;
+                        if (!captureRequest->mZoomRatioUpdated) {
+                            res = parent->mZoomRatioMappers[it->cameraId].updateCaptureRequest(
+                                    &(it->metadata));
+                            if (res != OK) {
+                                SET_ERR("RequestThread: Unable to correct capture requests "
+                                        "for zoom ratio for request %d: %s (%d)",
+                                        halRequest->frame_number, strerror(-res), res);
+                                return INVALID_OPERATION;
+                            }
+                            captureRequest->mZoomRatioUpdated = true;
                         }
                     }
-                    if (captureRequest->mRotateAndCropAuto) {
+                    if (captureRequest->mRotateAndCropAuto &&
+                            !captureRequest->mRotationAndCropUpdated) {
                         for (it = captureRequest->mSettingsList.begin();
                                 it != captureRequest->mSettingsList.end(); it++) {
                             auto mapper = parent->mRotateAndCropMappers.find(it->cameraId);
@@ -4471,6 +4493,7 @@
                                 }
                             }
                         }
+                        captureRequest->mRotationAndCropUpdated = true;
                     }
                 }
             }
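
The per-request booleans introduced above make the metadata rewrites idempotent for repeating requests: distortion correction, zoom-ratio mapping, and rotate-and-crop overrides each run at most once per settings buffer. A simplified standalone sketch of that apply-once pattern, with stand-in types and functions:

    struct Settings { /* stand-in for the capture request metadata buffer */ };

    struct Request {
        Settings settings;
        bool distortionUpdated = false;
        bool zoomUpdated = false;
    };

    void correctDistortion(Settings&) { /* rewrite coordinate keys once */ }
    void mapZoomRatio(Settings&)      { /* rewrite crop and zoom keys once */ }

    void prepareForHal(Request& req) {
        if (!req.distortionUpdated) {
            correctDistortion(req.settings);
            req.distortionUpdated = true;   // never rewrite the same settings twice
        }
        if (!req.zoomUpdated) {
            mapZoomRatio(req.settings);
            req.zoomUpdated = true;
        }
    }
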
@@ -4785,6 +4808,12 @@
     mStreamIdsToBeDrained = streamIds;
 }
 
+void Camera3Device::RequestThread::resetPipelineDrain() {
+    Mutex::Autolock pl(mPauseLock);
+    mNotifyPipelineDrain = false;
+    mStreamIdsToBeDrained.clear();
+}
+
 void Camera3Device::RequestThread::clearPreviousRequest() {
     Mutex::Autolock l(mRequestLock);
     mPrevRequest.clear();
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 408f1f9..e10da2c 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -517,6 +517,19 @@
         // overriding of ROTATE_AND_CROP value and adjustment of coordinates
         // in several other controls in both the request and the result
         bool                                mRotateAndCropAuto;
+        // Whether this capture request has its zoom ratio set to 1.0x before
+        // the framework overrides it for camera HAL consumption.
+        bool                                mZoomRatioIs1x;
+
+        // Whether this capture request's distortion correction update has
+        // been done.
+        bool                                mDistortionCorrectionUpdated = false;
+        // Whether this capture request's rotation and crop update has been
+        // done.
+        bool                                mRotationAndCropUpdated = false;
+        // Whether this capture request's zoom ratio update has been done.
+        bool                                mZoomRatioUpdated = false;
     };
     typedef List<sp<CaptureRequest> > RequestList;
 
@@ -832,6 +845,7 @@
         }
 
         void signalPipelineDrain(const std::vector<int>& streamIds);
+        void resetPipelineDrain();
 
         status_t switchToOffline(
                 const std::vector<int32_t>& streamsToKeep,
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 29801a4..d78d1e3 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -43,7 +43,7 @@
 #endif
 
 // Maximum length of a device name.
-static constexpr size_t STATSD_DEVICE_NAME_MAX_LENGTH = 32;
+// static constexpr size_t STATSD_DEVICE_NAME_MAX_LENGTH = 32; // unused since we suppress
 
 // Transmit Enums to statsd in integer or strings  (this must match the atoms.proto)
 static constexpr bool STATSD_USE_INT_FOR_ENUM = false;
@@ -66,6 +66,8 @@
 
 static constexpr int PREVIOUS_STATE_EXPIRE_SEC = 60 * 60; // 1 hour.
 
+static constexpr const char * SUPPRESSED = "SUPPRESSED";
+
 /*
  * For logging purposes, we list all of the MediaMetrics atom fields,
  * which can then be associated with consecutive arguments to the statsd write.
@@ -448,6 +450,8 @@
     std::string outputDeviceNames;
     if (outputDevices.find("AUDIO_DEVICE_OUT_BLUETOOTH") != std::string::npos) {
         isBluetooth = true;
+        outputDeviceNames = SUPPRESSED;
+#if 0   // TODO(b/161554630) sanitize name
         mAudioAnalytics.mAnalyticsState->timeMachine().get(
             "audio.device.bt_a2dp", AMEDIAMETRICS_PROP_NAME, &outputDeviceNames);
         // Remove | if present
@@ -455,6 +459,7 @@
         if (outputDeviceNames.size() > STATSD_DEVICE_NAME_MAX_LENGTH) {
             outputDeviceNames.resize(STATSD_DEVICE_NAME_MAX_LENGTH); // truncate
         }
+#endif
     }
 
     switch (itemType) {
@@ -775,7 +780,7 @@
         std::lock_guard l(mLock);
         mA2dpConnectionRequestNs = atNs;
         ++mA2dpConnectionRequests;
-        mA2dpDeviceName = name;
+        mA2dpDeviceName = SUPPRESSED; // TODO(b/161554630) sanitize name
     }
     ALOGD("(key=%s) a2dp connection name:%s request atNs:%lld",
             key.c_str(), name.c_str(), (long long)atNs);
diff --git a/services/mediametrics/AudioPowerUsage.cpp b/services/mediametrics/AudioPowerUsage.cpp
index cca6b41..33dfa8fa 100644
--- a/services/mediametrics/AudioPowerUsage.cpp
+++ b/services/mediametrics/AudioPowerUsage.cpp
@@ -200,6 +200,34 @@
     return true;
 }
 
+bool AudioPowerUsage::saveAsItems_l(
+        int32_t device, int64_t duration_ns, int32_t type, double average_vol)
+{
+    ALOGV("%s: (%#x, %d, %lld, %f)", __func__, device, type,
+                                   (long long)duration_ns, average_vol );
+    if (duration_ns == 0) {
+        return true; // skip duration 0 usage
+    }
+    if (device == 0) {
+        return true; //ignore unknown device
+    }
+
+    bool ret = false;
+    const int32_t input_bit = device & INPUT_DEVICE_BIT;
+    int32_t device_bits = device ^ input_bit;
+
+    while (device_bits != 0) {
+        int32_t tmp_device = device_bits & -device_bits; // get lowest bit
+        device_bits ^= tmp_device;  // clear lowest bit
+        tmp_device |= input_bit;    // restore input bit
+        ret = saveAsItem_l(tmp_device, duration_ns, type, average_vol);
+
+        ALOGV("%s: device %#x recorded, remaining device_bits = %#x", __func__,
+            tmp_device, device_bits);
+    }
+    return ret;
+}
+
 void AudioPowerUsage::checkTrackRecord(
         const std::shared_ptr<const mediametrics::Item>& item, bool isTrack)
 {
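
saveAsItems_l above splits a multi-device bitmask into per-device items by repeatedly isolating the lowest set bit with device_bits & -device_bits and then clearing it. A tiny worked example of that decomposition, outside the framework:

    #include <cstdint>
    #include <cstdio>

    // 0x6 (two output devices OR'ed together) is decomposed into 0x2 and 0x4,
    // one saveAsItem_l-style call per device.
    int main() {
        int32_t deviceBits = 0x6;
        while (deviceBits != 0) {
            int32_t lowest = deviceBits & -deviceBits;  // isolate lowest set bit
            deviceBits ^= lowest;                       // clear it
            std::printf("record device %#x separately\n", static_cast<unsigned>(lowest));
        }
        return 0;
    }
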
@@ -245,7 +273,7 @@
         ALOGV("device = %s => %d", device_strings.c_str(), device);
     }
     std::lock_guard l(mLock);
-    saveAsItem_l(device, deviceTimeNs, type, deviceVolume);
+    saveAsItems_l(device, deviceTimeNs, type, deviceVolume);
 }
 
 void AudioPowerUsage::checkMode(const std::shared_ptr<const mediametrics::Item>& item)
@@ -262,7 +290,7 @@
         if (durationNs > 0) {
             mDeviceVolume = (mDeviceVolume * double(mVolumeTimeNs - mDeviceTimeNs) +
                     mVoiceVolume * double(endCallNs - mVolumeTimeNs)) / durationNs;
-            saveAsItem_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume);
+            saveAsItems_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume);
         }
     } else if (mode == "AUDIO_MODE_IN_CALL") { // entering call mode
         mStartCallNs = item->getTimestamp(); // advisory only
@@ -321,7 +349,7 @@
         if (durationNs > 0) {
             mDeviceVolume = (mDeviceVolume * double(mVolumeTimeNs - mDeviceTimeNs) +
                     mVoiceVolume * double(endDeviceNs - mVolumeTimeNs)) / durationNs;
-            saveAsItem_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume);
+            saveAsItems_l(mPrimaryDevice, durationNs, VOICE_CALL_TYPE, mDeviceVolume);
         }
         // reset statistics
         mDeviceVolume = 0;
diff --git a/services/mediametrics/AudioPowerUsage.h b/services/mediametrics/AudioPowerUsage.h
index 446ff4f..b705a6a 100644
--- a/services/mediametrics/AudioPowerUsage.h
+++ b/services/mediametrics/AudioPowerUsage.h
@@ -85,6 +85,8 @@
          REQUIRES(mLock);
     static void sendItem(const std::shared_ptr<const mediametrics::Item>& item);
     void collect();
+    bool saveAsItems_l(int32_t device, int64_t duration, int32_t type, double average_vol)
+         REQUIRES(mLock);
 
     AudioAnalytics * const mAudioAnalytics;
     const bool mDisabled;
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index a171cb0..04b906a 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -47,7 +47,11 @@
 
     virtual aaudio_result_t open(const aaudio::AAudioStreamRequest &request) = 0;
 
-    virtual aaudio_result_t close() = 0;
+    /*
+     * Perform any cleanup necessary before deleting the stream.
+     * This might include releasing and closing internal streams.
+     */
+    virtual void close() = 0;
 
     aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
 
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index 37d105b..220584c 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -36,18 +36,18 @@
 using namespace aaudio;   // TODO just import names needed
 
 AAudioServiceEndpointCapture::AAudioServiceEndpointCapture(AAudioService &audioService)
-        : mStreamInternalCapture(audioService, true) {
-    mStreamInternal = &mStreamInternalCapture;
+    : AAudioServiceEndpointShared(
+            (AudioStreamInternal *)(new AudioStreamInternalCapture(audioService, true))) {
 }
 
 AAudioServiceEndpointCapture::~AAudioServiceEndpointCapture() {
-    delete mDistributionBuffer;
+    delete[] mDistributionBuffer;
 }
 
 aaudio_result_t AAudioServiceEndpointCapture::open(const aaudio::AAudioStreamRequest &request) {
     aaudio_result_t result = AAudioServiceEndpointShared::open(request);
     if (result == AAUDIO_OK) {
-        delete mDistributionBuffer;
+        delete[] mDistributionBuffer;
         int distributionBufferSizeBytes = getStreamInternal()->getFramesPerBurst()
                                           * getStreamInternal()->getBytesPerFrame();
         mDistributionBuffer = new uint8_t[distributionBufferSizeBytes];
@@ -69,7 +69,7 @@
         // Read audio data from stream using a blocking read.
         result = getStreamInternal()->read(mDistributionBuffer, getFramesPerBurst(), timeoutNanos);
         if (result == AAUDIO_ERROR_DISCONNECTED) {
-            disconnectRegisteredStreams();
+            ALOGV("%s() read() returned AAUDIO_ERROR_DISCONNECTED, break", __func__);
             break;
         } else if (result != getFramesPerBurst()) {
             ALOGW("callbackLoop() read %d / %d",
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.h b/services/oboeservice/AAudioServiceEndpointCapture.h
index 971da9a..2bbe81d 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.h
+++ b/services/oboeservice/AAudioServiceEndpointCapture.h
@@ -36,8 +36,7 @@
     void *callbackLoop() override;
 
 private:
-    AudioStreamInternalCapture  mStreamInternalCapture;
-    uint8_t                    *mDistributionBuffer = nullptr;
+    uint8_t        *mDistributionBuffer = nullptr;
 };
 
 } /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 0843e0b..04c6453 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -226,7 +226,7 @@
     return result;
 }
 
-aaudio_result_t AAudioServiceEndpointMMAP::close() {
+void AAudioServiceEndpointMMAP::close() {
     if (mMmapStream != nullptr) {
         // Needs to be explicitly cleared or CTS will fail but it is not clear why.
         mMmapStream.clear();
@@ -235,8 +235,6 @@
         // FIXME Make closing synchronous.
         AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
     }
-
-    return AAUDIO_OK;
 }
 
 aaudio_result_t AAudioServiceEndpointMMAP::startStream(sp<AAudioServiceStreamBase> stream,
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 3d10861..b6003b6 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -50,7 +50,7 @@
 
     aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
 
-    aaudio_result_t close() override;
+    void close() override;
 
     aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
                                 audio_port_handle_t *clientHandle) override;
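close() now returns void throughout the endpoint hierarchy: the base class declares it pure virtual (virtual void close() = 0;) and the MMAP and shared endpoints override it without a result code to return. A minimal, self-contained sketch of that pattern, with hypothetical names (not the AAudio sources):

#include <cstdio>

class Endpoint {                            // hypothetical stand-in for the endpoint base class
public:
    virtual ~Endpoint() = default;
    virtual void close() = 0;               // pure virtual and void: nothing for callers to check
};

class MmapEndpoint : public Endpoint {      // hypothetical stand-in for the MMAP subclass
public:
    void close() override { std::printf("MMAP endpoint closed\n"); }
};

int main() {
    MmapEndpoint endpoint;
    endpoint.close();                       // no aaudio_result_t to propagate or ignore
    return 0;
}
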
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index bda4b90..dfe7193 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -42,8 +42,8 @@
 #define BURSTS_PER_BUFFER_DEFAULT   2
 
 AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService &audioService)
-        : mStreamInternalPlay(audioService, true) {
-    mStreamInternal = &mStreamInternalPlay;
+    : AAudioServiceEndpointShared(
+        (AudioStreamInternal *)(new AudioStreamInternalPlay(audioService, true))) {
 }
 
 aaudio_result_t AAudioServiceEndpointPlay::open(const aaudio::AAudioStreamRequest &request) {
@@ -145,7 +145,7 @@
         result = getStreamInternal()->write(mMixer.getOutputBuffer(),
                                             getFramesPerBurst(), timeoutNanos);
         if (result == AAUDIO_ERROR_DISCONNECTED) {
-            AAudioServiceEndpointShared::disconnectRegisteredStreams();
+            ALOGV("%s() write() returned AAUDIO_ERROR_DISCONNECTED, break", __func__);
             break;
         } else if (result != getFramesPerBurst()) {
             ALOGW("callbackLoop() wrote %d / %d",
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.h b/services/oboeservice/AAudioServiceEndpointPlay.h
index 981e430..160a1de 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.h
+++ b/services/oboeservice/AAudioServiceEndpointPlay.h
@@ -45,7 +45,6 @@
     void *callbackLoop() override;
 
 private:
-    AudioStreamInternalPlay  mStreamInternalPlay; // for playing output of mixer
     bool                     mLatencyTuningEnabled = false; // TODO implement tuning
     AAudioMixer              mMixer;    //
 };
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index dc21886..f5de59f 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -40,6 +40,9 @@
 // This is the maximum size in frames. The effective size can be tuned smaller at runtime.
 #define DEFAULT_BUFFER_CAPACITY   (48 * 8)
 
+AAudioServiceEndpointShared::AAudioServiceEndpointShared(AudioStreamInternal *streamInternal)
+    : mStreamInternal(streamInternal) {}
+
 std::string AAudioServiceEndpointShared::dump() const {
     std::stringstream result;
 
@@ -84,8 +87,8 @@
     return result;
 }
 
-aaudio_result_t AAudioServiceEndpointShared::close() {
-    return getStreamInternal()->releaseCloseFinal();
+void AAudioServiceEndpointShared::close() {
+    getStreamInternal()->releaseCloseFinal();
 }
 
 // Glue between C and C++ callbacks.
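With the stream held by the shared endpoint in a smart pointer (the android::sp member in the header hunk below), close() reduces to forwarding to releaseCloseFinal(); the endpoint no longer needs to delete the stream itself. A minimal sketch of that shape, using std::shared_ptr as a stand-in for android::sp, with hypothetical names:

#include <cstdio>
#include <memory>

class Stream {                                        // stand-in for AudioStreamInternal
public:
    void releaseCloseFinal() { std::printf("stream released and closed\n"); }
};

class SharedEndpoint {                                // stand-in for AAudioServiceEndpointShared
public:
    explicit SharedEndpoint(std::shared_ptr<Stream> stream) : mStream(std::move(stream)) {}
    void close() { mStream->releaseCloseFinal(); }    // forward only; no explicit delete needed
private:
    std::shared_ptr<Stream> mStream;                  // destroyed when the last reference drops
};

int main() {
    SharedEndpoint endpoint(std::make_shared<Stream>());
    endpoint.close();
    return 0;
}
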
diff --git a/services/oboeservice/AAudioServiceEndpointShared.h b/services/oboeservice/AAudioServiceEndpointShared.h
index bfc1744..020b926 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.h
+++ b/services/oboeservice/AAudioServiceEndpointShared.h
@@ -35,12 +35,13 @@
 class AAudioServiceEndpointShared : public AAudioServiceEndpoint {
 
 public:
+    explicit AAudioServiceEndpointShared(AudioStreamInternal *streamInternal);
 
     std::string dump() const override;
 
     aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
 
-    aaudio_result_t close() override;
+    void close() override;
 
     aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
                                 audio_port_handle_t *clientHandle) override;
@@ -57,15 +58,15 @@
 protected:
 
     AudioStreamInternal *getStreamInternal() const {
-        return mStreamInternal;
+        return mStreamInternal.get();
     };
 
     aaudio_result_t          startSharingThread_l();
 
     aaudio_result_t          stopSharingThread();
 
-    // pointer to object statically allocated in subclasses
-    AudioStreamInternal     *mStreamInternal = nullptr;
+    // An MMAP stream that is shared by multiple clients.
+    android::sp<AudioStreamInternal> mStreamInternal;
 
     std::atomic<bool>        mCallbackEnabled{false};