Merge "Surface MicrophoneDirection API in MediaRecorder"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 793cbf4..e584ffb 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -81,6 +81,7 @@
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libstagefright_xmlparser@1.0.so)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libstagefright_soft_*)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/vndk/libstagefright_soft_*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libaudiopolicyengineconfig*)
 
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/apex/Android.bp b/apex/Android.bp
index 9455290..f182856 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -42,6 +42,9 @@
     },
     key: "com.android.media.key",
     certificate: ":com.android.media.certificate",
+
+    // Use a custom AndroidManifest.xml for API targeting.
+    androidManifest: ":com.android.media-androidManifest",
 }
 
 apex {
@@ -50,6 +53,16 @@
     defaults: ["com.android.media-defaults"],
 }
 
+filegroup {
+    name: "com.android.media-androidManifest",
+    srcs: ["AndroidManifest-media.xml"],
+}
+
+filegroup {
+    name: "com.android.media.swcodec-androidManifest",
+    srcs: ["AndroidManifest-swcodec.xml"],
+}
+
 apex_defaults {
     name: "com.android.media.swcodec-defaults",
     native_shared_libs: [
@@ -58,6 +71,9 @@
     use_vendor: true,
     key: "com.android.media.swcodec.key",
     certificate: ":com.android.media.swcodec.certificate",
+
+    // Use a custom AndroidManifest.xml for API targeting.
+    androidManifest: ":com.android.media.swcodec-androidManifest",
 }
 
 apex {
diff --git a/apex/AndroidManifest-media.xml b/apex/AndroidManifest-media.xml
new file mode 100644
index 0000000..17d3f3a
--- /dev/null
+++ b/apex/AndroidManifest-media.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+  package="com.android.media">
+  <!-- APEX does not have classes.dex -->
+  <application android:hasCode="false" />
+  <uses-sdk
+      android:minSdkVersion="28"
+      android:targetSdkVersion="28"
+  />
+</manifest>
diff --git a/apex/AndroidManifest-swcodec.xml b/apex/AndroidManifest-swcodec.xml
new file mode 100644
index 0000000..bd20dc0
--- /dev/null
+++ b/apex/AndroidManifest-swcodec.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+  package="com.android.media.swcodec">
+  <!-- APEX does not have classes.dex -->
+  <application android:hasCode="false" />
+  <uses-sdk
+      android:minSdkVersion="28"
+      android:targetSdkVersion="28"
+  />
+</manifest>
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 0e969c7..3e8992a 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -108,7 +108,7 @@
      *
      * Also returns the set of currently-known camera IDs and state of each device.
      * Adding a listener will trigger the torch status listener to fire for all
-     * devices that have a flash unit
+     * devices that have a flash unit.
      */
     CameraStatus[] addListener(ICameraServiceListener listener);
 
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
index f871ce4..e9dcbdb 100644
--- a/camera/aidl/android/hardware/ICameraServiceListener.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -76,4 +76,11 @@
     const int TORCH_STATUS_UNKNOWN = -1;
 
     oneway void onTorchStatusChanged(int status, String cameraId);
+
+    /**
+     * Notify registered clients about camera access priority changes.
+     * Clients which were previously unable to open a certain camera device
+     * can retry after receiving this callback.
+     */
+    oneway void onCameraAccessPrioritiesChanged();
 }
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index 55bfa7e..c3407f0 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -86,6 +86,11 @@
             return binder::Status::ok();
         }
 
+        // Access priority API not implemented yet
+        virtual binder::Status onCameraAccessPrioritiesChanged() {
+            return binder::Status::ok();
+        }
+
       private:
         const wp<CameraManagerGlobal> mCameraManager;
     };
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 8c19e1d..b200abf 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -5688,13 +5688,17 @@
      *
      * <p>The ID of the active physical camera that's backing the logical camera. All camera
      * streams and metadata that are not physical camera specific will be originating from this
-     * physical camera. This must be one of valid physical IDs advertised in the physicalIds
-     * static tag.</p>
+     * physical camera.</p>
      * <p>For a logical camera made up of physical cameras where each camera's lenses have
      * different characteristics, the camera device may choose to switch between the physical
      * cameras when application changes FOCAL_LENGTH or SCALER_CROP_REGION.
      * At the time of lens switch, this result metadata reflects the new active physical camera
      * ID.</p>
+     * <p>This key will be available if the camera device advertises this key via {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS }.
+     * When available, this must be one of the valid physical IDs backing this logical multi-camera.
+     * If this key is not available for a logical multi-camera, the camera device implementation
+     * may still switch between different active physical cameras based on use case, but the
+     * current active physical camera information won't be available to the application.</p>
      */
     ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID =           // byte
             ACAMERA_LOGICAL_MULTI_CAMERA_START + 2,
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 8534b28..8fe029a 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -90,6 +90,11 @@
         return binder::Status::ok();
     };
 
+    virtual binder::Status onCameraAccessPrioritiesChanged() {
+        // No op
+        return binder::Status::ok();
+    }
+
     bool waitForNumCameras(size_t num) const {
         Mutex::Autolock l(mLock);
 
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index badb99e..d7dacb8 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -527,6 +527,10 @@
             mMockError = Status_V1_2::ERROR_DRM_SESSION_LOST_STATE;
         } else if (value == kFrameTooLargeValue) {
             mMockError = Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE;
+        } else if (value == kInvalidStateValue) {
+            mMockError = Status_V1_2::ERROR_DRM_INVALID_STATE;
+        } else {
+            mMockError = Status_V1_2::ERROR_DRM_UNKNOWN;
         }
     }
 
@@ -683,6 +687,10 @@
 Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
     std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
     std::vector<KeySetId> keySetIds;
+    if (mMockError != Status_V1_2::OK) {
+        _hidl_cb(toStatus_1_0(mMockError), keySetIds);
+        return Void();
+    }
     for (const auto& name : licenseNames) {
         std::vector<uint8_t> keySetId(name.begin(), name.end());
         keySetIds.push_back(keySetId);
@@ -693,6 +701,9 @@
 
 
 Return<Status> DrmPlugin::removeOfflineLicense(const KeySetId& keySetId) {
+    if (mMockError != Status_V1_2::OK) {
+        return toStatus_1_0(mMockError);
+    }
     std::string licenseName(keySetId.begin(), keySetId.end());
     if (mFileHandle.DeleteLicense(licenseName)) {
         return Status::OK;
@@ -706,7 +717,9 @@
     DeviceFiles::LicenseState state;
     std::string license;
     OfflineLicenseState hLicenseState;
-    if (mFileHandle.RetrieveLicense(licenseName, &state, &license)) {
+    if (mMockError != Status_V1_2::OK) {
+        _hidl_cb(toStatus_1_0(mMockError), OfflineLicenseState::UNKNOWN);
+    } else if (mFileHandle.RetrieveLicense(licenseName, &state, &license)) {
         switch (state) {
         case DeviceFiles::kLicenseStateActive:
             hLicenseState = OfflineLicenseState::USABLE;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h
index 1bbc822..b83ce69 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h
@@ -40,6 +40,7 @@
 static const std::string kResourceContentionValue("resourceContention");
 static const std::string kLostStateValue("lostState");
 static const std::string kFrameTooLargeValue("frameTooLarge");
+static const std::string kInvalidStateValue("invalidState");
 
 static const std::string kDeviceIdKey("deviceId");
 static const uint8_t kTestDeviceIdData[] =
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 10d8b13..1f24413 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -16,6 +16,7 @@
 	libhwbinder \
 	libmedia \
 	libmedialogservice \
+	libmediautils \
 	libnbaio \
 	libnblog \
 	libsoundtriggerservice \
diff --git a/media/bufferpool/2.0/Android.bp b/media/bufferpool/2.0/Android.bp
index cd4e06e..c71ac17 100644
--- a/media/bufferpool/2.0/Android.bp
+++ b/media/bufferpool/2.0/Android.bp
@@ -1,6 +1,9 @@
 cc_library {
     name: "libstagefright_bufferpool@2.0",
     vendor_available: true,
+    vndk: {
+        enabled: true,
+    },
     srcs: [
         "Accessor.cpp",
         "AccessorImpl.cpp",
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index 2c0a7a0..7045b6a 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2019 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -39,7 +39,7 @@
 namespace android {
 
 class C2SoftHevcEnc::IntfImpl : public C2InterfaceHelper {
-   public:
+  public:
     explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper>& helper)
         : C2InterfaceHelper(helper) {
         setDerivedInstance(this);
@@ -73,6 +73,7 @@
                              0u, (uint64_t)C2MemoryUsage::CPU_READ))
                          .build());
 
+        // matches size limits in codec library
         addParameter(
             DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
                 .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
@@ -91,6 +92,7 @@
                     Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
                 .build());
 
+        // matches limits in codec library
         addParameter(
             DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
                 .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
@@ -98,6 +100,7 @@
                 .withSetter(BitrateSetter)
                 .build());
 
+        // matches levels allowed within codec library
         addParameter(
             DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
                 .withDefault(new C2StreamProfileLevelInfo::output(
@@ -137,7 +140,7 @@
                              C2P<C2StreamBitrateInfo::output>& me) {
         (void)mayBlock;
         C2R res = C2R::Ok();
-        if (me.v.value <= 4096) {
+        if (me.v.value < 4096) {
             me.set().value = 4096;
         }
         return res;
@@ -278,7 +281,7 @@
         return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
     }
 
-   std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const {
+    std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const {
         return mSize;
     }
     std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const {
@@ -304,18 +307,21 @@
     std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
     std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
 };
+
 constexpr char COMPONENT_NAME[] = "c2.android.hevc.encoder";
 
 static size_t GetCPUCoreCount() {
-    long cpuCoreCount = 1;
+    long cpuCoreCount = 0;
+
 #if defined(_SC_NPROCESSORS_ONLN)
     cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
 #else
     // _SC_NPROC_ONLN must be defined...
     cpuCoreCount = sysconf(_SC_NPROC_ONLN);
 #endif
-    CHECK(cpuCoreCount >= 1);
-    ALOGV("Number of CPU cores: %ld", cpuCoreCount);
+
+    if (cpuCoreCount < 1) {
+        cpuCoreCount = 1;
+    }
     return (size_t)cpuCoreCount;
 }
 
@@ -383,7 +389,7 @@
 
 c2_status_t C2SoftHevcEnc::initEncParams() {
     mCodecCtx = nullptr;
-    mNumCores = MIN(GetCPUCoreCount(), CODEC_MAX_CORES);
+    mNumCores = std::min(GetCPUCoreCount(), (size_t) CODEC_MAX_CORES);
     memset(&mEncParams, 0, sizeof(ihevce_static_cfg_params_t));
 
     // default configuration
@@ -397,7 +403,8 @@
     mEncParams.s_src_prms.i4_width = mSize->width;
     mEncParams.s_src_prms.i4_height = mSize->height;
     mEncParams.s_src_prms.i4_frm_rate_denom = 1000;
-    mEncParams.s_src_prms.i4_frm_rate_num = mFrameRate->value * mEncParams.s_src_prms.i4_frm_rate_denom;
+    mEncParams.s_src_prms.i4_frm_rate_num =
+        mFrameRate->value * mEncParams.s_src_prms.i4_frm_rate_denom;
     mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P5;
     mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_tgt_bitrate[0] =
         mBitrate->value;
@@ -470,7 +477,7 @@
                                          const C2GraphicView* const input,
                                          uint64_t timestamp) {
     ihevce_static_cfg_params_t* params = &mEncParams;
-    memset(ps_encode_ip, 0, sizeof(ihevce_inp_buf_t));
+    memset(ps_encode_ip, 0, sizeof(*ps_encode_ip));
 
     if (!input) {
         return C2_OK;
@@ -495,13 +502,14 @@
     int32_t uStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
     int32_t vStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
 
-    uint32_t width = mSize->width;
-    uint32_t height = mSize->height;
+    const uint32_t width = mSize->width;
+    const uint32_t height = mSize->height;
 
-    // width and height are always even
-    // width and height are always even (as block size is 16x16)
-    CHECK_EQ((width & 1u), 0u);
-    CHECK_EQ((height & 1u), 0u);
+    // width and height must be even
+    if (width & 1u || height & 1u) {
+        ALOGW("height(%u) and width(%u) must both be even", height, width);
+        return C2_BAD_VALUE;
+    }
 
     size_t yPlaneSize = width * height;
 
@@ -650,6 +658,7 @@
         if (view->error() != C2_OK) {
             ALOGE("graphic view map err = %d", view->error());
             mSignalledError = true;
+            work->result = C2_CORRUPTED;
             return;
         }
     }
@@ -687,8 +696,8 @@
 
     status = setEncodeArgs(&s_encode_ip, view.get(), timestamp);
     if (C2_OK != status) {
-        mSignalledError = true;
         ALOGE("setEncodeArgs failed : 0x%x", status);
+        mSignalledError = true;
         work->result = status;
         return;
     }
@@ -761,8 +770,9 @@
         : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
               GetCodec2PlatformComponentStore()->getParamReflector())) {}
 
-    virtual c2_status_t createComponent(
-        c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+    c2_status_t createComponent(
+        c2_node_id_t id,
+        std::shared_ptr<C2Component>* const component,
         std::function<void(C2Component*)> deleter) override {
         *component = std::shared_ptr<C2Component>(
             new C2SoftHevcEnc(
@@ -772,8 +782,9 @@
         return C2_OK;
     }
 
-    virtual c2_status_t createInterface(
-        c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+    c2_status_t createInterface(
+        c2_node_id_t id,
+        std::shared_ptr<C2ComponentInterface>* const interface,
         std::function<void(C2ComponentInterface*)> deleter) override {
         *interface = std::shared_ptr<C2ComponentInterface>(
             new SimpleInterface<C2SoftHevcEnc::IntfImpl>(
@@ -783,7 +794,7 @@
         return C2_OK;
     }
 
-    virtual ~C2SoftHevcEncFactory() override = default;
+    ~C2SoftHevcEncFactory() override = default;
 
    private:
     std::shared_ptr<C2ReflectorHelper> mHelper;
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
index c22fea2..9d90b95 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.h
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2018 The Android Open Source Project
+ * Copyright 2019 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,18 +17,18 @@
 #ifndef ANDROID_C2_SOFT_HEVC_ENC_H_
 #define ANDROID_C2_SOFT_HEVC_ENC_H_
 
-#include <map>
-#include <utils/Vector.h>
-#include <media/stagefright/foundation/ColorUtils.h>
 #include <SimpleC2Component.h>
+#include <algorithm>
+#include <map>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <utils/Vector.h>
 
 #include "ihevc_typedefs.h"
 
 namespace android {
-#define MIN(a, b) ((a) < (b)) ? (a) : (b)
 
 /** Get time */
-#define GETTIME(a, b) gettimeofday(a, b);
+#define GETTIME(a, b) gettimeofday(a, b)
 
 /** Compute difference between start and end */
 #define TIME_DIFF(start, end, diff)                      \
@@ -55,7 +55,7 @@
                       const std::shared_ptr<C2BlockPool>& pool) override;
 
    protected:
-    virtual ~C2SoftHevcEnc();
+    ~C2SoftHevcEnc() override;
 
    private:
     std::shared_ptr<IntfImpl> mIntf;
diff --git a/media/codec2/components/opus/C2SoftOpusDec.cpp b/media/codec2/components/opus/C2SoftOpusDec.cpp
index 680712e..7dcd53d 100644
--- a/media/codec2/components/opus/C2SoftOpusDec.cpp
+++ b/media/codec2/components/opus/C2SoftOpusDec.cpp
@@ -252,20 +252,25 @@
     const uint8_t *data = rView.data() + inOffset;
     if (mInputBufferCount < 3) {
         if (mInputBufferCount == 0) {
-            size_t opusHeadSize = inSize;
+            size_t opusHeadSize = 0;
             size_t codecDelayBufSize = 0;
             size_t seekPreRollBufSize = 0;
-            void *opusHeadBuf = (void *)data;
+            void *opusHeadBuf = NULL;
             void *codecDelayBuf = NULL;
             void *seekPreRollBuf = NULL;
 
-            GetOpusHeaderBuffers(data, inSize, &opusHeadBuf,
-                                &opusHeadSize, &codecDelayBuf,
-                                &codecDelayBufSize, &seekPreRollBuf,
-                                &seekPreRollBufSize);
+            if (!GetOpusHeaderBuffers(data, inSize, &opusHeadBuf,
+                                     &opusHeadSize, &codecDelayBuf,
+                                     &codecDelayBufSize, &seekPreRollBuf,
+                                     &seekPreRollBufSize)) {
+                ALOGE("%s encountered error in GetOpusHeaderBuffers", __func__);
+                mSignalledError = true;
+                work->result = C2_CORRUPTED;
+                return;
+            }
 
             if (!ParseOpusHeader((uint8_t *)opusHeadBuf, opusHeadSize, &mHeader)) {
-                ALOGE("Encountered error while Parsing Opus Header.");
+                ALOGE("%s Encountered error while Parsing Opus Header.", __func__);
                 mSignalledError = true;
                 work->result = C2_CORRUPTED;
                 return;
@@ -304,16 +309,16 @@
                 return;
             }
 
-            if (codecDelayBuf && codecDelayBufSize == 8) {
+            if (codecDelayBuf && codecDelayBufSize == sizeof(uint64_t)) {
                 uint64_t value;
                 memcpy(&value, codecDelayBuf, sizeof(uint64_t));
                 mCodecDelay = ns_to_samples(value, kRate);
                 mSamplesToDiscard = mCodecDelay;
                 ++mInputBufferCount;
             }
-            if (seekPreRollBuf && seekPreRollBufSize == 8) {
+            if (seekPreRollBuf && seekPreRollBufSize == sizeof(uint64_t)) {
                 uint64_t value;
-                memcpy(&value, codecDelayBuf, sizeof(uint64_t));
+                memcpy(&value, seekPreRollBuf, sizeof(uint64_t));
                 mSeekPreRoll = ns_to_samples(value, kRate);
                 ++mInputBufferCount;
             }
diff --git a/media/codec2/core/Android.bp b/media/codec2/core/Android.bp
index b723755..a7e8997 100644
--- a/media/codec2/core/Android.bp
+++ b/media/codec2/core/Android.bp
@@ -7,6 +7,9 @@
 cc_library_shared {
     name: "libcodec2",
     vendor_available: true,
+    vndk: {
+        enabled: true,
+    },
 
     srcs: ["C2.cpp"],
 
diff --git a/media/codec2/hidl/1.0/utils/Android.bp b/media/codec2/hidl/1.0/utils/Android.bp
index d0296a5..f5aa65b 100644
--- a/media/codec2/hidl/1.0/utils/Android.bp
+++ b/media/codec2/hidl/1.0/utils/Android.bp
@@ -3,6 +3,9 @@
 cc_library {
     name: "libcodec2_hidl@1.0",
     vendor_available: true,
+    vndk: {
+        enabled: true,
+    },
 
     defaults: ["hidl_defaults"],
 
diff --git a/media/codec2/hidl/1.0/utils/Component.cpp b/media/codec2/hidl/1.0/utils/Component.cpp
index 0473b57..f3bf6f7 100644
--- a/media/codec2/hidl/1.0/utils/Component.cpp
+++ b/media/codec2/hidl/1.0/utils/Component.cpp
@@ -22,7 +22,6 @@
 #include <codec2/hidl/1.0/ComponentStore.h>
 #include <codec2/hidl/1.0/InputBufferManager.h>
 
-#include <android/hardware/media/c2/1.0/IInputSink.h>
 #include <hidl/HidlBinderSupport.h>
 #include <utils/Timers.h>
 
@@ -298,19 +297,12 @@
 Return<void> Component::connectToInputSurface(
         const sp<IInputSurface>& inputSurface,
         connectToInputSurface_cb _hidl_cb) {
-    sp<Sink> sink;
-    {
-        std::lock_guard<std::mutex> lock(mSinkMutex);
-        if (!mSink) {
-            mSink = new Sink(shared_from_this());
-        }
-        sink = mSink;
-    }
     Status status;
     sp<IInputSurfaceConnection> connection;
-    auto transStatus = inputSurface->connect(sink,
-            [&status, &connection](Status s,
-                                   const sp<IInputSurfaceConnection>& c) {
+    auto transStatus = inputSurface->connect(
+            asInputSink(),
+            [&status, &connection](
+                    Status s, const sp<IInputSurfaceConnection>& c) {
                 status = s;
                 connection = c;
             }
@@ -454,6 +446,14 @@
     return sp<IComponentInterface>(mInterface);
 }
 
+Return<sp<IInputSink>> Component::asInputSink() {
+    std::lock_guard<std::mutex> lock(mSinkMutex);
+    if (!mSink) {
+        mSink = new Sink(shared_from_this());
+    }
+    return {mSink};
+}
+
 std::shared_ptr<C2Component> Component::findLocalComponent(
         const sp<IInputSink>& sink) {
     return Component::Sink::findLocalComponent(sink);
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
index 4ac95c5..e444013 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
@@ -26,6 +26,7 @@
 #include <android/hardware/media/c2/1.0/IComponentInterface.h>
 #include <android/hardware/media/c2/1.0/IComponentListener.h>
 #include <android/hardware/media/c2/1.0/IComponentStore.h>
+#include <android/hardware/media/c2/1.0/IInputSink.h>
 #include <hidl/Status.h>
 #include <hwbinder/IBinder.h>
 
@@ -94,6 +95,7 @@
     virtual Return<Status> reset() override;
     virtual Return<Status> release() override;
     virtual Return<sp<IComponentInterface>> getInterface() override;
+    virtual Return<sp<IInputSink>> asInputSink() override;
 
     // Returns a C2Component associated to the given sink if the sink is indeed
     // a local component. Returns nullptr otherwise.
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 7a444a3..fb6af93 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -99,6 +99,34 @@
      */
     virtual size_t numClientBuffers() const = 0;
 
+    void handleImageData(const sp<Codec2Buffer> &buffer) {
+        sp<ABuffer> imageDataCandidate = buffer->getImageData();
+        if (imageDataCandidate == nullptr) {
+            return;
+        }
+        sp<ABuffer> imageData;
+        if (!mFormat->findBuffer("image-data", &imageData)
+                || imageDataCandidate->size() != imageData->size()
+                || memcmp(imageDataCandidate->data(), imageData->data(), imageData->size()) != 0) {
+            ALOGD("[%s] updating image-data", mName);
+            sp<AMessage> newFormat = dupFormat();
+            newFormat->setBuffer("image-data", imageDataCandidate);
+            MediaImage2 *img = (MediaImage2*)imageDataCandidate->data();
+            if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
+                int32_t stride = img->mPlane[0].mRowInc;
+                newFormat->setInt32(KEY_STRIDE, stride);
+                ALOGD("[%s] updating stride = %d", mName, stride);
+                if (img->mNumPlanes > 1 && stride > 0) {
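+                    // offset gap between the first two planes, divided by the row stride,
+                    // gives the vertical stride (slice height)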
+                    int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
+                    newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
+                    ALOGD("[%s] updating vstride = %d", mName, vstride);
+                }
+            }
+            setFormat(newFormat);
+            buffer->setFormat(newFormat);
+        }
+    }
+
 protected:
     std::string mComponentName; ///< name of component for debugging
     std::string mChannelName; ///< name of channel for debugging
@@ -255,34 +283,6 @@
         mSkipCutBuffer = scb;
     }
 
-    void handleImageData(const sp<Codec2Buffer> &buffer) {
-        sp<ABuffer> imageDataCandidate = buffer->getImageData();
-        if (imageDataCandidate == nullptr) {
-            return;
-        }
-        sp<ABuffer> imageData;
-        if (!mFormat->findBuffer("image-data", &imageData)
-                || imageDataCandidate->size() != imageData->size()
-                || memcmp(imageDataCandidate->data(), imageData->data(), imageData->size()) != 0) {
-            ALOGD("[%s] updating image-data", mName);
-            sp<AMessage> newFormat = dupFormat();
-            newFormat->setBuffer("image-data", imageDataCandidate);
-            MediaImage2 *img = (MediaImage2*)imageDataCandidate->data();
-            if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
-                int32_t stride = img->mPlane[0].mRowInc;
-                newFormat->setInt32(KEY_STRIDE, stride);
-                ALOGD("[%s] updating stride = %d", mName, stride);
-                if (img->mNumPlanes > 1 && stride > 0) {
-                    int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
-                    newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
-                    ALOGD("[%s] updating vstride = %d", mName, vstride);
-                }
-            }
-            setFormat(newFormat);
-            buffer->setFormat(newFormat);
-        }
-    }
-
 protected:
     sp<SkipCutBuffer> mSkipCutBuffer;
 
@@ -783,6 +783,7 @@
         status_t err = mImpl.grabBuffer(index, &c2Buffer);
         if (err == OK) {
             c2Buffer->setFormat(mFormat);
+            handleImageData(c2Buffer);
             *buffer = c2Buffer;
             return true;
         }
@@ -1053,6 +1054,7 @@
             return false;
         }
         *index = mImpl.assignSlot(newBuffer);
+        handleImageData(newBuffer);
         *buffer = newBuffer;
         return true;
     }
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index e0b1355..ab6a105 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -14,6 +14,9 @@
 cc_library_shared {
     name: "libcodec2_vndk",
     vendor_available: true,
+    vndk: {
+        enabled: true,
+    },
 
     srcs: [
         "C2AllocatorIon.cpp",
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 4200a46..a399940 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -119,6 +119,9 @@
     const mkvparser::BlockEntry *mBlockEntry;
     long mBlockEntryIndex;
 
+    unsigned long mTrackType;
+    void seekwithoutcue_l(int64_t seekTimeUs, int64_t *actualFrameTimeUs);
+
     void advance_l();
 
     BlockIterator(const BlockIterator &);
@@ -290,6 +293,7 @@
       mCluster(NULL),
       mBlockEntry(NULL),
       mBlockEntryIndex(0) {
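+    // remember the track type (Matroska: 1 == video) so seekwithoutcue_l() can snap video seeks to key frames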
+    mTrackType = mExtractor->mSegment->GetTracks()->GetTrackByNumber(trackNum)->GetType();
     reset();
 }
 
@@ -442,12 +446,14 @@
         }
 
         if (!pCues) {
-            ALOGE("No Cues in file");
+            ALOGV("No Cues in file,seek without cue data");
+            seekwithoutcue_l(seekTimeUs, actualFrameTimeUs);
             return;
         }
     }
     else if (!pSH) {
-        ALOGE("No SeekHead");
+        ALOGV("No SeekHead, seek without cue data");
+        seekwithoutcue_l(seekTimeUs, actualFrameTimeUs);
         return;
     }
 
@@ -456,7 +462,9 @@
     while (!pCues->DoneParsing()) {
         pCues->LoadCuePoint();
         pCP = pCues->GetLast();
-        CHECK(pCP);
+        ALOGV("pCP = %s", pCP == NULL ? "NULL" : "not NULL");
+        if (pCP == NULL) {
+            continue;
+        }
 
         size_t trackCount = mExtractor->mTracks.size();
         for (size_t index = 0; index < trackCount; ++index) {
@@ -494,6 +502,7 @@
     // Always *search* based on the video track, but finalize based on mTrackNum
     if (!pTP) {
         ALOGE("Did not locate the video track for seeking");
+        seekwithoutcue_l(seekTimeUs, actualFrameTimeUs);
         return;
     }
 
@@ -537,6 +546,31 @@
     return (mBlockEntry->GetBlock()->GetTime(mCluster) + 500ll) / 1000ll;
 }
 
+void BlockIterator::seekwithoutcue_l(int64_t seekTimeUs, int64_t *actualFrameTimeUs) {
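+    // Segment::FindCluster() expects nanoseconds, so convert from microseconds.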
+    mCluster = mExtractor->mSegment->FindCluster(seekTimeUs * 1000ll);
+    const long status = mCluster->GetFirst(mBlockEntry);
+    if (status < 0) {  // error
+        ALOGE("get last blockenry failed!");
+        mCluster = NULL;
+        return;
+    }
+    mBlockEntryIndex = 0;
+    while (!eos() && ((block()->GetTrackNumber() != mTrackNum) || (blockTimeUs() < seekTimeUs))) {
+        advance_l();
+    }
+
+    // video track will seek to the next key frame.
+    if (mTrackType == 1) {
+        while (!eos() && ((block()->GetTrackNumber() != mTrackNum) ||
+                      !mBlockEntry->GetBlock()->IsKey())) {
+            advance_l();
+        }
+    }
+    *actualFrameTimeUs = blockTimeUs();
+     ALOGV("seekTimeUs:%lld, actualFrameTimeUs:%lld, tracknum:%lld",
+              (long long)seekTimeUs, (long long)*actualFrameTimeUs, (long long)mTrackNum);
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 static unsigned U24_AT(const uint8_t *ptr) {
@@ -956,17 +990,56 @@
         return;
     }
 
-    // from mkvparser::Segment::Load(), but stop at first cluster
-    ret = mSegment->ParseHeaders();
-    if (ret == 0) {
-        long len;
-        ret = mSegment->LoadCluster(pos, len);
-        if (ret >= 1) {
-            // no more clusters
-            ret = 0;
+    if (mIsLiveStreaming) {
+        // from mkvparser::Segment::Load(), but stop at first cluster
+        ret = mSegment->ParseHeaders();
+        if (ret == 0) {
+            long len;
+            ret = mSegment->LoadCluster(pos, len);
+            if (ret >= 1) {
+                // no more clusters
+                ret = 0;
+            }
+        } else if (ret > 0) {
+            ret = mkvparser::E_BUFFER_NOT_FULL;
         }
-    } else if (ret > 0) {
-        ret = mkvparser::E_BUFFER_NOT_FULL;
+    } else {
+        ret = mSegment->ParseHeaders();
+        if (ret < 0) {
+            ALOGE("Segment parse header return fail %lld", ret);
+            delete mSegment;
+            mSegment = NULL;
+            return;
+        } else if (ret == 0) {
+            const mkvparser::Cues* mCues = mSegment->GetCues();
+            const mkvparser::SeekHead* mSH = mSegment->GetSeekHead();
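+            // Cues not parsed yet: look for a Cues entry in the SeekHead and parse it.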
+            if ((mCues == NULL) && (mSH != NULL)) {
+                size_t count = mSH->GetCount();
+                const mkvparser::SeekHead::Entry* mEntry;
+                for (size_t index = 0; index < count; index++) {
+                    mEntry = mSH->GetEntry(index);
+                    if (mEntry->id == 0x0C53BB6B) {  // Cues ID
+                        long len;
+                        long long pos;
+                        mSegment->ParseCues(mEntry->pos, pos, len);
+                        mCues = mSegment->GetCues();
+                        ALOGV("find cue data by seekhead");
+                        break;
+                    }
+                }
+            }
+
+            if (mCues) {
+                long len;
+                ret = mSegment->LoadCluster(pos, len);
+                ALOGV("has Cue data, Cluster num=%ld", mSegment->GetCount());
+            } else  {
+                long status_Load = mSegment->Load();
+                ALOGW("no Cue data,Segment Load status:%ld",status_Load);
+            }
+        } else if (ret > 0) {
+            ret = mkvparser::E_BUFFER_NOT_FULL;
+        }
     }
 
     if (ret < 0) {
@@ -1557,6 +1630,21 @@
                 } else if (!strcmp("A_FLAC", codecID)) {
                     AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_FLAC);
                     err = addFlacMetadata(meta, codecPrivate, codecPrivateSize);
+                } else if ((!strcmp("A_MS/ACM", codecID))) {
+                    if ((NULL == codecPrivate) || (codecPrivateSize < 30)) {
+                        ALOGW("unsupported audio: A_MS/ACM has no valid private data: %s, size: %zu",
+                               codecPrivate == NULL ? "null" : "non-null", codecPrivateSize);
+                        continue;
+                    } else {
+                        uint16_t ID = *(uint16_t *)codecPrivate;
+                        if (ID == 0x0055) {
+                            AMediaFormat_setString(meta,
+                                    AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_MPEG);
+                        } else {
+                            ALOGW("A_MS/ACM unsupported type , continue");
+                            continue;
+                        }
+                    }
                 } else {
                     ALOGW("%s is not supported.", codecID);
                     continue;
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
index 59a2e9b..13d60c8 100644
--- a/media/extractors/mp4/AC4Parser.cpp
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -260,7 +260,7 @@
 
     int32_t short_program_id = -1;
     if (bitstream_version > 1) {
-        if (ac4_dsi_version == 0){
+        if (ac4_dsi_version == 0) {
             ALOGE("invalid ac4 dsi");
             return false;
         }
@@ -295,6 +295,7 @@
         bool b_single_substream_group = false;
         uint32_t presentation_config = 0, presentation_version = 0;
         uint32_t pres_bytes = 0;
+        uint64_t start = 0;
 
         if (ac4_dsi_version == 0) {
             CHECK_BITS_LEFT(1 + 5 + 5);
@@ -315,6 +316,8 @@
                 mBitReader.skipBits(pres_bytes * 8);
                 continue;
             }
+            /* record a marker, less the size of the presentation_config */
+            start = (mDSISize - mBitReader.numBitsLeft()) / 8;
             // ac4_presentation_v0_dsi(), ac4_presentation_v1_dsi() and ac4_presentation_v2_dsi()
             // all start with a presentation_config of 5 bits
             CHECK_BITS_LEFT(5);
@@ -338,9 +341,6 @@
             (presentation_config >= NELEM(PresentationConfig) ?
             "reserved" : PresentationConfig[presentation_config]));
 
-        /* record a marker, less the size of the presentation_config */
-        uint64_t start = (mDSISize - mBitReader.numBitsLeft()) / 8;
-
         bool b_add_emdf_substreams = false;
         if (!b_single_substream_group && presentation_config == 6) {
             b_add_emdf_substreams = true;
@@ -535,14 +535,14 @@
                     }
                     break;
                 }
-                CHECK_BITS_LEFT(1 + 1);
-                bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
-                mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
-                b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
-                ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
-                ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
-                    BOOLSTR(b_add_emdf_substreams));
             }
+            CHECK_BITS_LEFT(1 + 1);
+            bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
+            mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
+            b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
+            ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
+            ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
+                BOOLSTR(b_add_emdf_substreams));
         }
         if (b_add_emdf_substreams) {
             CHECK_BITS_LEFT(7);
@@ -599,10 +599,6 @@
 
         if (ac4_dsi_version == 1) {
             uint64_t end = (mDSISize - mBitReader.numBitsLeft()) / 8;
-            if (mBitReader.numBitsLeft() % 8 != 0) {
-                end += 1;
-            }
-
             uint64_t presentation_bytes = end - start;
             uint64_t skip_bytes = pres_bytes - presentation_bytes;
             ALOGV("skipping = %" PRIu64 " bytes", skip_bytes);
@@ -612,7 +608,7 @@
 
         // we should know this or something is probably wrong
         // with the bitstream (or we don't support it)
-        if (mPresentations[presentation].mChannelMode == -1){
+        if (mPresentations[presentation].mChannelMode == -1) {
             ALOGE("could not determing channel mode of presentation %d", presentation);
             return false;
         }
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index c776c51..9f27528 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -90,7 +90,7 @@
     virtual media_status_t getFormat(AMediaFormat *);
 
     virtual media_status_t read(MediaBufferHelper **buffer, const ReadOptions *options = NULL);
-    virtual bool supportNonblockingRead() { return true; }
+    bool supportsNonBlockingRead() override { return true; }
     virtual media_status_t fragmentedRead(
             MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
@@ -142,6 +142,7 @@
     uint8_t *mSrcBuffer;
 
     bool mIsHeif;
+    bool mIsAudio;
     sp<ItemTable> mItemTable;
 
     // Start offset from composition time to presentation time.
@@ -4530,6 +4531,7 @@
     }
 
     mIsPcm = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW);
+    mIsAudio = !strncasecmp(mime, "audio/", 6);
 
     if (mIsPcm) {
         int32_t numChannels = 0;
@@ -5416,8 +5418,11 @@
                 findFlags = SampleTable::kFlagBefore;
             }
 
-            uint32_t syncSampleIndex;
-            if (err == OK) {
+            uint32_t syncSampleIndex = sampleIndex;
+            // assume every audio sample is a sync sample. This works around
+            // seek issues with files that were incorrectly written with an
+            // empty or single-sample stss block for the audio track
+            if (err == OK && !mIsAudio) {
                 err = mSampleTable->findSyncSampleNear(
                         sampleIndex, &syncSampleIndex, findFlags);
             }
diff --git a/media/extractors/wav/WAVExtractor.cpp b/media/extractors/wav/WAVExtractor.cpp
index e58bd1f..5679de8 100644
--- a/media/extractors/wav/WAVExtractor.cpp
+++ b/media/extractors/wav/WAVExtractor.cpp
@@ -81,7 +81,7 @@
     virtual media_status_t read(
             MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
-    virtual bool supportNonblockingRead() { return true; }
+    bool supportsNonBlockingRead() override { return true; }
 
 protected:
     virtual ~WAVSource();
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index eb8ab48..5851533 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -70,70 +70,34 @@
 
 // ---------------------------------------------------------------------------
 
-static std::string audioFormatTypeString(audio_format_t value) {
-    std::string formatType;
-    if (FormatConverter::toString(value, formatType)) {
-        return formatType;
-    }
-    char rawbuffer[16];  // room for "%d"
-    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
-    return rawbuffer;
-}
-
-static std::string audioSourceString(audio_source_t value) {
-    std::string source;
-    if (SourceTypeConverter::toString(value, source)) {
-        return source;
-    }
-    char rawbuffer[16];  // room for "%d"
-    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
-    return rawbuffer;
-}
-
 void AudioRecord::MediaMetrics::gather(const AudioRecord *record)
 {
-    // key for media statistics is defined in the header
-    // attrs for media statistics
-    // NB: these are matched with public Java API constants defined
-    // in frameworks/base/media/java/android/media/AudioRecord.java
-    // These must be kept synchronized with the constants there.
-    static constexpr char kAudioRecordEncoding[] = "android.media.audiorecord.encoding";
-    static constexpr char kAudioRecordSource[] = "android.media.audiorecord.source";
-    static constexpr char kAudioRecordLatency[] = "android.media.audiorecord.latency";
-    static constexpr char kAudioRecordSampleRate[] = "android.media.audiorecord.samplerate";
-    static constexpr char kAudioRecordChannelCount[] = "android.media.audiorecord.channels";
-    static constexpr char kAudioRecordCreated[] = "android.media.audiorecord.createdMs";
-    static constexpr char kAudioRecordDuration[] = "android.media.audiorecord.durationMs";
-    static constexpr char kAudioRecordCount[] = "android.media.audiorecord.n";
-    static constexpr char kAudioRecordError[] = "android.media.audiorecord.errcode";
-    static constexpr char kAudioRecordErrorFunction[] = "android.media.audiorecord.errfunc";
+#define MM_PREFIX "android.media.audiorecord." // avoid cut-n-paste errors.
 
-    // constructor guarantees mAnalyticsItem is valid
+    // Java API 28 entries, do not change.
+    mAnalyticsItem->setCString(MM_PREFIX "encoding", toString(record->mFormat).c_str());
+    mAnalyticsItem->setCString(MM_PREFIX "source", toString(record->mAttributes.source).c_str());
+    mAnalyticsItem->setInt32(MM_PREFIX "latency", (int32_t)record->mLatency); // bad estimate.
+    mAnalyticsItem->setInt32(MM_PREFIX "samplerate", (int32_t)record->mSampleRate);
+    mAnalyticsItem->setInt32(MM_PREFIX "channels", (int32_t)record->mChannelCount);
 
-    mAnalyticsItem->setInt32(kAudioRecordLatency, record->mLatency);
-    mAnalyticsItem->setInt32(kAudioRecordSampleRate, record->mSampleRate);
-    mAnalyticsItem->setInt32(kAudioRecordChannelCount, record->mChannelCount);
-    mAnalyticsItem->setCString(kAudioRecordEncoding,
-                               audioFormatTypeString(record->mFormat).c_str());
-    mAnalyticsItem->setCString(kAudioRecordSource,
-                               audioSourceString(record->mAttributes.source).c_str());
+    // Non-API entries, these can change.
+    mAnalyticsItem->setInt32(MM_PREFIX "portId", (int32_t)record->mPortId);
+    mAnalyticsItem->setInt32(MM_PREFIX "frameCount", (int32_t)record->mFrameCount);
+    mAnalyticsItem->setCString(MM_PREFIX "attributes", toString(record->mAttributes).c_str());
+    mAnalyticsItem->setInt64(MM_PREFIX "channelMask", (int64_t)record->mChannelMask);
 
-    // log total duration recording, including anything currently running [and count].
-    nsecs_t active = 0;
+    // log total duration recording, including anything currently running.
+    int64_t activeNs = 0;
     if (mStartedNs != 0) {
-        active = systemTime() - mStartedNs;
+        activeNs = systemTime() - mStartedNs;
     }
-    mAnalyticsItem->setInt64(kAudioRecordDuration, (mDurationNs + active) / (1000 * 1000));
-    mAnalyticsItem->setInt32(kAudioRecordCount, mCount);
-
-    // XXX I don't know that this adds a lot of value, long term
-    if (mCreatedNs != 0) {
-        mAnalyticsItem->setInt64(kAudioRecordCreated, mCreatedNs / (1000 * 1000));
-    }
+    mAnalyticsItem->setDouble(MM_PREFIX "durationMs", (mDurationNs + activeNs) * 1e-6);
+    mAnalyticsItem->setInt64(MM_PREFIX "startCount", (int64_t)mCount);
 
     if (mLastError != NO_ERROR) {
-        mAnalyticsItem->setInt32(kAudioRecordError, mLastError);
-        mAnalyticsItem->setCString(kAudioRecordErrorFunction, mLastErrorFunc.c_str());
+        mAnalyticsItem->setInt32(MM_PREFIX "lastError.code", (int32_t)mLastError);
+        mAnalyticsItem->setCString(MM_PREFIX "lastError.at", mLastErrorFunc.c_str());
     }
 }
 
@@ -358,7 +322,7 @@
     mCbf = cbf;
 
     if (cbf != NULL) {
-        mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
+        mAudioRecordThread = new AudioRecordThread(*this);
         mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
         // thread begins in paused state, and will not reference us until start()
     }
@@ -1463,8 +1427,9 @@
 
 // =========================================================================
 
-AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
+AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver)
+    : Thread(true /* bCanCallJava */)  // binder recursion on restoreRecord_l() may call Java.
+    , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
       mIgnoreNextPausedInt(false)
 {
 }
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 7881bb8..e59f7e0 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -170,44 +170,8 @@
 
 // ---------------------------------------------------------------------------
 
-static std::string audioContentTypeString(audio_content_type_t value) {
-    std::string contentType;
-    if (AudioContentTypeConverter::toString(value, contentType)) {
-        return contentType;
-    }
-    char rawbuffer[16];  // room for "%d"
-    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
-    return rawbuffer;
-}
-
-static std::string audioUsageString(audio_usage_t value) {
-    std::string usage;
-    if (UsageTypeConverter::toString(value, usage)) {
-        return usage;
-    }
-    char rawbuffer[16];  // room for "%d"
-    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
-    return rawbuffer;
-}
-
 void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
 {
-
-    // key for media statistics is defined in the header
-    // attrs for media statistics
-    // NB: these are matched with public Java API constants defined
-    // in frameworks/base/media/java/android/media/AudioTrack.java
-    // These must be kept synchronized with the constants there.
-    static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
-    static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
-    static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
-    static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
-    static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
-
-    // NB: These are not yet exposed as public Java API constants.
-    static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
-    static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
-
     // only if we're in a good state...
     // XXX: shall we gather alternative info if failing?
     const status_t lstatus = track->initCheck();
@@ -216,28 +180,22 @@
         return;
     }
 
-    // constructor guarantees mAnalyticsItem is valid
+#define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
 
-    const int32_t underrunFrames = track->getUnderrunFrames();
-    if (underrunFrames != 0) {
-        mAnalyticsItem->setInt32(kAudioTrackUnderrunFrames, underrunFrames);
-    }
+    // Java API 28 entries, do not change.
+    mAnalyticsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
+    mAnalyticsItem->setCString(MM_PREFIX "type",
+            toString(track->mAttributes.content_type).c_str());
+    mAnalyticsItem->setCString(MM_PREFIX "usage", toString(track->mAttributes.usage).c_str());
 
-    if (track->mTimestampStartupGlitchReported) {
-        mAnalyticsItem->setInt32(kAudioTrackStartupGlitch, 1);
-    }
-
-    if (track->mStreamType != -1) {
-        // deprecated, but this will tell us who still uses it.
-        mAnalyticsItem->setInt32(kAudioTrackStreamType, track->mStreamType);
-    }
-    // XXX: consider including from mAttributes: source type
-    mAnalyticsItem->setCString(kAudioTrackContentType,
-                               audioContentTypeString(track->mAttributes.content_type).c_str());
-    mAnalyticsItem->setCString(kAudioTrackUsage,
-                               audioUsageString(track->mAttributes.usage).c_str());
-    mAnalyticsItem->setInt32(kAudioTrackSampleRate, track->mSampleRate);
-    mAnalyticsItem->setInt64(kAudioTrackChannelMask, track->mChannelMask);
+    // Non-API entries, these can change due to a Java string mistake.
+    mAnalyticsItem->setInt32(MM_PREFIX "sampleRate", (int32_t)track->mSampleRate);
+    mAnalyticsItem->setInt64(MM_PREFIX "channelMask", (int64_t)track->mChannelMask);
+    // Non-API entries, these can change.
+    mAnalyticsItem->setInt32(MM_PREFIX "portId", (int32_t)track->mPortId);
+    mAnalyticsItem->setCString(MM_PREFIX "encoding", toString(track->mFormat).c_str());
+    mAnalyticsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
+    mAnalyticsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
 }
 
 // hand the user a snapshot of the metrics.
@@ -615,7 +573,7 @@
     mCbf = cbf;
 
     if (cbf != NULL) {
-        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
+        mAudioTrackThread = new AudioTrackThread(*this);
         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
         // thread begins in paused state, and will not reference us until start()
     }
@@ -3127,8 +3085,9 @@
 
 // =========================================================================
 
-AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
+AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver)
+    : Thread(true /* bCanCallJava */)  // binder recursion on restoreTrack_l() may call Java.
+    , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
       mIgnoreNextPausedInt(false)
 {
 }
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
index 41b425f..783eef3 100644
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -273,7 +273,7 @@
             mPostDownmixReformatBufferProvider.reset(nullptr);
             mDownmixerBufferProvider.reset(nullptr);
             mReformatBufferProvider.reset(nullptr);
-            mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+            mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
             mAdjustChannelsBufferProvider.reset(nullptr);
         }
 
@@ -347,8 +347,12 @@
          * all pre-mixer track buffer conversions outside the AudioMixer class.
          *
          * 1) mInputBufferProvider: The AudioTrack buffer provider.
-         * 2) mAdjustChannelsBufferProvider: Expend or contracts data
-         * 3) mAdjustChannelsNonDestructiveBufferProvider: Non-destructively adjust sample data
+         * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
+         *    channel format to another. Expanded channels are filled with zeros and put at the end
+         *    of each audio frame. Contracted channels are copied to the end of the buffer.
+         * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contracts sample
+         *    data. This is currently used for audio-haptic coupled playback to separate audio and
+         *    haptic data. Contracted channels may be written to a given buffer.
          * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
          *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
          *    requires reformat. For example, it may convert floating point input to
@@ -360,9 +364,10 @@
          * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
          */
         AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
-        // TODO: combine AdjustChannelsBufferProvider and AdjustChannelsNonDestructiveBufferProvider
+        // TODO: combine mAdjustChannelsBufferProvider and
+        // mContractChannelsNonDestructiveBufferProvider
         std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mAdjustChannelsNonDestructiveBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
         std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 81161ad..b4ddb69 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -568,7 +568,7 @@
     class AudioRecordThread : public Thread
     {
     public:
-        AudioRecordThread(AudioRecord& receiver, bool bCanCallJava = false);
+        AudioRecordThread(AudioRecord& receiver);
 
         // Do not call Thread::requestExitAndWait() without first calling requestExit().
         // Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 12f5d71..3926ead 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -934,7 +934,7 @@
     class AudioTrackThread : public Thread
     {
     public:
-        AudioTrackThread(AudioTrack& receiver, bool bCanCallJava = false);
+        AudioTrackThread(AudioTrack& receiver);
 
         // Do not call Thread::requestExitAndWait() without first calling requestExit().
         // Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 2c57db7..f7cc096 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -408,8 +408,8 @@
 void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
 {
     ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
-    if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+        mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
         reconfigureBufferProviders();
     }
 }
@@ -426,13 +426,13 @@
                 ? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
                         mMixerChannelCount, mMixerFormat)
                 : NULL;
-        mAdjustChannelsNonDestructiveBufferProvider.reset(
-                new AdjustChannelsNonDestructiveBufferProvider(
+        mContractChannelsNonDestructiveBufferProvider.reset(
+                new AdjustChannelsBufferProvider(
                         mFormat,
                         mAdjustNonDestructiveInChannelCount,
                         mAdjustNonDestructiveOutChannelCount,
-                        mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
                         frames,
+                        mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
                         buffer));
         reconfigureBufferProviders();
     }
@@ -441,9 +441,9 @@
 
 void AudioMixer::Track::clearContractedBuffer()
 {
-    if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        static_cast<AdjustChannelsNonDestructiveBufferProvider*>(
-                mAdjustChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+        static_cast<AdjustChannelsBufferProvider*>(
+                mContractChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
     }
 }
 
@@ -455,9 +455,9 @@
         mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
         bufferProvider = mAdjustChannelsBufferProvider.get();
     }
-    if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        mAdjustChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
-        bufferProvider = mAdjustChannelsNonDestructiveBufferProvider.get();
+    if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+        mContractChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = mContractChannelsNonDestructiveBufferProvider.get();
     }
     if (mReformatBufferProvider.get() != nullptr) {
         mReformatBufferProvider->setBufferProvider(bufferProvider);
@@ -966,8 +966,8 @@
         track->mDownmixerBufferProvider->reset();
     } else if (track->mReformatBufferProvider.get() != nullptr) {
         track->mReformatBufferProvider->reset();
-    } else if (track->mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
-        track->mAdjustChannelsNonDestructiveBufferProvider->reset();
+    } else if (track->mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+        track->mContractChannelsNonDestructiveBufferProvider->reset();
     } else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
         track->mAdjustChannelsBufferProvider->reset();
     }
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index b764ccb..21d25e1 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -627,79 +627,68 @@
     }
 }
 
-AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(audio_format_t format,
-        size_t inChannelCount, size_t outChannelCount, size_t frameCount) :
+AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(
+        audio_format_t format, size_t inChannelCount, size_t outChannelCount,
+        size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer) :
         CopyBufferProvider(
                 audio_bytes_per_frame(inChannelCount, format),
-                audio_bytes_per_frame(outChannelCount, format),
+                audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
                 frameCount),
         mFormat(format),
         mInChannelCount(inChannelCount),
         mOutChannelCount(outChannelCount),
-        mSampleSizeInBytes(audio_bytes_per_sample(format))
-{
-    ALOGV("AdjustBufferProvider(%p)(%#x, %zu, %zu, %zu)",
-            this, format, inChannelCount, outChannelCount, frameCount);
-}
-
-void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    adjust_channels(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
-            frames * mInChannelCount * mSampleSizeInBytes);
-}
-
-AdjustChannelsNonDestructiveBufferProvider::AdjustChannelsNonDestructiveBufferProvider(
-        audio_format_t format, size_t inChannelCount, size_t outChannelCount,
-        audio_format_t contractedFormat, size_t contractedFrameCount, void* contractedBuffer) :
-        CopyBufferProvider(
-                audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
-                audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
-                contractedFrameCount),
-        mFormat(format),
-        mInChannelCount(inChannelCount),
-        mOutChannelCount(outChannelCount),
         mSampleSizeInBytes(audio_bytes_per_sample(format)),
+        mFrameCount(frameCount),
         mContractedChannelCount(inChannelCount - outChannelCount),
         mContractedFormat(contractedFormat),
-        mContractedFrameCount(contractedFrameCount),
         mContractedBuffer(contractedBuffer),
         mContractedWrittenFrames(0)
 {
-    ALOGV("AdjustChannelsNonDestructiveBufferProvider(%p)(%#x, %zu, %zu, %#x, %p)",
-            this, format, inChannelCount, outChannelCount, contractedFormat, contractedBuffer);
+    ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p)", this, format,
+            inChannelCount, outChannelCount, frameCount, contractedFormat, contractedBuffer);
     if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
         mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
     }
 }
 
-status_t AdjustChannelsNonDestructiveBufferProvider::getNextBuffer(
-        AudioBufferProvider::Buffer* pBuffer)
+status_t AdjustChannelsBufferProvider::getNextBuffer(AudioBufferProvider::Buffer* pBuffer)
 {
-    const size_t outFramesLeft = mContractedFrameCount - mContractedWrittenFrames;
-    if (outFramesLeft < pBuffer->frameCount) {
-        // Restrict the frame count so that we don't write over the size of the output buffer.
-        pBuffer->frameCount = outFramesLeft;
+    if (mContractedBuffer != nullptr) {
+        // Restrict the frame count only when contracted frames need to be saved.
+        const size_t outFramesLeft = mFrameCount - mContractedWrittenFrames;
+        if (outFramesLeft < pBuffer->frameCount) {
+            // Restrict the frame count so that we don't write over the size of the output buffer.
+            pBuffer->frameCount = outFramesLeft;
+        }
     }
     return CopyBufferProvider::getNextBuffer(pBuffer);
 }
 
-void AdjustChannelsNonDestructiveBufferProvider::copyFrames(
-        void *dst, const void *src, size_t frames)
+void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
 {
-    adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
-            frames * mInChannelCount * mSampleSizeInBytes);
-    if (mContractedFormat != AUDIO_FORMAT_INVALID && mContractedBuffer != NULL
-            && mInChannelCount > mOutChannelCount) {
-        const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
-        memcpy_by_audio_format(
-                (uint8_t*)mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
-                mContractedFormat, (uint8_t*)dst + contractedIdx, mFormat,
-                mContractedChannelCount * frames);
-        mContractedWrittenFrames += frames;
+    if (mInChannelCount > mOutChannelCount) {
+        // For the multi-channel to mono case, adjust_channels has special logic that mixes the
+        // first two input channels into one output channel. Use adjust_channels_non_destructive
+        // instead so that only the first channel's data is kept even when contracting to mono.
+        adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
+                mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+        if (mContractedFormat != AUDIO_FORMAT_INVALID
+            && mContractedBuffer != nullptr) {
+            const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+            memcpy_by_audio_format(
+                    (uint8_t*) mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
+                    mContractedFormat, (uint8_t*) dst + contractedIdx, mFormat,
+                    mContractedChannelCount * frames);
+            mContractedWrittenFrames += frames;
+        }
+    } else {
+        // Expanded channels are zero-filled and appended at the end of each audio frame.
+        adjust_channels(src, mInChannelCount, dst, mOutChannelCount,
+                mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
     }
 }
 
-void AdjustChannelsNonDestructiveBufferProvider::reset()
+void AdjustChannelsBufferProvider::reset()
 {
     mContractedWrittenFrames = 0;
     CopyBufferProvider::reset();
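To make the contraction path above concrete, here is a small self-contained sketch (not the stagefright code) of what the new copyFrames() does for a 3-channel interleaved input contracted to stereo, with the dropped third channel copied to a separate buffer -- the audio-haptic case mentioned in the comments. adjust_channels_non_destructive and memcpy_by_audio_format are replaced by plain loops for illustration, and no format conversion is performed.

#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative only: contract interleaved inCh-channel frames to outCh channels,
// copying the dropped trailing channels into 'contracted' (e.g. a haptic buffer).
void contractFrames(const int16_t* src, size_t frames, size_t inCh, size_t outCh,
                    int16_t* dst, int16_t* contracted) {
    for (size_t f = 0; f < frames; ++f) {
        // keep the first outCh channels of each frame
        std::memcpy(dst + f * outCh, src + f * inCh, outCh * sizeof(int16_t));
        // append the remaining channels to the contracted buffer
        for (size_t c = outCh; c < inCh; ++c) {
            contracted[f * (inCh - outCh) + (c - outCh)] = src[f * inCh + c];
        }
    }
}

int main() {
    // two frames of (left, right, haptic)
    const std::vector<int16_t> in = {100, 200, 7, 300, 400, 9};
    std::vector<int16_t> out(4), haptic(2);
    contractFrames(in.data(), 2, 3, 2, out.data(), haptic.data());
    // out    == {100, 200, 300, 400}
    // haptic == {7, 9}
    return 0;
}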
diff --git a/media/libeffects/downmix/tests/Android.bp b/media/libeffects/downmix/tests/Android.bp
index e2e7dbd..63afc54 100644
--- a/media/libeffects/downmix/tests/Android.bp
+++ b/media/libeffects/downmix/tests/Android.bp
@@ -24,7 +24,6 @@
     ],
 
     cflags: [
-        "-v",
         "-Werror",
         "-Wextra",
     ],
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
index ea41527..b038854 100644
--- a/media/libmedia/include/media/BufferProviders.h
+++ b/media/libmedia/include/media/BufferProviders.h
@@ -218,33 +218,21 @@
     bool                 mAudioPlaybackRateValid; // flag for current parameters validity
 };
 
-// AdjustBufferProvider derives from CopyBufferProvider to adjust sample data.
+// AdjustChannelsBufferProvider derives from CopyBufferProvider to adjust sample data.
 // Expands or contracts sample data from one interleaved channel format to another.
-// Expanded channels are filled with zeros and put at the end of each audio frame.
-// Contracted channels are omitted from the end of each audio frame.
+// Extra expanded channels are filled with zeros and put at the end of each audio frame.
+// Contracted channels are copied to the end of the output buffer (storage should be
+// allocated appropriately).
+// Contracted channels may also be written to a separately provided contracted buffer.
 class AdjustChannelsBufferProvider : public CopyBufferProvider {
 public:
     AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
-            size_t outChannelCount, size_t frameCount);
-    //Overrides
-    void copyFrames(void *dst, const void *src, size_t frames) override;
-
-protected:
-    const audio_format_t mFormat;
-    const size_t         mInChannelCount;
-    const size_t         mOutChannelCount;
-    const size_t         mSampleSizeInBytes;
-};
-
-// AdjustChannelsNonDestructiveBufferProvider derives from CopyBufferProvider to adjust sample data.
-// Expands or contracts sample data from one interleaved channel format to another.
-// Extra expanded channels are interleaved in from the end of the input buffer.
-// Contracted channels are copied to the end of the output buffer.
-// Contracted channels could be written to output buffer.
-class AdjustChannelsNonDestructiveBufferProvider : public CopyBufferProvider {
-public:
-    AdjustChannelsNonDestructiveBufferProvider(audio_format_t format, size_t inChannelCount,
-            size_t outChannelCount, audio_format_t contractedFormat, size_t contractedFrameCount,
+            size_t outChannelCount, size_t frameCount) : AdjustChannelsBufferProvider(
+                    format, inChannelCount, outChannelCount,
+                    frameCount, AUDIO_FORMAT_INVALID, nullptr) { }
+    // Contracted data is converted to contractedFormat and put into contractedBuffer.
+    AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
+            size_t outChannelCount, size_t frameCount, audio_format_t contractedFormat,
             void* contractedBuffer);
     //Overrides
     status_t getNextBuffer(Buffer* pBuffer) override;
@@ -258,9 +246,9 @@
     const size_t         mInChannelCount;
     const size_t         mOutChannelCount;
     const size_t         mSampleSizeInBytes;
+    const size_t         mFrameCount;
     const size_t         mContractedChannelCount;
     const audio_format_t mContractedFormat;
-    const size_t         mContractedFrameCount;
     void                *mContractedBuffer;
     size_t               mContractedWrittenFrames;
     size_t               mContractedFrameSize;
diff --git a/media/libmedia/include/media/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
index 7a4b1b9..5ab6e37 100644
--- a/media/libmedia/include/media/IMediaSource.h
+++ b/media/libmedia/include/media/IMediaSource.h
@@ -126,8 +126,8 @@
 
     static const size_t kBinderMediaBuffers = 4; // buffers managed by BnMediaSource
     static const size_t kTransferSharedAsSharedThreshold = 4 * 1024;  // if >= shared, else inline
-    static const size_t kTransferInlineAsSharedThreshold = 64 * 1024; // if >= shared, else inline
-    static const size_t kInlineMaxTransfer = 256 * 1024; // Binder size limited to BINDER_VM_SIZE.
+    static const size_t kTransferInlineAsSharedThreshold = 8 * 1024; // if >= shared, else inline
+    static const size_t kInlineMaxTransfer = 64 * 1024; // Binder size limited to BINDER_VM_SIZE.
 
 protected:
     virtual ~BnMediaSource();
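The constants above control whether BnMediaSource marshals read replies inline in the binder Parcel or through shared memory; this hunk lowers the inline-as-shared threshold and the per-transaction inline cap so large buffers move to shared memory sooner. A hedged sketch of that decision rule, with an invented helper name (the real dispatch lives in IMediaSource.cpp and is not shown here):

#include <cstddef>

// Thresholds mirroring the new values above (illustrative copy).
constexpr size_t kTransferSharedAsSharedThreshold = 4 * 1024;
constexpr size_t kTransferInlineAsSharedThreshold = 8 * 1024;
constexpr size_t kInlineMaxTransfer = 64 * 1024;  // cap on inline bytes per reply

enum class Transfer { Inline, SharedMemory };

// Hypothetical helper: choose how one buffer is returned over binder.
Transfer pickTransfer(size_t bytes, bool backedBySharedMemory) {
    const size_t threshold = backedBySharedMemory ? kTransferSharedAsSharedThreshold
                                                  : kTransferInlineAsSharedThreshold;
    return bytes >= threshold ? Transfer::SharedMemory : Transfer::Inline;
}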
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index a01afa3..910edff 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -571,8 +571,8 @@
 }
 
 void JAudioTrack::registerRoutingDelegates(
-        std::vector<std::pair<jobject, jobject>>& routingDelegates) {
-    for (std::vector<std::pair<jobject, jobject>>::iterator it = routingDelegates.begin();
+        Vector<std::pair<jobject, jobject>>& routingDelegates) {
+    for (Vector<std::pair<jobject, jobject>>::iterator it = routingDelegates.begin();
             it != routingDelegates.end(); it++) {
         addAudioDeviceCallback(it->second, getHandler(it->second));
     }
@@ -597,23 +597,9 @@
     return env->CallObjectMethod(routingDelegateObj, jGetHandler);
 }
 
-jobject JAudioTrack::addGlobalRef(const jobject obj) {
+jobject JAudioTrack::findByKey(Vector<std::pair<jobject, jobject>>& mp, const jobject key) {
     JNIEnv *env = JavaVMHelper::getJNIEnv();
-    return reinterpret_cast<jobject>(env->NewGlobalRef(obj));
-}
-
-status_t JAudioTrack::removeGlobalRef(const jobject obj) {
-    if (obj == NULL) {
-        return BAD_VALUE;
-    }
-    JNIEnv *env = JavaVMHelper::getJNIEnv();
-    env->DeleteGlobalRef(obj);
-    return NO_ERROR;
-}
-
-jobject JAudioTrack::findByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key) {
-    JNIEnv *env = JavaVMHelper::getJNIEnv();
-    for (std::vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
+    for (Vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
         if (env->IsSameObject(it->first, key)) {
             return it->second;
         }
@@ -621,9 +607,9 @@
     return nullptr;
 }
 
-void JAudioTrack::eraseByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key) {
+void JAudioTrack::eraseByKey(Vector<std::pair<jobject, jobject>>& mp, const jobject key) {
     JNIEnv *env = JavaVMHelper::getJNIEnv();
-    for (std::vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
+    for (Vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
         if (env->IsSameObject(it->first, key)) {
             mp.erase(it);
             return;
diff --git a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
index 4de92ad..7c3063d 100644
--- a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
+++ b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
@@ -85,9 +85,6 @@
 }
 
 MediaPlayer2AudioOutput::~MediaPlayer2AudioOutput() {
-    for (auto routingDelegate : mRoutingDelegates) {
-        JAudioTrack::removeGlobalRef(routingDelegate.second);
-    }
     close();
     delete mCallbackData;
 }
@@ -524,13 +521,16 @@
 status_t MediaPlayer2AudioOutput::addAudioDeviceCallback(jobject jRoutingDelegate) {
     ALOGV("addAudioDeviceCallback");
     Mutex::Autolock lock(mLock);
-    jobject listener = JAudioTrack::getListener(jRoutingDelegate);
-    if (mJAudioTrack != nullptr &&
-        JAudioTrack::findByKey(mRoutingDelegates, listener) == nullptr) {
-        jobject handler = JAudioTrack::getHandler(jRoutingDelegate);
-        jobject routingDelegate = JAudioTrack::addGlobalRef(jRoutingDelegate);
+    jobject listener = (new JObjectHolder(
+            JAudioTrack::getListener(jRoutingDelegate)))->getJObject();
+    if (JAudioTrack::findByKey(mRoutingDelegates, listener) == nullptr) {
+        jobject handler = (new JObjectHolder(
+                JAudioTrack::getHandler(jRoutingDelegate)))->getJObject();
+        jobject routingDelegate = (new JObjectHolder(jRoutingDelegate))->getJObject();
         mRoutingDelegates.push_back(std::pair<jobject, jobject>(listener, routingDelegate));
-        return mJAudioTrack->addAudioDeviceCallback(routingDelegate, handler);
+        if (mJAudioTrack != nullptr) {
+            return mJAudioTrack->addAudioDeviceCallback(routingDelegate, handler);
+        }
     }
     return NO_ERROR;
 }
@@ -539,13 +539,11 @@
     ALOGV("removeAudioDeviceCallback");
     Mutex::Autolock lock(mLock);
     jobject routingDelegate = nullptr;
-    if (mJAudioTrack != nullptr &&
-        (routingDelegate = JAudioTrack::findByKey(mRoutingDelegates, listener)) != nullptr) {
-        mJAudioTrack->removeAudioDeviceCallback(routingDelegate);
-        JAudioTrack::eraseByKey(mRoutingDelegates, listener);
-        if (JAudioTrack::removeGlobalRef(routingDelegate) != NO_ERROR) {
-            return BAD_VALUE;
+    if ((routingDelegate = JAudioTrack::findByKey(mRoutingDelegates, listener)) != nullptr) {
+        if (mJAudioTrack != nullptr) {
+            mJAudioTrack->removeAudioDeviceCallback(routingDelegate);
         }
+        JAudioTrack::eraseByKey(mRoutingDelegates, listener);
     }
     return NO_ERROR;
 }
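This file now leans on JObjectHolder instead of the hand-rolled addGlobalRef()/removeGlobalRef() pair it used to call. A minimal sketch of that RAII pattern over plain JNI, assuming nothing about the actual JObjectHolder implementation beyond the constructor-takes-jobject / getJObject() surface visible above:

#include <jni.h>

// Illustrative RAII wrapper (not the real mediaplayer2/JObjectHolder.h): promote a
// local reference to a global one on construction, release it on destruction.
class GlobalRefHolder {
public:
    GlobalRefHolder(JavaVM* vm, JNIEnv* env, jobject local)
        : mVm(vm), mGlobal(env->NewGlobalRef(local)) {}
    ~GlobalRefHolder() {
        JNIEnv* env = nullptr;
        // Fetch an env valid for the destructing thread; the constructor's JNIEnv
        // must not be cached because it is only valid on its own thread.
        if (mGlobal != nullptr &&
                mVm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) == JNI_OK) {
            env->DeleteGlobalRef(mGlobal);
        }
    }
    jobject getJObject() const { return mGlobal; }
private:
    JavaVM* const mVm;
    jobject mGlobal;
};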
diff --git a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
index 87dc889..7381286 100644
--- a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
+++ b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
@@ -17,7 +17,6 @@
 #ifndef ANDROID_JAUDIOTRACK_H
 #define ANDROID_JAUDIOTRACK_H
 
-#include <vector>
 #include <utility>
 #include <jni.h>
 #include <media/AudioResamplerPublic.h>
@@ -25,6 +24,7 @@
 #include <media/VolumeShaper.h>
 #include <system/audio.h>
 #include <utils/Errors.h>
+#include <utils/Vector.h>
 #include <mediaplayer2/JObjectHolder.h>
 #include <media/AudioTimestamp.h>   // It has dependency on audio.h/Errors.h, but doesn't
                                     // include them in it. Therefore it is included here at last.
@@ -405,7 +405,7 @@
      * routingDelegates: backed-up routing delegates
      *
      */
-    void registerRoutingDelegates(std::vector<std::pair<jobject, jobject>>& routingDelegates);
+    void registerRoutingDelegates(Vector<std::pair<jobject, jobject>>& routingDelegates);
 
     /* get listener from RoutingDelegate object
      */
@@ -415,17 +415,6 @@
      */
     static jobject getHandler(const jobject routingDelegateObj);
 
-    /* convert local reference to global reference.
-     */
-    static jobject addGlobalRef(const jobject obj);
-
-    /* erase global reference.
-     *
-     * Returns NO_ERROR if succeeds
-     *         BAD_VALUE if obj is NULL
-     */
-    static status_t removeGlobalRef(const jobject obj);
-
     /*
      * Parameters:
      * map and key
@@ -433,13 +422,13 @@
      * Returns value if key is in the map
      *         nullptr if key is not in the map
      */
-    static jobject findByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key);
+    static jobject findByKey(Vector<std::pair<jobject, jobject>>& mp, const jobject key);
 
     /*
      * Parameters:
      * map and key
      */
-    static void eraseByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key);
+    static void eraseByKey(Vector<std::pair<jobject, jobject>>& mp, const jobject key);
 
 private:
     audio_output_flags_t mFlags;
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
index bda4f61..1b3f2dc 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
@@ -22,7 +22,6 @@
 #include <mediaplayer2/JAudioTrack.h>
 #include <mediaplayer2/JObjectHolder.h>
 
-#include <vector>
 #include <utility>
 #include <utils/String16.h>
 #include <utils/Vector.h>
@@ -125,7 +124,7 @@
     audio_output_flags_t    mFlags;
     sp<JObjectHolder>       mPreferredDevice;
     mutable Mutex           mLock;
-    std::vector<std::pair<jobject, jobject>> mRoutingDelegates; // <listener, routingDelegate>
+    Vector<std::pair<jobject, jobject>> mRoutingDelegates; // <listener, routingDelegate>
 
     // static variables below not protected by mutex
     static bool             mIsOnEmulator;
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index 2ea5fcd..9eba7e9 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -85,7 +85,7 @@
     CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC));
     CHECK(meta->findInt32(kKeyChannelCount, &mChannelCount));
     CHECK(meta->findInt32(kKeySampleRate, &mSampleRate));
-    CHECK(mChannelCount >= 1 && mChannelCount <= 2);
+    CHECK(mChannelCount >= 1 && mChannelCount <= 7);
 
     // Optionally, we want to check whether AACProfile is also set.
     if (meta->findInt32(kKeyAACProfile, &mAACProfile)) {
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 2bd7288..d8b825d 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -199,7 +199,6 @@
         "libhidlallocatorutils",
         "libhidlbase",
         "libhidlmemory",
-        "libziparchive",
         "android.hidl.allocator@1.0",
         "android.hardware.cas.native@1.0",
         "android.hardware.media.omx@1.0",
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index f152a38..0c38f2e 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -457,8 +457,8 @@
         if (numNalus == 0) {
             continue;
         }
-        // array_completeness set to 0.
-        header[0] = type;
+        // array_completeness set to 1.
+        header[0] = type | 0x80;
         header[1] = (numNalus >> 8) & 0xff;
         header[2] = numNalus & 0xff;
         header += 3;
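For reference, the byte being patched above is the first byte of an HVCC NAL-unit array entry, which per ISO/IEC 14496-15 packs array_completeness (1 bit), a reserved bit, and the 6-bit NAL unit type; OR-ing with 0x80 therefore marks the array as complete. A small sketch of the packing:

#include <cstdint>

// Illustrative helper: first byte of an HVCC NAL-unit array entry.
// Layout: array_completeness(1) | reserved(1) = 0 | NAL_unit_type(6).
constexpr uint8_t hvccArrayHeaderByte(uint8_t nalUnitType, bool complete) {
    return static_cast<uint8_t>((complete ? 0x80 : 0x00) | (nalUnitType & 0x3f));
}

static_assert(hvccArrayHeaderByte(32 /* VPS */, true) == 0xa0, "complete VPS array");
static_assert(hvccArrayHeaderByte(33 /* SPS */, false) == 0x21, "incomplete SPS array");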
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 6259b15..f6ed0f1 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -3128,8 +3128,8 @@
         if (!mIsHeic) {
             if (mStszTableEntries->count() == 0) {
                 mFirstSampleTimeRealUs = systemTime() / 1000;
+                mOwner->setStartTimestampUs(timestampUs);
                 mStartTimestampUs = timestampUs;
-                mOwner->setStartTimestampUs(mStartTimestampUs);
                 previousPausedDurationUs = mStartTimestampUs;
             }
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 9c58e05..d4e4000 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1973,10 +1973,11 @@
 
                 case kWhatComponentConfigured:
                 {
-                    if (mState == UNINITIALIZED || mState == INITIALIZED) {
-                        // In case a kWhatError message came in and replied with error,
+                    if (mState == RELEASING || mState == UNINITIALIZED || mState == INITIALIZED) {
+                        // In case a kWhatError or kWhatRelease message came in and replied,
                         // we log a warning and ignore.
-                        ALOGW("configure interrupted by error, current state %d", mState);
+                        ALOGW("configure interrupted by error or release, current state %d",
+                              mState);
                         break;
                     }
                     CHECK_EQ(mState, CONFIGURING);
@@ -2067,6 +2068,13 @@
 
                 case kWhatStartCompleted:
                 {
+                    if (mState == RELEASING || mState == UNINITIALIZED) {
+                        // In case a kWhatRelease message came in and replied,
+                        // we log a warning and ignore.
+                        ALOGW("start interrupted by release, current state %d", mState);
+                        break;
+                    }
+
                     CHECK_EQ(mState, STARTING);
                     if (mIsVideo) {
                         addResource(
@@ -2632,11 +2640,12 @@
                 break;
             }
 
-            // If we're flushing, or we're stopping but received a release
-            // request, post the reply for the pending call first, and consider
-            // it done. The reply token will be replaced after this, and we'll
-            // no longer be able to reply.
-            if (mState == FLUSHING || mState == STOPPING) {
+            // If we're flushing, stopping, configuring or starting but
+            // received a release request, post the reply for the pending call
+            // first, and consider it done. The reply token will be replaced
+            // after this, and we'll no longer be able to reply.
+            if (mState == FLUSHING || mState == STOPPING
+                    || mState == CONFIGURING || mState == STARTING) {
                 (new AMessage)->postReply(mReplyID);
             }
 
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index 5c13983..cb87b55 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -295,6 +295,18 @@
                   mEstimatedSizeBytes, mMaxFileSizeLimitBytes);
             break;
         }
+
+        int32_t isCodecSpecific;
+        if ((buffer->meta_data().findInt32(kKeyIsCodecConfig, &isCodecSpecific)
+             && isCodecSpecific)
+            || IsOpusHeader((uint8_t*)buffer->data() + buffer->range_offset(),
+                         buffer->range_length())) {
+            ALOGV("Drop codec specific info buffer");
+            buffer->release();
+            buffer = nullptr;
+            continue;
+        }
+
         int64_t timestampUs;
         CHECK(buffer->meta_data().findInt64(kKeyTime, &timestampUs));
         if (timestampUs > mEstimatedDurationUs) {
diff --git a/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp b/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp
index db2c61a..5554ebd 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp
@@ -183,6 +183,10 @@
     // Initialize the encoder.
     if (!PVInitVideoEncoder(&handle, &encParams)) {
         fprintf(stderr, "Failed to initialize the encoder\n");
+        fclose(fpInput);
+        fclose(fpOutput);
+        free(inputBuf);
+        free(outputBuf);
         return EXIT_FAILURE;
     }
 
@@ -190,6 +194,10 @@
     int32_t headerLength = kOutputBufferSize;
     if (!PVGetVolHeader(&handle, outputBuf, &headerLength, 0)) {
         fprintf(stderr, "Failed to get VOL header\n");
+        fclose(fpInput);
+        fclose(fpOutput);
+        free(inputBuf);
+        free(outputBuf);
         return EXIT_FAILURE;
     }
     fwrite(outputBuf, 1, headerLength, fpOutput);
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
index 1a527b3..0e31804 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.cpp
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -60,7 +60,7 @@
     def.eDir = OMX_DirInput;
     def.nBufferCountMin = kNumBuffers;
     def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 64 * 1024;
+    def.nBufferSize = 192 * 1024;
     def.bEnabled = OMX_TRUE;
     def.bPopulated = OMX_FALSE;
     def.eDomain = OMX_PortDomainAudio;
@@ -78,7 +78,7 @@
     def.eDir = OMX_DirOutput;
     def.nBufferCountMin = kNumBuffers;
     def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 64 * 1024;
+    def.nBufferSize = 192 * 1024;
     def.bEnabled = OMX_TRUE;
     def.bPopulated = OMX_FALSE;
     def.eDomain = OMX_PortDomainAudio;
diff --git a/media/libstagefright/foundation/OpusHeader.cpp b/media/libstagefright/foundation/OpusHeader.cpp
index 9faede1..acb9ccf 100644
--- a/media/libstagefright/foundation/OpusHeader.cpp
+++ b/media/libstagefright/foundation/OpusHeader.cpp
@@ -15,9 +15,9 @@
  */
 
 //#define LOG_NDEBUG 0
-#define LOG_TAG "SoftOpus"
-#include <algorithm>
+#define LOG_TAG "OpusHeader"
 #include <cstring>
+#include <inttypes.h>
 #include <stdint.h>
 
 #include <log/log.h>
@@ -91,6 +91,9 @@
 
 // Parses Opus Header. Header spec: http://wiki.xiph.org/OggOpus#ID_Header
 bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header) {
+    if (data == NULL) {
+        return false;
+    }
     if (data_size < kOpusHeaderSize) {
         ALOGV("Header size is too small.");
         return false;
@@ -183,53 +186,88 @@
         ALOGD("Buffer not large enough to hold unified OPUS CSD");
         return -1;
     }
+    int headerLen = 0;
 
-    int headerLen = WriteOpusHeader(header, inputSampleRate, output,
+    // Add opus header
+    /*
+      Following is the CSD syntax for signalling OpusHeader
+      (http://wiki.xiph.org/OggOpus#ID_Header)
+
+      Marker (8 bytes) | Length (8 bytes) | OpusHeader
+
+      Markers supported:
+      AOPUS_CSD_OPUS_HEADER_MARKER - Signals Opus Header
+
+      Length should be a value within AOPUS_OPUSHEAD_MINSIZE and AOPUS_OPUSHEAD_MAXSIZE.
+    */
+
+    memcpy(output + headerLen, AOPUS_CSD_OPUS_HEADER_MARKER, AOPUS_MARKER_SIZE);
+    headerLen += AOPUS_MARKER_SIZE;
+
+    // Placeholder for the OpusHeader size
+    headerLen += AOPUS_LENGTH_SIZE;
+
+    int headerSize = WriteOpusHeader(header, inputSampleRate, output + headerLen,
         outputSize);
-    if (headerLen < 0) {
-        ALOGD("WriteOpusHeader failed");
+    if (headerSize < 0) {
+        ALOGD("%s: WriteOpusHeader failed", __func__);
         return -1;
     }
-    if (headerLen >= (outputSize - 2 * AOPUS_TOTAL_CSD_SIZE)) {
-        ALOGD("Buffer not large enough to hold codec delay and seek pre roll");
-        return -1;
-    }
+    headerLen += headerSize;
 
-    uint64_t length = AOPUS_LENGTH;
+    // Fill in the OpusHeader size after AOPUS_CSD_OPUS_HEADER_MARKER
+    uint64_t length = headerSize;
+    memcpy(output + AOPUS_MARKER_SIZE, &length, AOPUS_LENGTH_SIZE);
 
     /*
       Following is the CSD syntax for signalling codec delay and
       seek pre-roll which is to be appended after OpusHeader
 
-      Marker (8 bytes) | Length (8 bytes) | Samples (8 bytes)
+      Marker (8 bytes) | Length (8 bytes) | Delay or pre-roll in ns (8 bytes)
 
       Markers supported:
-      AOPUSDLY - Signals Codec Delay
-      AOPUSPRL - Signals seek pre roll
+      AOPUS_CSD_CODEC_DELAY_MARKER - codec delay in ns, represented in 8 bytes
+      AOPUS_CSD_SEEK_PREROLL_MARKER - seek pre-roll in ns, represented in 8 bytes
 
-      Length should be 8.
     */
-
+    length = sizeof(codecDelay);
+    if (headerLen > (outputSize - AOPUS_MARKER_SIZE - AOPUS_LENGTH_SIZE - length)) {
+        ALOGD("Buffer not large enough to hold codec delay");
+        return -1;
+    }
     // Add codec delay
     memcpy(output + headerLen, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE);
     headerLen += AOPUS_MARKER_SIZE;
     memcpy(output + headerLen, &length, AOPUS_LENGTH_SIZE);
     headerLen += AOPUS_LENGTH_SIZE;
-    memcpy(output + headerLen, &codecDelay, AOPUS_CSD_SIZE);
-    headerLen += AOPUS_CSD_SIZE;
+    memcpy(output + headerLen, &codecDelay, length);
+    headerLen += length;
 
+    length = sizeof(seekPreRoll);
+    if (headerLen > (outputSize - AOPUS_MARKER_SIZE - AOPUS_LENGTH_SIZE - length)) {
+        ALOGD("Buffer not large enough to hold seek pre roll");
+        return -1;
+    }
     // Add skip pre roll
     memcpy(output + headerLen, AOPUS_CSD_SEEK_PREROLL_MARKER, AOPUS_MARKER_SIZE);
     headerLen += AOPUS_MARKER_SIZE;
     memcpy(output + headerLen, &length, AOPUS_LENGTH_SIZE);
     headerLen += AOPUS_LENGTH_SIZE;
-    memcpy(output + headerLen, &seekPreRoll, AOPUS_CSD_SIZE);
-    headerLen += AOPUS_CSD_SIZE;
+    memcpy(output + headerLen, &seekPreRoll, length);
+    headerLen += length;
 
     return headerLen;
 }
 
-void GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
+bool IsOpusHeader(const uint8_t *data, size_t data_size) {
+    if (data_size < AOPUS_MARKER_SIZE) {
+        return false;
+    }
+
+    return !memcmp(data, AOPUS_CSD_OPUS_HEADER_MARKER, AOPUS_MARKER_SIZE);
+}
+
+bool GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
                           void **opusHeadBuf, size_t *opusHeadSize,
                           void **codecDelayBuf, size_t *codecDelaySize,
                           void **seekPreRollBuf, size_t *seekPreRollSize) {
@@ -237,26 +275,77 @@
     *codecDelaySize = 0;
     *seekPreRollBuf = NULL;
     *seekPreRollSize = 0;
-    *opusHeadBuf = (void *)data;
-    *opusHeadSize = data_size;
-    if (data_size >= AOPUS_UNIFIED_CSD_MINSIZE) {
+    *opusHeadBuf = NULL;
+    *opusHeadSize = 0;
+
+    // Both AOPUS_MARKER_SIZE and the legacy "OpusHead" marker are 8 bytes long.
+    if (data_size < 8)
+        return false;
+
+    // Check if the CSD is in legacy format
+    if (!memcmp("OpusHead", data, 8)) {
+        if (data_size < AOPUS_OPUSHEAD_MINSIZE || data_size > AOPUS_OPUSHEAD_MAXSIZE) {
+            ALOGD("Unexpected size for opusHeadSize %zu", data_size);
+            return false;
+        }
+        *opusHeadBuf = (void *)data;
+        *opusHeadSize = data_size;
+        return true;
+    } else if (memcmp(AOPUS_CSD_MARKER_PREFIX, data, AOPUS_CSD_MARKER_PREFIX_SIZE) == 0) {
         size_t i = 0;
-        while (i < data_size - AOPUS_TOTAL_CSD_SIZE) {
+        bool found = false;
+        while (i <= data_size - AOPUS_MARKER_SIZE - AOPUS_LENGTH_SIZE) {
             uint8_t *csdBuf = (uint8_t *)data + i;
-            if (!memcmp(csdBuf, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE)) {
-                *opusHeadSize = std::min(*opusHeadSize, i);
+            if (!memcmp(csdBuf, AOPUS_CSD_OPUS_HEADER_MARKER, AOPUS_MARKER_SIZE)) {
+                uint64_t value;
+                memcpy(&value, csdBuf + AOPUS_MARKER_SIZE, sizeof(value));
+                if (value < AOPUS_OPUSHEAD_MINSIZE || value > AOPUS_OPUSHEAD_MAXSIZE) {
+                    ALOGD("Unexpected size for opusHeadSize %" PRIu64, value);
+                    return false;
+                }
+                i += AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE + value;
+                if (i > data_size) {
+                    ALOGD("Marker signals a header that is larger than input");
+                    return false;
+                }
+                *opusHeadBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
+                *opusHeadSize = value;
+                found = true;
+            } else if (!memcmp(csdBuf, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE)) {
+                uint64_t value;
+                memcpy(&value, csdBuf + AOPUS_MARKER_SIZE, sizeof(value));
+                if (value != sizeof(uint64_t)) {
+                    ALOGD("Unexpected size for codecDelay %" PRIu64, value);
+                    return false;
+                }
+                i += AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE + value;
+                if (i > data_size) {
+                    ALOGD("Marker signals a header that is larger than input");
+                    return false;
+                }
                 *codecDelayBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
-                *codecDelaySize = AOPUS_CSD_SIZE;
-                i += AOPUS_TOTAL_CSD_SIZE;
+                *codecDelaySize = value;
             } else if (!memcmp(csdBuf, AOPUS_CSD_SEEK_PREROLL_MARKER, AOPUS_MARKER_SIZE)) {
-                *opusHeadSize = std::min(*opusHeadSize, i);
+                uint64_t value;
+                memcpy(&value, csdBuf + AOPUS_MARKER_SIZE, sizeof(value));
+                if (value != sizeof(uint64_t)) {
+                    ALOGD("Unexpected size for seekPreRollSize %" PRIu64, value);
+                    return false;
+                }
+                i += AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE + value;
+                if (i > data_size) {
+                    ALOGD("Marker signals a header that is larger than input");
+                    return false;
+                }
                 *seekPreRollBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
-                *seekPreRollSize = AOPUS_CSD_SIZE;
-                i += AOPUS_TOTAL_CSD_SIZE;
+                *seekPreRollSize = value;
             } else {
                 i++;
             }
         }
+        return found;
+    } else {
+        return false;  // it isn't in either format
     }
 }
 
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h b/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
index 9bffccb..29037af 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
@@ -25,22 +25,37 @@
 namespace android {
 
 /* Constants used for delimiting Opus CSD */
-#define AOPUS_CSD_CODEC_DELAY_MARKER "AOPUSDLY"
-#define AOPUS_CSD_SEEK_PREROLL_MARKER "AOPUSPRL"
-#define AOPUS_CSD_SIZE 8
-#define AOPUS_LENGTH 8
+#define AOPUS_CSD_MARKER_PREFIX "AOPUS"
+#define AOPUS_CSD_MARKER_PREFIX_SIZE (sizeof(AOPUS_CSD_MARKER_PREFIX) - 1)
+#define AOPUS_CSD_OPUS_HEADER_MARKER AOPUS_CSD_MARKER_PREFIX "HDR"
+#define AOPUS_CSD_CODEC_DELAY_MARKER AOPUS_CSD_MARKER_PREFIX "DLY"
+#define AOPUS_CSD_SEEK_PREROLL_MARKER AOPUS_CSD_MARKER_PREFIX "PRL"
 #define AOPUS_MARKER_SIZE 8
-#define AOPUS_LENGTH_SIZE 8
-#define AOPUS_TOTAL_CSD_SIZE \
-    ((AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + (AOPUS_CSD_SIZE))
-#define AOPUS_CSD0_MINSIZE 19
-#define AOPUS_UNIFIED_CSD_MINSIZE \
-    ((AOPUS_CSD0_MINSIZE) + 2 * (AOPUS_TOTAL_CSD_SIZE))
+#define AOPUS_LENGTH_SIZE sizeof(uint64_t)
+#define AOPUS_CSD_CODEC_DELAY_SIZE \
+     (AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + sizeof(uint64_t)
+#define AOPUS_CSD_SEEK_PREROLL_SIZE \
+     (AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + sizeof(uint64_t)
 
-/* CSD0 at max can be 22 bytes + max number of channels (255) */
-#define AOPUS_CSD0_MAXSIZE 277
+/* OpusHead csd minimum size is 19 */
+#define AOPUS_OPUSHEAD_MINSIZE 19
+#define AOPUS_CSD_OPUSHEAD_MINSIZE \
+    (AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + (AOPUS_OPUSHEAD_MINSIZE)
+
+#define AOPUS_UNIFIED_CSD_MINSIZE \
+    ((AOPUS_CSD_OPUSHEAD_MINSIZE) + \
+     (AOPUS_CSD_CODEC_DELAY_SIZE) + \
+     (AOPUS_CSD_SEEK_PREROLL_SIZE))
+
+/* OpusHead csd at max can be AOPUS_OPUSHEAD_MINSIZE + 2 + max number of channels (255) */
+#define AOPUS_OPUSHEAD_MAXSIZE ((AOPUS_OPUSHEAD_MINSIZE) + 2 + 255)
+#define AOPUS_CSD_OPUSHEAD_MAXSIZE \
+    (AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + (AOPUS_OPUSHEAD_MAXSIZE)
+
 #define AOPUS_UNIFIED_CSD_MAXSIZE \
-    ((AOPUS_CSD0_MAXSIZE) + 2 * (AOPUS_TOTAL_CSD_SIZE))
+    ((AOPUS_CSD_OPUSHEAD_MAXSIZE) + \
+     (AOPUS_CSD_CODEC_DELAY_SIZE) + \
+     (AOPUS_CSD_SEEK_PREROLL_SIZE))
 
 struct OpusHeader {
     int channels;
@@ -54,13 +69,14 @@
 
 bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header);
 int WriteOpusHeader(const OpusHeader &header, int input_sample_rate, uint8_t* output, size_t output_size);
-void GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
+bool GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
                           void **opusHeadBuf, size_t *opusHeadSize,
                           void **codecDelayBuf, size_t *codecDelaySize,
                           void **seekPreRollBuf, size_t *seekPreRollSize);
 int WriteOpusHeaders(const OpusHeader &header, int inputSampleRate,
                      uint8_t* output, size_t outputSize, uint64_t codecDelay,
                      uint64_t seekPreRoll);
+bool IsOpusHeader(const uint8_t *data, size_t data_size);
 }  // namespace android
 
 #endif  // OPUS_HEADER_H_
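Taken together, WriteOpusHeaders() now emits three marker-delimited sections and GetOpusHeaderBuffers() walks them back out. A self-contained sketch of the resulting unified CSD layout (not the stagefright implementation; the marker strings match the new defines above, and lengths are stored in native byte order as in the writer):

#include <cstdint>
#include <vector>

// Illustrative builder for the unified Opus CSD layout:
//   "AOPUSHDR" | u64 length | OpusHead bytes
//   "AOPUSDLY" | u64 length (= 8) | codec delay in ns
//   "AOPUSPRL" | u64 length (= 8) | seek pre-roll in ns
static void appendSection(std::vector<uint8_t>& out, const char* marker /* 8 chars */,
                          const void* payload, uint64_t length) {
    out.insert(out.end(), marker, marker + 8);
    const uint8_t* len = reinterpret_cast<const uint8_t*>(&length);
    out.insert(out.end(), len, len + sizeof(length));
    const uint8_t* p = reinterpret_cast<const uint8_t*>(payload);
    out.insert(out.end(), p, p + length);
}

std::vector<uint8_t> buildUnifiedCsd(const std::vector<uint8_t>& opusHead,
                                     uint64_t codecDelayNs, uint64_t seekPreRollNs) {
    std::vector<uint8_t> csd;
    appendSection(csd, "AOPUSHDR", opusHead.data(), opusHead.size());
    appendSection(csd, "AOPUSDLY", &codecDelayNs, sizeof(codecDelayNs));
    appendSection(csd, "AOPUSPRL", &seekPreRollNs, sizeof(seekPreRollNs));
    return csd;
}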
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index b91edcd..5af7b23 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -1526,6 +1526,7 @@
                 header, &frameSize, &samplingRate, &numChannels,
                 &bitrate, &numSamples)) {
         ALOGE("Failed to get audio frame size");
+        mBuffer->setRange(0, 0);
         return NULL;
     }
 
@@ -1550,6 +1551,22 @@
         return NULL;
     }
 
+    if (mFormat != NULL) {
+        const char *mime;
+        if (mFormat->findCString(kKeyMIMEType, &mime)) {
+            if ((layer == 1) && strcmp (mime, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I)) {
+                ALOGE("Audio layer is not MPEG_LAYER_I");
+                return NULL;
+            } else if ((layer == 2) && strcmp (mime, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)) {
+                ALOGE("Audio layer is not MPEG_LAYER_II");
+                return NULL;
+            } else if ((layer == 3) && strcmp (mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
+                ALOGE("Audio layer is not AUDIO_MPEG");
+                return NULL;
+            }
+        }
+    }
+
     accessUnit->meta()->setInt64("timeUs", timeUs);
     accessUnit->meta()->setInt32("isSync", 1);
 
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
index 82a0631..4302aee 100644
--- a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
@@ -423,6 +423,11 @@
             CHECK_LE(offset + (mOtherDataLenBits / 8), buffer->size());
             offset += mOtherDataLenBits / 8;
         }
+
+        if (i < mNumSubFrames && offset >= buffer->size()) {
+            ALOGW("Skip subframes after %d, total %d", (int)i, (int)mNumSubFrames);
+            break;
+        }
     }
 
     if (offset < buffer->size()) {
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index f01947a..8377723 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -21,6 +21,7 @@
         "libutils",
         "libbinder",
         "libandroidicu",
+        "android.hardware.media.omx@1.0",
     ],
 
     static_libs: [
@@ -33,6 +34,9 @@
         "frameworks/av/services/mediaresourcemanager",
     ],
 
+    // back to 32-bit, b/126502613
+    compile_multilib: "32",
+
     init_rc: ["mediaserver.rc"],
 
     cflags: [
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 0b274a7..f9f1acc 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -85,7 +85,6 @@
         "libutils",
         "libcutils",
         "libnativewindow",
-        "libandroid_runtime",
         "libbinder",
         "libhidlbase",
         "libgui",
@@ -94,6 +93,12 @@
         "libmediandk_utils",
     ],
 
+    required: [
+        // libmediandk may be used by Java and non-Java things. When lower-level things use it,
+        // they shouldn't have to take on the cost of loading libandroid_runtime.
+        "libandroid_runtime",
+    ],
+
     export_include_dirs: ["include"],
 
     product_variables: {
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index b010aa9..bcc7ff3 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -25,7 +25,7 @@
 #include <cutils/atomic.h>
 #include <utils/Log.h>
 #include <android_media_Utils.h>
-#include <android_runtime/android_view_Surface.h>
+#include <ui/PublicFormat.h>
 #include <private/android/AHardwareBufferHelpers.h>
 #include <grallocusage/GrallocUsageConversion.h>
 #include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
@@ -272,8 +272,8 @@
 media_status_t
 AImageReader::init() {
     PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
-    mHalFormat = android_view_Surface_mapPublicFormatToHalFormat(publicFormat);
-    mHalDataSpace = android_view_Surface_mapPublicFormatToHalDataspace(publicFormat);
+    mHalFormat = mapPublicFormatToHalFormat(publicFormat);
+    mHalDataSpace = mapPublicFormatToHalDataspace(publicFormat);
     mHalUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
 
     sp<IGraphicBufferProducer> gbProducer;
diff --git a/media/ndk/NdkMediaCrypto.cpp b/media/ndk/NdkMediaCrypto.cpp
index b8af5ff..ce2c660 100644
--- a/media/ndk/NdkMediaCrypto.cpp
+++ b/media/ndk/NdkMediaCrypto.cpp
@@ -29,7 +29,6 @@
 #include <binder/IServiceManager.h>
 #include <media/ICrypto.h>
 #include <media/IMediaDrmService.h>
-#include <android_runtime/AndroidRuntime.h>
 #include <android_util_Binder.h>
 
 #include <jni.h>
diff --git a/media/ndk/NdkMediaDataSource.cpp b/media/ndk/NdkMediaDataSource.cpp
index 1abee93..0891f2a 100644
--- a/media/ndk/NdkMediaDataSource.cpp
+++ b/media/ndk/NdkMediaDataSource.cpp
@@ -23,9 +23,7 @@
 #include <jni.h>
 #include <unistd.h>
 
-#include <android_runtime/AndroidRuntime.h>
-#include <android_util_Binder.h>
-#include <binder/IServiceManager.h>
+#include <binder/IBinder.h>
 #include <cutils/properties.h>
 #include <utils/Log.h>
 #include <utils/StrongPointer.h>
@@ -41,8 +39,67 @@
 #include "../../libstagefright/include/NuCachedSource2.h"
 #include "NdkMediaDataSourceCallbacksPriv.h"
 
+#include <mutex> // std::call_once, std::once_flag
+#include <dlfcn.h> // dlopen
+
 using namespace android;
 
+// load libandroid_runtime.so lazily.
+// A vendor process may use libmediandk but should not depend on libandroid_runtime.
+// TODO(jooyung): remove duplicate (b/125550121)
+// frameworks/native/libs/binder/ndk/ibinder_jni.cpp
+namespace {
+
+typedef JNIEnv* (*getJNIEnv_t)();
+typedef sp<IBinder> (*ibinderForJavaObject_t)(JNIEnv* env, jobject obj);
+
+getJNIEnv_t getJNIEnv_;
+ibinderForJavaObject_t ibinderForJavaObject_;
+
+std::once_flag mLoadFlag;
+
+void load() {
+    std::call_once(mLoadFlag, []() {
+        void* handle = dlopen("libandroid_runtime.so", RTLD_LAZY);
+        if (handle == nullptr) {
+            ALOGE("Could not open libandroid_runtime.");
+            return;
+        }
+
+        getJNIEnv_ = reinterpret_cast<getJNIEnv_t>(
+                dlsym(handle, "_ZN7android14AndroidRuntime9getJNIEnvEv"));
+        if (getJNIEnv_ == nullptr) {
+            ALOGE("Could not find AndroidRuntime::getJNIEnv.");
+            // no return
+        }
+
+        ibinderForJavaObject_ = reinterpret_cast<ibinderForJavaObject_t>(
+                dlsym(handle, "_ZN7android20ibinderForJavaObjectEP7_JNIEnvP8_jobject"));
+        if (ibinderForJavaObject_ == nullptr) {
+            ALOGE("Could not find ibinderForJavaObject.");
+            // no return
+        }
+    });
+}
+
+JNIEnv* getJNIEnv() {
+    load();
+    if (getJNIEnv_ == nullptr) {
+        return nullptr;
+    }
+    return (getJNIEnv_)();
+}
+
+sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj) {
+    load();
+    if (ibinderForJavaObject_ == nullptr) {
+        return nullptr;
+    }
+    return (ibinderForJavaObject_)(env, obj);
+}
+
+} // namespace
+
 struct AMediaDataSource {
     void *userdata;
     AMediaDataSourceReadAt readAt;
@@ -124,9 +181,14 @@
     if (obj == NULL) {
         return NULL;
     }
+    sp<IBinder> binder;
     switch (version) {
         case 1:
-            return interface_cast<IMediaHTTPService>(ibinderForJavaObject(env, obj));
+            binder = ibinderForJavaObject(env, obj);
+            if (binder == NULL) {
+                return NULL;
+            }
+            return interface_cast<IMediaHTTPService>(binder);
         case 2:
             return new JMedia2HTTPService(env, obj);
         default:
@@ -179,7 +241,7 @@
 
     switch (version) {
         case 1:
-            env = AndroidRuntime::getJNIEnv();
+            env = getJNIEnv();
             clazz = "android/media/MediaHTTPService";
             method = "createHttpServiceBinderIfNecessary";
             signature = "(Ljava/lang/String;)Landroid/os/IBinder;";
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 28e4f12..c83b255 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -34,7 +34,6 @@
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/NuMediaExtractor.h>
 #include <media/IMediaHTTPService.h>
-#include <android_runtime/AndroidRuntime.h>
 #include <android_util_Binder.h>
 
 #include <jni.h>
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 7cc7f16..768a7a9 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -26,7 +26,6 @@
 #include <utils/StrongPointer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <android_runtime/AndroidRuntime.h>
 #include <android_util_Binder.h>
 
 #include <jni.h>
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index e79926d..d1992bf 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -30,7 +30,6 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaMuxer.h>
 #include <media/IMediaHTTPService.h>
-#include <android_runtime/AndroidRuntime.h>
 #include <android_util_Binder.h>
 
 #include <jni.h>
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 599c446..2fb24f5 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -22,6 +22,9 @@
 #include <binder/PermissionCache.h>
 #include "mediautils/ServiceUtilities.h"
 
+#include <iterator>
+#include <algorithm>
+
 /* When performing permission checks we do not use permission cache for
  * runtime permissions (protection level dangerous) as they may change at
  * runtime. All other permissions (protection level normal and dangerous)
@@ -220,4 +223,85 @@
     return NO_ERROR;
 }
 
+sp<content::pm::IPackageManagerNative> MediaPackageManager::retreivePackageManager() {
+    const sp<IServiceManager> sm = defaultServiceManager();
+    if (sm == nullptr) {
+        ALOGW("%s: failed to retrieve defaultServiceManager", __func__);
+        return nullptr;
+    }
+    sp<IBinder> packageManager = sm->checkService(String16(nativePackageManagerName));
+    if (packageManager == nullptr) {
+        ALOGW("%s: failed to retrieve native package manager", __func__);
+        return nullptr;
+    }
+    return interface_cast<content::pm::IPackageManagerNative>(packageManager);
+}
+
+std::optional<bool> MediaPackageManager::doIsAllowed(uid_t uid) {
+    if (mPackageManager == nullptr) {
+        /** Cannot fetch the package manager at construction as it may not yet be registered. */
+        mPackageManager = retreivePackageManager();
+        if (mPackageManager == nullptr) {
+            ALOGW("%s: Playback capture is denied as package manager is not reachable", __func__);
+            return std::nullopt;
+        }
+    }
+
+    std::vector<std::string> packageNames;
+    auto status = mPackageManager->getNamesForUids({(int32_t)uid}, &packageNames);
+    if (!status.isOk()) {
+        ALOGW("%s: Playback capture is denied for uid %u as the package names could not be "
+              "retrieved from the package manager: %s", __func__, uid, status.toString8().c_str());
+        return std::nullopt;
+    }
+    if (packageNames.empty()) {
+        ALOGW("%s: Playback capture for uid %u is denied as no package name could be retrieved "
+              "from the package manager: %s", __func__, uid, status.toString8().c_str());
+        return std::nullopt;
+    }
+    std::vector<bool> isAllowed;
+    status = mPackageManager->isAudioPlaybackCaptureAllowed(packageNames, &isAllowed);
+    if (!status.isOk()) {
+        ALOGW("%s: Playback capture is denied for uid %u as the manifest property could not be "
+              "retrieved from the package manager: %s", __func__, uid, status.toString8().c_str());
+        return std::nullopt;
+    }
+    if (packageNames.size() != isAllowed.size()) {
+        ALOGW("%s: Playback capture is denied for uid %u as the package manager returned incoherent"
+              " response size: %zu != %zu", __func__, uid, packageNames.size(), isAllowed.size());
+        return std::nullopt;
+    }
+
+    // Zip together packageNames and isAllowed for debug logs
+    Packages& packages = mDebugLog[uid];
+    packages.resize(packageNames.size()); // Reuse all objects
+    std::transform(begin(packageNames), end(packageNames), begin(isAllowed),
+                   begin(packages), [] (auto& name, bool isAllowed) -> Package {
+                       return {std::move(name), isAllowed};
+                   });
+
+    // Only allow playback record if all packages in this UID allow it
+    bool playbackCaptureAllowed = std::all_of(begin(isAllowed), end(isAllowed),
+                                                  [](bool b) { return b; });
+
+    return playbackCaptureAllowed;
+}
+
+void MediaPackageManager::dump(int fd, int spaces) const {
+    dprintf(fd, "%*sAllow playback capture log:\n", spaces, "");
+    if (mPackageManager == nullptr) {
+        dprintf(fd, "%*sNo package manager\n", spaces + 2, "");
+    }
+    dprintf(fd, "%*sPackage manager errors: %u\n", spaces + 2, "", mPackageManagerErrors);
+
+    for (const auto& uidCache : mDebugLog) {
+        for (const auto& package : std::get<Packages>(uidCache)) {
+            dprintf(fd, "%*s- uid=%5u, allowPlaybackCapture=%s, packageName=%s\n", spaces + 2, "",
+                    std::get<const uid_t>(uidCache),
+                    package.playbackCaptureAllowed ? "true " : "false",
+                    package.name.c_str());
+        }
+    }
+}
+
 } // namespace android
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 98f54c2..94370ee 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -14,13 +14,22 @@
  * limitations under the License.
  */
 
+#ifndef ANDROID_MEDIAUTILS_SERVICEUTILITIES_H
+#define ANDROID_MEDIAUTILS_SERVICEUTILITIES_H
+
 #include <unistd.h>
 
+#include <android/content/pm/IPackageManagerNative.h>
 #include <binder/IMemory.h>
 #include <binder/PermissionController.h>
 #include <cutils/multiuser.h>
 #include <private/android_filesystem_config.h>
 
+#include <map>
+#include <optional>
+#include <string>
+#include <vector>
+
 namespace android {
 
 // Audio permission utilities
@@ -72,4 +81,31 @@
 bool dumpAllowed();
 bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
 status_t checkIMemory(const sp<IMemory>& iMemory);
+
+class MediaPackageManager {
+public:
+    /** Query the PackageManager to check if all apps of a UID allow playback capture. */
+    bool allowPlaybackCapture(uid_t uid) {
+        auto result = doIsAllowed(uid);
+        if (!result) {
+            mPackageManagerErrors++;
+        }
+        return result.value_or(false);
+    }
+    void dump(int fd, int spaces = 0) const;
+private:
+    static constexpr const char* nativePackageManagerName = "package_native";
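+    /** Returns std::nullopt if the package manager could not be queried,
+     *  otherwise whether every package of the uid allows playback capture. */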
+    std::optional<bool> doIsAllowed(uid_t uid);
+    sp<content::pm::IPackageManagerNative> retreivePackageManager();
+    sp<content::pm::IPackageManagerNative> mPackageManager; // To check apps manifest
+    uint_t mPackageManagerErrors = 0;
+    struct Package {
+        std::string name;
+        bool playbackCaptureAllowed = false;
+    };
+    using Packages = std::vector<Package>;
+    std::map<uid_t, Packages> mDebugLog;
+};
 }
+
+#endif // ANDROID_MEDIAUTILS_SERVICEUTILITIES_H
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 91b7587..40980a6 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -32,6 +32,7 @@
     libbinder \
     libaudioclient \
     libmedialogservice \
+    libmediametrics \
     libmediautils \
     libnbaio \
     libnblog \
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 977f93b..f4a31ed 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -488,6 +488,8 @@
         sp<IBinder> binder = IInterface::asBinder(mPowerManager);
         binder->unlinkToDeath(mDeathRecipient);
     }
+
+    sendStatistics(true /* force */);
 }
 
 status_t AudioFlinger::ThreadBase::readyToRun()
@@ -571,6 +573,15 @@
 // sendIoConfigEvent_l() must be called with ThreadBase::mLock held
 void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid)
 {
+    // The audio statistics history is exponentially weighted to forget events
+    // about five or more seconds in the past.  In order to have
+    // crisper statistics for mediametrics, we reset the statistics on
+    // an IoConfigEvent, to reflect different properties for a new device.
+    mIoJitterMs.reset();
+    mLatencyMs.reset();
+    mProcessTimeMs.reset();
+    mTimestampVerifier.discontinuity();
+
     sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid);
     sendConfigEvent_l(configEvent);
 }
@@ -842,6 +853,12 @@
                 mIoJitterMs.toString().c_str());
     }
 
+    if (mLatencyMs.getN() > 0) {
+        dprintf(fd, "  Threadloop %s latency stats: %s\n",
+                isOutput() ? "write" : "read",
+                mLatencyMs.toString().c_str());
+    }
+
     if (locked) {
         mLock.unlock();
     }
@@ -1645,6 +1662,65 @@
     mWaitWorkCV.broadcast();
 }
 
+// Call only from threadLoop() or when it is idle.
+// Do not call from high performance code as this may do binder rpc to the MediaMetrics service.
+void AudioFlinger::ThreadBase::sendStatistics(bool force)
+{
+    // Do not log if we have no stats.
+    // We choose the timestamp verifier because it is the most likely item to be present.
+    const int64_t nstats = mTimestampVerifier.getN() - mLastRecordedTimestampVerifierN;
+    if (nstats == 0) {
+        return;
+    }
+
+    // Don't log more frequently than once per 12 hours.
+    // We use BOOTTIME to include suspend time.
+    const int64_t timeNs = systemTime(SYSTEM_TIME_BOOTTIME);
+    const int64_t sinceNs = timeNs - mLastRecordedTimeNs; // ok if mLastRecordedTimeNs = 0
+    if (!force && sinceNs <= 12 * NANOS_PER_HOUR) {
+        return;
+    }
+
+    mLastRecordedTimestampVerifierN = mTimestampVerifier.getN();
+    mLastRecordedTimeNs = timeNs;
+
+    std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("audiothread"));
+
+#define MM_PREFIX "android.media.audiothread." // avoid cut-n-paste errors.
+
+    // thread configuration
+    item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
+    // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
+    item->setCString(MM_PREFIX "type", threadTypeToString(mType));
+    item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
+    item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
+    item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
+    item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
+    item->setCString(MM_PREFIX "outDevice", toString(mOutDevice).c_str());
+    item->setCString(MM_PREFIX "inDevice", toString(mInDevice).c_str());
+
+    // thread statistics
+    if (mIoJitterMs.getN() > 0) {
+        item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
+        item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
+    }
+    if (mProcessTimeMs.getN() > 0) {
+        item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
+        item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
+    }
+    const auto tsjitter = mTimestampVerifier.getJitterMs();
+    if (tsjitter.getN() > 0) {
+        item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
+        item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
+    }
+    if (mLatencyMs.getN() > 0) {
+        item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
+        item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
+    }
+
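+    // Deliver the item to the MediaMetrics service (binder call).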
+    item->selfrecord();
+}
+
 // ----------------------------------------------------------------------------
 //      Playback
 // ----------------------------------------------------------------------------
@@ -3380,6 +3456,14 @@
                     }
                 }
             }
+
+            if (audio_has_proportional_frames(mFormat)) {
+                const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
+                if (latencyMs != 0.) { // note 0. means timestamp is empty.
+                    mLatencyMs.add(latencyMs);
+                }
+            }
+
             } // if (mType ... ) { // no indentation
 #if 0
             // logFormat example
@@ -3433,6 +3517,7 @@
                         LOG_AUDIO_STATE();
                     }
                     mStandby = true;
+                    sendStatistics(false /* force */);
                 }
 
                 if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
@@ -5296,13 +5381,6 @@
     dprintf(fd, "  Master balance: %f (%s)\n", mMasterBalance.load(),
             (hasFastMixer() ? std::to_string(mFastMixer->getMasterBalance())
                             : mBalance.toString()).c_str());
-    const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
-    if (latencyMs != 0.) {
-        dprintf(fd, "  NormalMixer latency ms: %.2lf\n", latencyMs);
-    } else {
-        dprintf(fd, "  NormalMixer latency ms: unavail\n");
-    }
-
     if (hasFastMixer()) {
         dprintf(fd, "  FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
 
@@ -7042,6 +7120,15 @@
                 mTimestampVerifier.error();
             }
         }
+
+        // From the timestamp, input read latency is negative output write latency.
+        const audio_input_flags_t flags = mInput != NULL ? mInput->flags : AUDIO_INPUT_FLAG_NONE;
+        const double latencyMs = RecordTrack::checkServerLatencySupported(mFormat, flags)
+                ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
+        if (latencyMs != 0.) { // note 0. means timestamp is empty.
+            mLatencyMs.add(latencyMs);
+        }
+
         // Use this to track timestamp information
         // ALOGD("%s", mTimestamp.toString().c_str());
 
@@ -7734,14 +7821,6 @@
         (void)input->stream->dump(fd);
     }
 
-    const double latencyMs = RecordTrack::checkServerLatencySupported(mFormat, flags)
-            ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
-    if (latencyMs != 0.) {
-        dprintf(fd, "  NormalRecord latency ms: %.2lf\n", latencyMs);
-    } else {
-        dprintf(fd, "  NormalRecord latency ms: unavail\n");
-    }
-
     dprintf(fd, "  Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
     dprintf(fd, "  Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
 
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 4968829..97aa9f0 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -399,6 +399,9 @@
 
     virtual     void                dump(int fd, const Vector<String16>& args) = 0;
 
+                // deliver stats to mediametrics.
+                void                sendStatistics(bool force);
+
     mutable     Mutex                   mLock;
 
 protected:
@@ -520,6 +523,11 @@
                 // This should be read under ThreadBase lock (if not on the threadLoop thread).
                 audio_utils::Statistics<double> mIoJitterMs{0.995 /* alpha */};
                 audio_utils::Statistics<double> mProcessTimeMs{0.995 /* alpha */};
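+                // Server latency in ms derived from timestamps
+                // (write latency for playback threads, read latency for record threads).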
+                audio_utils::Statistics<double> mLatencyMs{0.995 /* alpha */};
+
+                // Save the last count when we delivered statistics to mediametrics.
+                int64_t                 mLastRecordedTimestampVerifierN = 0;
+                int64_t                 mLastRecordedTimeNs = 0;  // BOOTTIME to include suspend.
 
                 bool                    mIsMsdDevice = false;
                 // A condition that must be evaluated by the thread loop has changed and
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 65f799e..ad78a45 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -700,8 +700,13 @@
         const AudioBufferProvider::Buffer& sourceBuffer) {
     auto start = std::chrono::steady_clock::now();
     const size_t frameCount = sourceBuffer.frameCount;
-    for (auto& sink : mTeePatches) {
-        RecordThread::PatchRecord* patchRecord = sink.patchRecord.get();
+    if (frameCount == 0) {
+        return;  // No audio to intercept.
+        // Additionally, PatchProxyBufferProvider::obtainBuffer (called by PatchTrack::getNextBuffer)
+        // does not allow a 0 frame size request, unlike getNextBuffer.
+    }
+    for (auto& teePatch : mTeePatches) {
+        RecordThread::PatchRecord* patchRecord = teePatch.patchRecord.get();
 
         size_t framesWritten = writeFrames(patchRecord, sourceBuffer.i8, frameCount);
         // On buffer wrap, the buffer frame count will be less than requested,
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index f72f44a..9e4eebc 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -90,7 +90,7 @@
 LOCAL_SHARED_LIBRARIES += libmedia_helper
 LOCAL_SHARED_LIBRARIES += libmediametrics
 
-LOCAL_SHARED_LIBRARIES += libhidlbase libxml2
+LOCAL_SHARED_LIBRARIES += libbinder libhidlbase libxml2
 
 ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index d61188f..acbfc9e 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -58,10 +58,12 @@
     typedef enum {
         API_INPUT_INVALID = -1,
         API_INPUT_LEGACY  = 0,// e.g. audio recording from a microphone
-        API_INPUT_MIX_CAPTURE,// used for "remote submix", capture of the media to play it remotely
+        API_INPUT_MIX_CAPTURE,// used for "remote submix" legacy mode (no DAP),
+                              // capture of the media to play it remotely
         API_INPUT_MIX_EXT_POLICY_REROUTE,// used for platform audio rerouting, where mixes are
                                          // handled by external and dynamically installed
                                          // policies which reroute audio mixes
+        API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK,  // used for playback capture with a MediaProjection
         API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
     } input_type_t;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
index 996347b..4af93e1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
@@ -20,6 +20,7 @@
 #include <utils/RefBase.h>
 #include <utils/String8.h>
 #include <system/audio.h>
+#include <vector>
 
 namespace android {
 
@@ -59,12 +60,36 @@
     void getDefaultConfig(struct audio_gain_config *config);
     status_t checkConfig(const struct audio_gain_config *config);
 
+    void setUseForVolume(bool canUseForVolume) { mUseForVolume = canUseForVolume; }
+    bool canUseForVolume() const { return mUseForVolume; }
+
     const struct audio_gain &getGain() const { return mGain; }
 
 private:
     int               mIndex;
     struct audio_gain mGain;
     bool              mUseInChannelMask;
+    bool              mUseForVolume = false;
+};
+
+class AudioGains : public std::vector<sp<AudioGain> >
+{
+public:
+    bool canUseForVolume() const
+    {
+        for (const auto &gain: *this) {
+            if (gain->canUseForVolume()) {
+                return true;
+            }
+        }
+        return false;
+    }
+
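+    // Vector-compatible add() so AudioGains can replace the former
+    // AudioGainCollection in the serializer collection traits.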
+    int32_t add(const sp<AudioGain> gain)
+    {
+        push_back(gain);
+        return 0;
+    }
 };
 
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index cf9519b..704f404 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -98,7 +98,7 @@
         ActivityTracking::dump(dst, spaces);
         dst->appendFormat(", Volume: %.03f, MuteCount: %02d\n", mCurVolumeDb, mMuteCount);
     }
-    void setVolume(float volume) { mCurVolumeDb = volume; }
+    void setVolume(float volumeDb) { mCurVolumeDb = volumeDb; }
     float getVolume() const { return mCurVolumeDb; }
 
 private:
@@ -156,7 +156,7 @@
     virtual bool isDuplicated() const { return false; }
     virtual uint32_t latency() { return 0; }
     virtual bool isFixedVolume(audio_devices_t device);
-    virtual bool setVolume(float volume,
+    virtual bool setVolume(float volumeDb,
                            audio_stream_type_t stream,
                            audio_devices_t device,
                            uint32_t delayMs,
@@ -219,10 +219,10 @@
     {
         return mVolumeActivities[vs].decMuteCount();
     }
-    void setCurVolume(VolumeSource vs, float volume)
+    void setCurVolume(VolumeSource vs, float volumeDb)
     {
         // Even if not activity for this group registered, need to create anyway
-        mVolumeActivities[vs].setVolume(volume);
+        mVolumeActivities[vs].setVolume(volumeDb);
     }
     float getCurVolume(VolumeSource vs) const
     {
@@ -327,7 +327,7 @@
             setClientActive(client, false);
         }
     }
-    virtual bool setVolume(float volume,
+    virtual bool setVolume(float volumeDb,
                            audio_stream_type_t stream,
                            audio_devices_t device,
                            uint32_t delayMs,
@@ -401,7 +401,7 @@
 
             void dump(String8 *dst) const override;
 
-    virtual bool setVolume(float volume,
+    virtual bool setVolume(float volumeDb,
                            audio_stream_type_t stream,
                            audio_devices_t device,
                            uint32_t delayMs,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 1b5a2d6..2d182bd 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -18,6 +18,7 @@
 
 #include "AudioCollections.h"
 #include "AudioProfile.h"
+#include "AudioGain.h"
 #include "HandleGenerator.h"
 #include <utils/String8.h>
 #include <utils/Vector.h>
@@ -29,9 +30,7 @@
 namespace android {
 
 class HwModule;
-class AudioGain;
 class AudioRoute;
-typedef Vector<sp<AudioGain> > AudioGainCollection;
 
 class AudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
 {
@@ -49,8 +48,8 @@
 
     virtual const String8 getTagName() const = 0;
 
-    void setGains(const AudioGainCollection &gains) { mGains = gains; }
-    const AudioGainCollection &getGains() const { return mGains; }
+    void setGains(const AudioGains &gains) { mGains = gains; }
+    const AudioGains &getGains() const { return mGains; }
 
     virtual void setFlags(uint32_t flags)
     {
@@ -138,7 +137,7 @@
 
     void log(const char* indent) const;
 
-    AudioGainCollection mGains; // gain controllers
+    AudioGains mGains; // gain controllers
 
 private:
     void pickChannelMask(audio_channel_mask_t &channelMask, const ChannelsVector &channelMasks) const;
@@ -165,6 +164,8 @@
         return (other != 0) && (other->getAudioPort() != 0) && (getAudioPort() != 0) &&
                 (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
     }
+    bool hasGainController(bool canUseForVolume = false) const;
+
     unsigned int mSamplingRate = 0u;
     audio_format_t mFormat = AUDIO_FORMAT_INVALID;
     audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 7293bc4..fd33649 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -149,7 +149,7 @@
     return false;
 }
 
-bool AudioOutputDescriptor::setVolume(float volume,
+bool AudioOutputDescriptor::setVolume(float volumeDb,
                                       audio_stream_type_t stream,
                                       audio_devices_t device __unused,
                                       uint32_t delayMs,
@@ -158,9 +158,9 @@
     // We actually change the volume if:
     // - the float value returned by computeVolume() changed
     // - the force flag is set
-    if (volume != getCurVolume(static_cast<VolumeSource>(stream)) || force) {
-        ALOGV("setVolume() for stream %d, volume %f, delay %d", stream, volume, delayMs);
-        setCurVolume(static_cast<VolumeSource>(stream), volume);
+    if (volumeDb != getCurVolume(static_cast<VolumeSource>(stream)) || force) {
+        ALOGV("setVolume() for stream %d, volume %f, delay %d", stream, volumeDb, delayMs);
+        setCurVolume(static_cast<VolumeSource>(stream), volumeDb);
         return true;
     }
     return false;
@@ -388,15 +388,39 @@
             mFlags & AUDIO_OUTPUT_FLAG_FAST ? AUDIO_LATENCY_LOW : AUDIO_LATENCY_NORMAL;
 }
 
-bool SwAudioOutputDescriptor::setVolume(float volume,
+bool SwAudioOutputDescriptor::setVolume(float volumeDb,
                                         audio_stream_type_t stream,
                                         audio_devices_t device,
                                         uint32_t delayMs,
                                         bool force)
 {
-    if (!AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force)) {
+    if (!AudioOutputDescriptor::setVolume(volumeDb, stream, device, delayMs, force)) {
         return false;
     }
+    if (!devices().isEmpty()) {
+        // Assume the first device is representative when checking for gain controller availability
+        const auto &devicePort = devices().itemAt(0);
+        ALOGV("%s: device %s hasGC %d", __FUNCTION__,
+            devicePort->toString().c_str(), devices().itemAt(0)->hasGainController(true));
+        if (devicePort->hasGainController(true)) {
+            // @todo: default stream volume to max (0) when using HW Port gain?
+            float volumeAmpl = Volume::DbToAmpl(0);
+            mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+
+            AudioGains gains = devicePort->getGains();
+            int gainMinValueInMb = gains[0]->getMinValueInMb();
+            int gainMaxValueInMb = gains[0]->getMaxValueInMb();
+            int gainStepValueInMb = gains[0]->getStepValueInMb();
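+            // Convert dB to millibels, truncate to a multiple of the gain step, then clamp to the gain range.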
+            int gainValueMb = ((volumeDb * 100)/ gainStepValueInMb) * gainStepValueInMb;
+            gainValueMb = std::max(gainMinValueInMb, std::min(gainValueMb, gainMaxValueInMb));
+
+            audio_port_config config = {};
+            devicePort->toAudioPortConfig(&config);
+            config.config_mask = AUDIO_PORT_CONFIG_GAIN;
+            config.gain.values[0] = gainValueMb;
+            return mClientInterface->setAudioPortConfig(&config, 0) == NO_ERROR;
+        }
+    }
     // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is enabled
     float volumeAmpl = Volume::DbToAmpl(getCurVolume(static_cast<VolumeSource>(stream)));
     if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
@@ -591,13 +615,13 @@
 }
 
 
-bool HwAudioOutputDescriptor::setVolume(float volume,
+bool HwAudioOutputDescriptor::setVolume(float volumeDb,
                                         audio_stream_type_t stream,
                                         audio_devices_t device,
                                         uint32_t delayMs,
                                         bool force)
 {
-    bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force);
+    bool changed = AudioOutputDescriptor::setVolume(volumeDb, stream, device, delayMs, force);
 
     if (changed) {
       // TODO: use gain controller on source device if any to adjust volume
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 9fcf5e7..a66c695 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -479,4 +479,14 @@
             dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
 }
 
+bool AudioPortConfig::hasGainController(bool canUseForVolume) const
+{
+    sp<AudioPort> audioport = getAudioPort();
+    if (audioport == nullptr) {
+        return false;
+    }
+    return canUseForVolume ? audioport->getGains().canUseForVolume()
+                           : audioport->getGains().size() > 0;
+}
+
 } // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 81d3968..5f820c2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -64,7 +64,7 @@
     }
 };
 
-struct AudioGainTraits : public AndroidCollectionTraits<AudioGain, AudioGainCollection>
+struct AudioGainTraits : public AndroidCollectionTraits<AudioGain, AudioGains>
 {
     static constexpr const char *tag = "gain";
     static constexpr const char *collectionTag = "gains";
@@ -84,6 +84,9 @@
         static constexpr const char *minRampMs = "minRampMs";
         /** needed if mode AUDIO_GAIN_MODE_RAMP. */
         static constexpr const char *maxRampMs = "maxRampMs";
+        /** needed to allow use of setPortGain instead of setStreamVolume. */
+        static constexpr const char *useForVolume = "useForVolume";
+
     };
 
     static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
@@ -375,9 +378,14 @@
     if (!maxRampMsLiteral.empty() && convertTo(maxRampMsLiteral, maxRampMs)) {
         gain->setMaxRampInMs(maxRampMs);
     }
-    ALOGV("%s: adding new gain mode %08x channel mask %08x min mB %d max mB %d", __func__,
-          gain->getMode(), gain->getChannelMask(), gain->getMinValueInMb(),
-          gain->getMaxValueInMb());
+    std::string useForVolumeLiteral = getXmlAttribute(cur, Attributes::useForVolume);
+    bool useForVolume = false;
+    if (!useForVolumeLiteral.empty() && convertTo(useForVolumeLiteral, useForVolume)) {
+        gain->setUseForVolume(useForVolume);
+    }
+    ALOGV("%s: adding new gain mode %08x channel mask %08x min mB %d max mB %d UseForVolume: %d",
+          __func__, gain->getMode(), gain->getChannelMask(), gain->getMinValueInMb(),
+          gain->getMaxValueInMb(), useForVolume);
 
     if (gain->getMode() != 0) {
         return gain;
diff --git a/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml b/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml
new file mode 100644
index 0000000..57bd4f8
--- /dev/null
+++ b/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Input Audio HAL Audio Policy Configuration file -->
+<module name="a2dp" halVersion="2.0">
+    <mixPorts>
+        <mixPort name="a2dp input" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+        </mixPort>
+    </mixPorts>
+    <devicePorts>
+        <devicePort tagName="BT A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000"
+                     channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+        </devicePort>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="a2dp input"
+               sources="BT A2DP In"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index b4cc1d3..b28381b 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<!-- Copyright (C) 2015 The Android Open Source Project
+<!-- Copyright (C) 2019 The Android Open Source Project
 
      Licensed under the Apache License, Version 2.0 (the "License");
      you may not use this file except in compliance with the License.
@@ -173,8 +173,8 @@
 
         </module>
 
-        <!-- A2dp Audio HAL -->
-        <xi:include href="a2dp_audio_policy_configuration.xml"/>
+        <!-- A2dp Input Audio HAL -->
+        <xi:include href="a2dp_in_audio_policy_configuration.xml"/>
 
         <!-- Usb Audio HAL -->
         <xi:include href="usb_audio_policy_configuration.xml"/>
@@ -182,8 +182,8 @@
         <!-- Remote Submix Audio HAL -->
         <xi:include href="r_submix_audio_policy_configuration.xml"/>
 
-        <!-- Hearing aid Audio HAL -->
-        <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+        <!-- Bluetooth Audio HAL -->
+        <xi:include href="bluetooth_audio_policy_configuration.xml"/>
 
         <!-- MSD Audio HAL (optional) -->
         <xi:include href="msd_audio_policy_configuration.xml"/>
diff --git a/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml b/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml
new file mode 100644
index 0000000..b4cc1d3
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml
@@ -0,0 +1,211 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <!-- version section contains a “version” tag in the form “major.minor”, e.g. version=”1.0” -->
+
+    <!-- Global configuration Declaration -->
+    <globalConfiguration speaker_drc_enabled="true"/>
+
+
+    <!-- Modules section:
+        There is one section per audio HW module present on the platform.
+        Each module section will contain two mandatory tags for audio HAL “halVersion” and “name”.
+        The module names are the same as in current .conf file:
+                “primary”, “A2DP”, “remote_submix”, “USB”
+        Each module will contain the following sections:
+        “devicePorts”: a list of device descriptors for all input and output devices accessible via this
+        module.
+        This contains both permanently attached devices and removable devices.
+        “mixPorts”: listing all output and input streams exposed by the audio HAL
+        “routes”: list of possible connections between input and output devices or between stream and
+        devices.
+            "route": is defined by an attribute:
+                -"type": <mux|mix> means all sources are mutual exclusive (mux) or can be mixed (mix)
+                -"sink": the sink involved in this route
+                -"sources": all the sources than can be connected to the sink via vis route
+        “attachedDevices”: permanently attached devices.
+        The attachedDevices section is a list of devices names. The names correspond to device names
+        defined in <devicePorts> section.
+        “defaultOutputDevice”: device to be used by default when no policy rule applies
+    -->
+    <modules>
+        <!-- Primary Audio HAL -->
+        <module name="primary" halVersion="3.0">
+            <attachedDevices>
+                <item>Speaker</item>
+                <item>Built-In Mic</item>
+                <item>Built-In Back Mic</item>
+            </attachedDevices>
+            <defaultOutputDevice>Speaker</defaultOutputDevice>
+            <mixPorts>
+                <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="deep_buffer" role="source"
+                        flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="compressed_offload" role="source"
+                         flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+                    <profile name="" format="AUDIO_FORMAT_MP3"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+                    <profile name="" format="AUDIO_FORMAT_AAC"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+                    <profile name="" format="AUDIO_FORMAT_AAC_LC"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+                </mixPort>
+                <mixPort name="voice_tx" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </mixPort>
+                <mixPort name="primary input" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+                </mixPort>
+                <mixPort name="voice_rx" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+                </mixPort>
+            </mixPorts>
+            <devicePorts>
+                <!-- Output devices declaration, i.e. Sink DEVICE PORT -->
+                <devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
+                   <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                            samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+                </devicePort>
+                <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                    <gains>
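+                        <!-- Joint-mode hardware gain: -84 dB to +40 dB in 1 dB steps, default 0 dB -->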
+                        <gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT"
+                              minValueMB="-8400"
+                              maxValueMB="4000"
+                              defaultValueMB="0"
+                              stepValueMB="100"/>
+                    </gains>
+                </devicePort>
+                <devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </devicePort>
+                <devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </devicePort>
+                <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+                <devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+                <devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+                <devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" role="sink">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+                </devicePort>
+
+                <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+                </devicePort>
+                <devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+                </devicePort>
+                <devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+                             channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+                </devicePort>
+                <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+                </devicePort>
+                <devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+                </devicePort>
+            </devicePorts>
+            <!-- route declaration, i.e. list all available sources for a given sink -->
+            <routes>
+                <route type="mix" sink="Earpiece"
+                       sources="primary output,deep_buffer,BT SCO Headset Mic"/>
+                <route type="mix" sink="Speaker"
+                       sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+                <route type="mix" sink="Wired Headset"
+                       sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+                <route type="mix" sink="Wired Headphones"
+                       sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+                <route type="mix" sink="primary input"
+                       sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
+                <route type="mix" sink="Telephony Tx"
+                       sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic, voice_tx"/>
+                <route type="mix" sink="voice_rx"
+                       sources="Telephony Rx"/>
+            </routes>
+
+        </module>
+
+        <!-- A2dp Audio HAL -->
+        <xi:include href="a2dp_audio_policy_configuration.xml"/>
+
+        <!-- Usb Audio HAL -->
+        <xi:include href="usb_audio_policy_configuration.xml"/>
+
+        <!-- Remote Submix Audio HAL -->
+        <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+        <!-- Hearing aid Audio HAL -->
+        <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+
+        <!-- MSD Audio HAL (optional) -->
+        <xi:include href="msd_audio_policy_configuration.xml"/>
+
+    </modules>
+    <!-- End of Modules section -->
+
+    <!-- Volume section:
+        IMPORTANT NOTE: Volume tables have been moved to engine configuration.
+                        They are kept here for legacy support.
+                        The engine will fall back on these files if it does not provide its own.
+     -->
+
+    <xi:include href="audio_policy_volumes.xml"/>
+    <xi:include href="default_volume_tables.xml"/>
+
+    <!-- End of Volume section -->
+
+    <!-- Surround Sound configuration -->
+
+    <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+    <!-- End of Surround Sound configuration -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
new file mode 100644
index 0000000..ce78eb0
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+    <mixPorts>
+        <!-- A2DP Audio Ports -->
+        <mixPort name="a2dp output" role="source"/>
+        <!-- Hearing AIDs Audio Ports -->
+        <mixPort name="hearing aid output" role="source">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="24000,16000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </mixPort>
+    </mixPorts>
+    <devicePorts>
+        <!-- A2DP Audio Ports -->
+        <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+            <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                     samplingRates="44100,48000,88200,96000"
+                     channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+        </devicePort>
+        <!-- Hearing AIDs Audio Ports -->
+        <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+    </devicePorts>
+    <routes>
+        <route type="mix" sink="BT A2DP Out"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Headphones"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT A2DP Speaker"
+               sources="a2dp output"/>
+        <route type="mix" sink="BT Hearing Aid Out"
+               sources="hearing aid output"/>
+    </routes>
+</module>
diff --git a/services/audiopolicy/engine/common/Android.bp b/services/audiopolicy/engine/common/Android.bp
index e6ede07..d0775ad 100644
--- a/services/audiopolicy/engine/common/Android.bp
+++ b/services/audiopolicy/engine/common/Android.bp
@@ -17,3 +17,31 @@
     host_supported: true,
     export_include_dirs: ["include"],
 }
+
+cc_library_static {
+    name: "libaudiopolicyengine_common",
+    srcs: [
+        "src/EngineBase.cpp",
+        "src/ProductStrategy.cpp",
+        "src/VolumeCurve.cpp",
+        "src/VolumeGroup.cpp",
+    ],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+    header_libs: [
+        "libbase_headers",
+        "libaudiopolicycommon",
+        "libaudiopolicyengine_common_headers",
+        "libaudiopolicyengine_interface_headers",
+    ],
+    export_header_lib_headers: [
+        "libaudiopolicyengine_common_headers",
+    ],
+    static_libs: [
+        "libaudiopolicycomponents",
+        "libaudiopolicyengine_config",
+    ],
+}
diff --git a/services/audiopolicy/engine/config/Android.bp b/services/audiopolicy/engine/config/Android.bp
new file mode 100644
index 0000000..6e72f2a
--- /dev/null
+++ b/services/audiopolicy/engine/config/Android.bp
@@ -0,0 +1,31 @@
+cc_library_static {
+    name: "libaudiopolicyengine_config",
+    export_include_dirs: ["include"],
+    include_dirs: [
+        "external/libxml2/include",
+        "external/icu/icu4c/source/common",
+    ],
+    srcs: [
+        "src/EngineConfig.cpp",
+    ],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+    shared_libs: [
+        "libmedia_helper",
+        "libandroidicu",
+        "libxml2",
+        "libutils",
+        "liblog",
+        "libcutils",
+    ],
+    static_libs: [
+        "libaudiopolicycomponents",
+    ],
+    header_libs: [
+        "libaudio_system_headers",
+        "libaudiopolicycommon",
+    ],
+}
diff --git a/services/audiopolicy/engine/config/Android.mk b/services/audiopolicy/engine/config/Android.mk
deleted file mode 100644
index 0b292a5..0000000
--- a/services/audiopolicy/engine/config/Android.mk
+++ /dev/null
@@ -1,42 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-##################################################################
-# Component build
-##################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_EXPORT_C_INCLUDE_DIRS :=  $(LOCAL_PATH)/include
-
-LOCAL_C_INCLUDES := \
-    $(LOCAL_EXPORT_C_INCLUDE_DIRS) \
-    external/libxml2/include \
-    external/icu/icu4c/source/common
-
-LOCAL_SRC_FILES := \
-    src/EngineConfig.cpp
-
-LOCAL_CFLAGS += -Wall -Werror -Wextra
-
-LOCAL_SHARED_LIBRARIES := \
-    libmedia_helper \
-    libandroidicu \
-    libxml2 \
-    libutils \
-    liblog \
-    libcutils
-
-LOCAL_STATIC_LIBRARIES := \
-    libaudiopolicycomponents
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_MODULE := libaudiopolicyengineconfig
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_HEADER_LIBRARIES := \
-    libaudio_system_headers \
-    libaudiopolicycommon
-
-include $(BUILD_SHARED_LIBRARY)
-
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index 4eff6e6..84a4422 100644
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -12,10 +12,6 @@
     src/EngineInstance.cpp \
     src/Stream.cpp \
     src/InputSource.cpp \
-    ../engine/common/src/VolumeCurve.cpp \
-    ../engine/common/src/VolumeGroup.cpp \
-    ../engine/common/src/ProductStrategy.cpp \
-    ../engine/common/src/EngineBase.cpp
 
 audio_policy_engine_includes_common := \
     frameworks/av/services/audiopolicy/engineconfigurable/include \
@@ -37,7 +33,6 @@
 
 LOCAL_HEADER_LIBRARIES := \
     libaudiopolicycommon \
-    libaudiopolicyengine_common_headers \
     libaudiopolicyengine_interface_headers
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -47,13 +42,15 @@
 
 LOCAL_STATIC_LIBRARIES := \
     libaudiopolicypfwwrapper \
-    libaudiopolicycomponents
+    libaudiopolicycomponents \
+    libaudiopolicyengine_common \
+    libaudiopolicyengine_config \
 
 LOCAL_SHARED_LIBRARIES := \
-    libaudiopolicyengineconfig \
     liblog \
     libutils \
     liblog \
+    libcutils \
     libaudioutils \
     libparameter \
     libmedia_helper \
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
new file mode 100644
index 0000000..7b42c6a
--- /dev/null
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -0,0 +1,32 @@
+cc_library_shared {
+    name: "libaudiopolicyenginedefault",
+    export_include_dirs: ["include"],
+    srcs: [
+        "src/Engine.cpp",
+        "src/EngineInstance.cpp",
+    ],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+    local_include_dirs: ["include"],
+    header_libs: [
+        "libbase_headers",
+        "libaudiopolicycommon",
+        "libaudiopolicyengine_interface_headers",
+    ],
+    static_libs: [
+        "libaudiopolicycomponents",
+        "libaudiopolicyengine_common",
+        "libaudiopolicyengine_config",
+    ],
+    shared_libs: [
+        "liblog",
+        "libcutils",
+        "libutils",
+        "libmedia_helper",
+        "libaudiopolicy",
+        "libxml2",
+    ],
+}
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
deleted file mode 100644
index ebf383b..0000000
--- a/services/audiopolicy/enginedefault/Android.mk
+++ /dev/null
@@ -1,57 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-# Component build
-#######################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
-    src/Engine.cpp \
-    src/EngineInstance.cpp \
-    ../engine/common/src/VolumeCurve.cpp \
-    ../engine/common/src/ProductStrategy.cpp \
-    ../engine/common/src/EngineBase.cpp \
-    ../engine/common/src/VolumeGroup.cpp
-
-audio_policy_engine_includes_common := \
-    $(LOCAL_PATH)/include
-
-LOCAL_CFLAGS += \
-    -Wall \
-    -Werror \
-    -Wextra \
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := \
-    $(audio_policy_engine_includes_common)
-
-LOCAL_C_INCLUDES := \
-    $(audio_policy_engine_includes_common) \
-    $(TARGET_OUT_HEADERS)/hw \
-    $(call include-path-for, frameworks-av) \
-    $(call include-path-for, audio-utils) \
-    $(call include-path-for, bionic)
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_MODULE := libaudiopolicyenginedefault
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_HEADER_LIBRARIES := libbase_headers
-
-LOCAL_STATIC_LIBRARIES := \
-    libaudiopolicycomponents
-
-LOCAL_SHARED_LIBRARIES := \
-    liblog \
-    libcutils \
-    libutils \
-    libmedia_helper \
-    libaudiopolicyengineconfig \
-    libaudiopolicy
-
-LOCAL_HEADER_LIBRARIES := \
-    libaudiopolicycommon \
-    libaudiopolicyengine_common_headers \
-    libaudiopolicyengine_interface_headers
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index ce305a4..ea98253 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -33,8 +33,8 @@
 #define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
 #define AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME \
         "audio_policy_configuration_a2dp_offload_disabled.xml"
-#define AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME \
-        "audio_policy_configuration_bluetooth_hal_enabled.xml"
+#define AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME \
+        "audio_policy_configuration_bluetooth_legacy_hal.xml"
 
 #include <inttypes.h>
 #include <math.h>
@@ -479,36 +479,16 @@
                                     std::vector<audio_format_t> *formats)
 {
     ALOGV("getHwOffloadEncodingFormatsSupportedForA2DP()");
-    char *tok = NULL, *saveptr;
     status_t status = NO_ERROR;
-    char encoding_formats_list[PROPERTY_VALUE_MAX];
-    audio_format_t format = AUDIO_FORMAT_DEFAULT;
-    // FIXME This list should not come from a property but the supported encoded
-    // formats of declared A2DP devices in primary module
-    property_get("persist.bluetooth.a2dp_offload.cap", encoding_formats_list, "");
-    tok = strtok_r(encoding_formats_list, "-", &saveptr);
-    for (;tok != NULL; tok = strtok_r(NULL, "-", &saveptr)) {
-        if (strcmp(tok, "sbc") == 0) {
-            ALOGV("%s: SBC offload supported\n",__func__);
-            format = AUDIO_FORMAT_SBC;
-        } else if (strcmp(tok, "aptx") == 0) {
-            ALOGV("%s: APTX offload supported\n",__func__);
-            format = AUDIO_FORMAT_APTX;
-        } else if (strcmp(tok, "aptxhd") == 0) {
-            ALOGV("%s: APTX HD offload supported\n",__func__);
-            format = AUDIO_FORMAT_APTX_HD;
-        } else if (strcmp(tok, "ldac") == 0) {
-            ALOGV("%s: LDAC offload supported\n",__func__);
-            format = AUDIO_FORMAT_LDAC;
-        } else if (strcmp(tok, "aac") == 0) {
-            ALOGV("%s: AAC offload supported\n",__func__);
-            format = AUDIO_FORMAT_AAC;
-        } else {
-            ALOGE("%s: undefined token - %s\n",__func__, tok);
-            continue;
-        }
-        formats->push_back(format);
+    std::unordered_set<audio_format_t> formatSet;
+    sp<HwModule> primaryModule =
+            mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
+    DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypeMask(
+            AUDIO_DEVICE_OUT_ALL_A2DP);
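+    // Collect the union of encoded formats declared by the primary module's A2DP device ports.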
+    for (const auto& device : declaredDevices) {
+        formatSet.insert(device->encodedFormats().begin(), device->encodedFormats().end());
     }
+    formats->assign(formatSet.begin(), formatSet.end());
     return status;
 }
 
@@ -1042,7 +1022,8 @@
         }
     }
     if (*output == AUDIO_IO_HANDLE_NONE) {
-        *output = getOutputForDevices(outputDevices, session, *stream, config, flags);
+        *output = getOutputForDevices(outputDevices, session, *stream, config,
+                flags, attr->flags & AUDIO_FLAG_MUTE_HAPTIC);
     }
     if (*output == AUDIO_IO_HANDLE_NONE) {
         return INVALID_OPERATION;
@@ -1120,11 +1101,16 @@
         audio_session_t session,
         audio_stream_type_t stream,
         const audio_config_t *config,
-        audio_output_flags_t *flags)
+        audio_output_flags_t *flags,
+        bool forceMutingHaptic)
 {
     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
     status_t status;
 
+    // Discard haptic channel mask when forcing muting haptic channels.
+    audio_channel_mask_t channelMask = forceMutingHaptic
+            ? (config->channel_mask & ~AUDIO_CHANNEL_HAPTIC_ALL) : config->channel_mask;
+
     // open a direct output if required by specified parameters
     //force direct flag if offload flag is set: offloading implies a direct output stream
     // and all common behaviors are driven by checking only the direct flag
@@ -1161,7 +1147,7 @@
     // and not explicitly requested
     if (((*flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
             audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
-            audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
+            audio_channel_count_from_out_mask(channelMask) <= 2) {
         goto non_direct_output;
     }
 
@@ -1177,7 +1163,7 @@
         profile = getProfileForOutput(devices,
                                    config->sample_rate,
                                    config->format,
-                                   config->channel_mask,
+                                   channelMask,
                                    (audio_output_flags_t)*flags,
                                    true /* directOnly */);
     }
@@ -1191,7 +1177,7 @@
                 // and configured with same parameters
                 if ((config->sample_rate == desc->mSamplingRate) &&
                     (config->format == desc->mFormat) &&
-                    (config->channel_mask == desc->mChannelMask) &&
+                    (channelMask == desc->mChannelMask) &&
                     (session == desc->mDirectClientSession)) {
                     desc->mDirectOpenCount++;
                     ALOGI("%s reusing direct output %d for session %d", __func__, 
@@ -1233,11 +1219,11 @@
         if (status != NO_ERROR ||
             (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
             (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
-            (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
+            (channelMask != 0 && channelMask != outputDesc->mChannelMask)) {
             ALOGV("%s failed opening direct output: output %d sample rate %d %d," 
                     "format %d %d, channel mask %04x %04x", __func__, output, config->sample_rate,
                     outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
-                    config->channel_mask, outputDesc->mChannelMask);
+                    channelMask, outputDesc->mChannelMask);
             if (output != AUDIO_IO_HANDLE_NONE) {
                 outputDesc->close();
             }
@@ -1278,12 +1264,11 @@
 
         // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
         *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
-        output = selectOutput(outputs, *flags, config->format,
-                config->channel_mask, config->sample_rate);
+        output = selectOutput(outputs, *flags, config->format, channelMask, config->sample_rate);
     }
     ALOGW_IF((output == 0), "getOutputForDevices() could not find output for stream %d, "
             "sampling rate %d, format %#x, channels %#x, flags %#x",
-            stream, config->sample_rate, config->format, config->channel_mask, *flags);
+            stream, config->sample_rate, config->format, channelMask, *flags);
 
     return output;
 }
@@ -1986,7 +1971,11 @@
         if (status != NO_ERROR) {
             goto error;
         }
-        *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
+        if (is_mix_loopback_render(policyMix->mRouteFlags)) {
+            *inputType = API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK;
+        } else {
+            *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
+        }
         device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                                                   String8(attr->tags + strlen("addr=")),
                                                   AUDIO_FORMAT_DEFAULT);
@@ -4178,17 +4167,17 @@
     status_t ret;
 
     if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false)) {
-        if (property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+        if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false) &&
+            property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+            // Both BluetoothAudio@2.0 and BluetoothA2dp@1.0 (Offload) are disabled, so use
+            // the legacy hardware module for A2DP and hearing aid.
+            fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME);
+        } else if (property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+            // A2DP offload supported but disabled: try to use special XML file
             fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME);
-        } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.enabled", false)) {
-            // This property persist.bluetooth.bluetooth_audio_hal.enabled is temporary only.
-            // xml files AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME, although having
-            // the same name, must be different in offload and non offload cases in device
-            // specific configuration file.
-            fileNames.push_back(AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME);
         }
-    } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.enabled", false)) {
-        fileNames.push_back(AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME);
+    } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false)) {
+        fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME);
     }
     fileNames.push_back(AUDIO_POLICY_XML_CONFIG_FILE_NAME);
 
@@ -5155,7 +5144,7 @@
 
         if ((hasVoiceStream(streams) &&
              (isInCall() || mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc))) ||
-             (hasStream(streams, AUDIO_STREAM_ALARM) &&
+             ((hasStream(streams, AUDIO_STREAM_ALARM) || hasStream(streams, AUDIO_STREAM_ENFORCED_AUDIBLE)) &&
                 mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc)) ||
                 outputDesc->isStrategyActive(productStrategy)) {
             // Retrieval of devices for voice DL is done on primary output profile, cannot
@@ -5628,7 +5617,7 @@
                                         audio_devices_t device)
 {
     auto &curves = getVolumeCurves(stream);
-    float volumeDB = curves.volIndexToDb(Volume::getDeviceCategory(device), index);
+    float volumeDb = curves.volIndexToDb(Volume::getDeviceCategory(device), index);
 
     // handle the case of accessibility active while a ringtone is playing: if the ringtone is much
     // louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
@@ -5638,7 +5627,7 @@
             && (AUDIO_MODE_RINGTONE == mEngine->getPhoneState())
             && isStreamActive(AUDIO_STREAM_RING, 0)) {
         const float ringVolumeDB = computeVolume(AUDIO_STREAM_RING, index, device);
-        return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
+        return ringVolumeDB - 4 > volumeDb ? ringVolumeDB - 4 : volumeDb;
     }
 
     // in-call: always cap volume by voice volume + some low headroom
@@ -5657,10 +5646,10 @@
             const float maxVoiceVolDb =
                 computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
                 + IN_CALL_EARPIECE_HEADROOM_DB;
-            if (volumeDB > maxVoiceVolDb) {
+            if (volumeDb > maxVoiceVolDb) {
                 ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
-                        stream, volumeDB, AUDIO_STREAM_VOICE_CALL, maxVoiceVolDb);
-                volumeDB = maxVoiceVolDb;
+                        stream, volumeDb, AUDIO_STREAM_VOICE_CALL, maxVoiceVolDb);
+                volumeDb = maxVoiceVolDb;
             }
             } break;
         default:
@@ -5693,7 +5682,7 @@
         // just stopped
         if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
                 mLimitRingtoneVolume) {
-            volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
+            volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
             audio_devices_t musicDevice =
                     mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA),
                                                            nullptr, true /*fromCache*/).types();
@@ -5702,29 +5691,29 @@
                                    musicDevice);
             float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
                     musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB;
-            if (volumeDB > minVolDB) {
-                volumeDB = minVolDB;
+            if (volumeDb > minVolDB) {
+                volumeDb = minVolDB;
                 ALOGV("computeVolume limiting volume to %f musicVol %f", minVolDB, musicVolDB);
             }
             if (device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
                     AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES)) {
                 // on A2DP, also ensure notification volume is not too low compared to media when
                 // intended to be played
-                if ((volumeDB > -96.0f) &&
-                        (musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB > volumeDB)) {
+                if ((volumeDb > -96.0f) &&
+                        (musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB > volumeDb)) {
                     ALOGV("computeVolume increasing volume for stream=%d device=0x%X from %f to %f",
                             stream, device,
-                            volumeDB, musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB);
-                    volumeDB = musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB;
+                            volumeDb, musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB);
+                    volumeDb = musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB;
                 }
             }
         } else if ((Volume::getDeviceForVolume(device) != AUDIO_DEVICE_OUT_SPEAKER) ||
                 (stream != AUDIO_STREAM_ALARM && stream != AUDIO_STREAM_RING)) {
-            volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
+            volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
         }
     }
 
-    return volumeDB;
+    return volumeDb;
 }
 
 int AudioPolicyManager::rescaleVolumeIndex(int srcIndex,
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 641a03a..3a31e1e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -779,7 +779,8 @@
                 audio_session_t session,
                 audio_stream_type_t stream,
                 const audio_config_t *config,
-                audio_output_flags_t *flags);
+                audio_output_flags_t *flags,
+                bool forceMutingHaptic = false);
 
         /**
          * @brief getInputForDevice selects an input handle for a given input device and
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index a1b6b0f..a672521 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -20,7 +20,6 @@
 #include "AudioPolicyService.h"
 #include "TypeConverter.h"
 #include <media/MediaAnalyticsItem.h>
-#include <mediautils/ServiceUtilities.h>
 #include <media/AudioPolicy.h>
 #include <utils/Log.h>
 
@@ -167,7 +166,7 @@
     return mAudioPolicyManager->getOutput(stream);
 }
 
-status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr,
+status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *originalAttr,
                                               audio_io_handle_t *output,
                                               audio_session_t session,
                                               audio_stream_type_t *stream,
@@ -191,9 +190,13 @@
                 "%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
         uid = callingUid;
     }
+    audio_attributes_t attr = *originalAttr;
+    if (!mPackageManager.allowPlaybackCapture(uid)) {
+        attr.flags |= AUDIO_FLAG_NO_CAPTURE;
+    }
     audio_output_flags_t originalFlags = flags;
     AutoCallerClear acc;
-    status_t result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
+    status_t result = mAudioPolicyManager->getOutputForAttr(&attr, output, session, stream, uid,
                                                  config,
                                                  &flags, selectedDeviceId, portId,
                                                  secondaryOutputs);
@@ -209,14 +212,14 @@
         *selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
         *portId = AUDIO_PORT_HANDLE_NONE;
         secondaryOutputs->clear();
-        result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, config,
+        result = mAudioPolicyManager->getOutputForAttr(&attr, output, session, stream, uid, config,
                                                        &flags, selectedDeviceId, portId,
                                                        secondaryOutputs);
     }
 
     if (result == NO_ERROR) {
         sp <AudioPlaybackClient> client =
-            new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
+            new AudioPlaybackClient(attr, *output, uid, pid, session, *selectedDeviceId, *stream);
         mAudioPlaybackClients.add(*portId, client);
     }
     return result;
@@ -404,6 +407,9 @@
         if (status == NO_ERROR) {
             // enforce permission (if any) required for each type of input
             switch (inputType) {
+            case AudioPolicyInterface::API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK:
+                // this use case has been validated in audio service with a MediaProjection token,
+                // and doesn't rely on regular permissions
             case AudioPolicyInterface::API_INPUT_LEGACY:
                 break;
             case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index cf9cf71..8cbf3af 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -450,7 +450,7 @@
     for (size_t i =0; i < mAudioRecordClients.size(); i++) {
         sp<AudioRecordClient> current = mAudioRecordClients[i];
         if (!current->active) continue;
-        if (isPrivacySensitive(current->attributes.source)) {
+        if (isPrivacySensitiveSource(current->attributes.source)) {
             if (current->startTimeNs > latestSensitiveStartNs) {
                 latestSensitiveActive = current;
                 latestSensitiveStartNs = current->startTimeNs;
@@ -489,7 +489,10 @@
         bool isLatest = current == latestActive;
         bool isLatestSensitive = current == latestSensitiveActive;
         bool forceIdle = true;
-        if (mUidPolicy->isAssistantUid(current->uid)) {
+
+        if (isVirtualSource(source)) {
+            forceIdle = false;
+        } else if (mUidPolicy->isAssistantUid(current->uid)) {
             if (isA11yOnTop) {
                 if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
                     forceIdle = false;
@@ -505,10 +508,6 @@
                 (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
                 forceIdle = false;
             }
-        } else if (source == AUDIO_SOURCE_VOICE_DOWNLINK ||
-                   source == AUDIO_SOURCE_VOICE_CALL ||
-                   (source == AUDIO_SOURCE_VOICE_UPLINK)) {
-            forceIdle = false;
         } else {
             if (!isAssistantOnTop && (isOnTop || isLatest) &&
                 (!isSensitiveActive || isLatestSensitive)) {
@@ -542,14 +541,27 @@
 }
 
 /* static */
-bool AudioPolicyService::isPrivacySensitive(audio_source_t source)
+bool AudioPolicyService::isPrivacySensitiveSource(audio_source_t source)
+{
+    switch (source) {
+        case AUDIO_SOURCE_CAMCORDER:
+        case AUDIO_SOURCE_VOICE_COMMUNICATION:
+            return true;
+        default:
+            break;
+    }
+    return false;
+}
+
+/* static */
+bool AudioPolicyService::isVirtualSource(audio_source_t source)
 {
     switch (source) {
         case AUDIO_SOURCE_VOICE_UPLINK:
         case AUDIO_SOURCE_VOICE_DOWNLINK:
         case AUDIO_SOURCE_VOICE_CALL:
-        case AUDIO_SOURCE_CAMCORDER:
-        case AUDIO_SOURCE_VOICE_COMMUNICATION:
+        case AUDIO_SOURCE_REMOTE_SUBMIX:
+        case AUDIO_SOURCE_FM_TUNER:
             return true;
         default:
             break;
@@ -591,6 +603,8 @@
             mAudioPolicyManager->dump(fd);
         }
 
+        mPackageManager.dump(fd);
+
         if (locked) mLock.unlock();
     }
     return NO_ERROR;
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 5888841..a2e75cd 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -31,6 +31,7 @@
 #include <media/ToneGenerator.h>
 #include <media/AudioEffect.h>
 #include <media/AudioPolicy.h>
+#include <mediautils/ServiceUtilities.h>
 #include "AudioPolicyEffects.h"
 #include "managerdefault/AudioPolicyManager.h"
 #include <android/hardware/BnSensorPrivacyListener.h>
@@ -326,7 +327,8 @@
 
     void silenceAllRecordings_l();
 
-    static bool isPrivacySensitive(audio_source_t source);
+    static bool isPrivacySensitiveSource(audio_source_t source);
+    static bool isVirtualSource(audio_source_t source);
 
     // If recording we need to make sure the UID is allowed to do that. If the UID is idle
     // then it cannot record and gets buffers with zeros - silence. As soon as the UID
@@ -819,6 +821,8 @@
 
     DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> >   mAudioRecordClients;
     DefaultKeyedVector< audio_port_handle_t, sp<AudioPlaybackClient> >   mAudioPlaybackClients;
+
+    MediaPackageManager mPackageManager; // To check allowPlaybackCapture
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index e06897f..62ec955 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -227,7 +227,7 @@
     Mutex::Autolock lock(mStatusListenerLock);
 
     for (auto& i : mListenerList) {
-        i.second->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
+        i.second->getListener()->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
     }
 }
 
@@ -1654,6 +1654,18 @@
     return Status::ok();
 }
 
+void CameraService::notifyMonitoredUids() {
+    Mutex::Autolock lock(mStatusListenerLock);
+
+    for (const auto& it : mListenerList) {
+        auto ret = it.second->getListener()->onCameraAccessPrioritiesChanged();
+        if (!ret.isOk()) {
+            ALOGE("%s: Failed to trigger permission callback: %d", __FUNCTION__,
+                    ret.exceptionCode());
+        }
+    }
+}
+
 Status CameraService::notifyDeviceStateChange(int64_t newState) {
     const int pid = CameraThreadState::getCallingPid();
     const int selfPid = getpid();
@@ -1721,15 +1733,25 @@
 
     {
         Mutex::Autolock lock(mStatusListenerLock);
-        for (auto& it : mListenerList) {
-            if (IInterface::asBinder(it.second) == IInterface::asBinder(listener)) {
+        for (const auto &it : mListenerList) {
+            if (IInterface::asBinder(it.second->getListener()) == IInterface::asBinder(listener)) {
                 ALOGW("%s: Tried to add listener %p which was already subscribed",
                       __FUNCTION__, listener.get());
                 return STATUS_ERROR(ERROR_ALREADY_EXISTS, "Listener already registered");
             }
         }
 
-        mListenerList.emplace_back(isVendorListener, listener);
+        auto clientUid = CameraThreadState::getCallingUid();
+        sp<ServiceListener> serviceListener = new ServiceListener(this, listener, clientUid);
+        auto ret = serviceListener->initialize();
+        if (ret != NO_ERROR) {
+            String8 msg = String8::format("Failed to initialize service listener: %s (%d)",
+                    strerror(-ret), ret);
+            ALOGE("%s: %s", __FUNCTION__, msg.string());
+            return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
+        }
+        mListenerList.emplace_back(isVendorListener, serviceListener);
+        mUidPolicy->registerMonitorUid(clientUid);
     }
 
     /* Collect current devices and status */
@@ -1776,7 +1798,9 @@
     {
         Mutex::Autolock lock(mStatusListenerLock);
         for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
-            if (IInterface::asBinder(it->second) == IInterface::asBinder(listener)) {
+            if (IInterface::asBinder(it->second->getListener()) == IInterface::asBinder(listener)) {
+                mUidPolicy->unregisterMonitorUid(it->second->getListenerUid());
+                IInterface::asBinder(listener)->unlinkToDeath(it->second);
                 mListenerList.erase(it);
                 return Status::ok();
             }
@@ -2396,6 +2420,8 @@
     sCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_OPEN,
             mCameraIdStr, mCameraFacing, mClientPackageName, apiLevel);
 
+    sCameraService->mUidPolicy->registerMonitorUid(mClientUid);
+
     return OK;
 }
 
@@ -2433,6 +2459,8 @@
     }
     mOpsCallback.clear();
 
+    sCameraService->mUidPolicy->unregisterMonitorUid(mClientUid);
+
     return OK;
 }
 
@@ -2523,7 +2551,7 @@
     if (mRegistered) return;
     am.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
             | ActivityManager::UID_OBSERVER_IDLE
-            | ActivityManager::UID_OBSERVER_ACTIVE,
+            | ActivityManager::UID_OBSERVER_ACTIVE | ActivityManager::UID_OBSERVER_PROCSTATE,
             ActivityManager::PROCESS_STATE_UNKNOWN,
             String16("cameraserver"));
     status_t res = am.linkToDeath(this);
@@ -2569,6 +2597,51 @@
     }
 }
 
+void CameraService::UidPolicy::onUidStateChanged(uid_t uid, int32_t procState,
+        int64_t /*procStateSeq*/) {
+    bool procStateChange = false;
+    {
+        Mutex::Autolock _l(mUidLock);
+        if ((mMonitoredUids.find(uid) != mMonitoredUids.end()) &&
+                (mMonitoredUids[uid].first != procState)) {
+            mMonitoredUids[uid].first = procState;
+            procStateChange = true;
+        }
+    }
+
+    if (procStateChange) {
+        sp<CameraService> service = mService.promote();
+        if (service != nullptr) {
+            service->notifyMonitoredUids();
+        }
+    }
+}
+
+void CameraService::UidPolicy::registerMonitorUid(uid_t uid) {
+    Mutex::Autolock _l(mUidLock);
+    auto it = mMonitoredUids.find(uid);
+    if (it != mMonitoredUids.end()) {
+        it->second.second++;
+    } else {
+        mMonitoredUids.emplace(
+                std::pair<uid_t, std::pair<int32_t, size_t>> (uid,
+                    std::pair<int32_t, size_t> (ActivityManager::PROCESS_STATE_NONEXISTENT, 1)));
+    }
+}
+
+void CameraService::UidPolicy::unregisterMonitorUid(uid_t uid) {
+    Mutex::Autolock _l(mUidLock);
+    auto it = mMonitoredUids.find(uid);
+    if (it != mMonitoredUids.end()) {
+        it->second.second--;
+        if (it->second.second == 0) {
+            mMonitoredUids.erase(it);
+        }
+    } else {
+        ALOGE("%s: Trying to unregister uid: %d which is not monitored!", __FUNCTION__, uid);
+    }
+}
+
 bool CameraService::UidPolicy::isUidActive(uid_t uid, String16 callingPackage) {
     Mutex::Autolock _l(mUidLock);
     return isUidActiveLocked(uid, callingPackage);
@@ -3118,7 +3191,8 @@
                           cameraId.c_str());
                     continue;
                 }
-                listener.second->onStatusChanged(mapToInterface(status), String16(cameraId));
+                listener.second->getListener()->onStatusChanged(mapToInterface(status),
+                        String16(cameraId));
             }
         });
 }
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index cf0cef8..65727ec 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -179,6 +179,9 @@
             /*out*/
             std::vector<hardware::CameraStatus>* cameraStatuses, bool isVendor = false);
 
+    // Monitored UIDs availability notification
+    void                notifyMonitoredUids();
+
     /////////////////////////////////////////////////////////////////////
     // Client functionality
 
@@ -543,11 +546,14 @@
         void onUidGone(uid_t uid, bool disabled);
         void onUidActive(uid_t uid);
         void onUidIdle(uid_t uid, bool disabled);
-        void onUidStateChanged(uid_t uid __unused, int32_t procState __unused, int64_t procStateSeq __unused) {}
+        void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq);
 
         void addOverrideUid(uid_t uid, String16 callingPackage, bool active);
         void removeOverrideUid(uid_t uid, String16 callingPackage);
 
+        void registerMonitorUid(uid_t uid);
+        void unregisterMonitorUid(uid_t uid);
+
         // IBinder::DeathRecipient implementation
         virtual void binderDied(const wp<IBinder> &who);
     private:
@@ -558,6 +564,8 @@
         bool mRegistered;
         wp<CameraService> mService;
         std::unordered_set<uid_t> mActiveUids;
+        // Map of monitored uid to a cached (procState, refCount) pair
+        std::unordered_map<uid_t, std::pair<int32_t, size_t>> mMonitoredUids;
         std::unordered_map<uid_t, bool> mOverrideUids;
     }; // class UidPolicy
 
@@ -790,8 +798,33 @@
 
     sp<CameraProviderManager> mCameraProviderManager;
 
+    class ServiceListener : public virtual IBinder::DeathRecipient {
+        public:
+            ServiceListener(sp<CameraService> parent, sp<hardware::ICameraServiceListener> listener,
+                    int uid) : mParent(parent), mListener(listener), mListenerUid(uid) {}
+
+            status_t initialize() {
+                return IInterface::asBinder(mListener)->linkToDeath(this);
+            }
+
+            virtual void binderDied(const wp<IBinder> &/*who*/) {
+                auto parent = mParent.promote();
+                if (parent.get() != nullptr) {
+                    parent->removeListener(mListener);
+                }
+            }
+
+            int getListenerUid() { return mListenerUid; }
+            sp<hardware::ICameraServiceListener> getListener() { return mListener; }
+
+        private:
+            wp<CameraService> mParent;
+            sp<hardware::ICameraServiceListener> mListener;
+            int mListenerUid;
+    };
+
     // Guarded by mStatusListenerMutex
-    std::vector<std::pair<bool, sp<hardware::ICameraServiceListener>>> mListenerList;
+    std::vector<std::pair<bool, sp<ServiceListener>>> mListenerList;
 
     Mutex       mStatusListenerLock;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 923d17a..22e09e4 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -58,6 +58,8 @@
 #include "CameraService.h"
 #include "utils/CameraThreadState.h"
 
+#include <tuple>
+
 using namespace android::camera3;
 using namespace android::hardware::camera;
 using namespace android::hardware::camera::device::V3_2;
@@ -1094,7 +1096,7 @@
             hBuf.acquireFence.setTo(acquireFence, /*shouldOwn*/true);
             hBuf.releaseFence = nullptr;
 
-            res = mInterface->pushInflightRequestBuffer(bufferId, buffer);
+            res = mInterface->pushInflightRequestBuffer(bufferId, buffer, streamId);
             if (res != OK) {
                 ALOGE("%s: Can't get register request buffers for stream %d: %s (%d)",
                         __FUNCTION__, streamId, strerror(-res), res);
@@ -2847,12 +2849,19 @@
         }
         streams.add(outputStream);
 
-        if (outputStream->format == HAL_PIXEL_FORMAT_BLOB &&
-                outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
+        if (outputStream->format == HAL_PIXEL_FORMAT_BLOB) {
             size_t k = i + ((mInputStream != nullptr) ? 1 : 0); // Input stream if present should
                                                                 // always occupy the initial entry.
-            bufferSizes[k] = static_cast<uint32_t>(
-                    getJpegBufferSize(outputStream->width, outputStream->height));
+            if (outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
+                bufferSizes[k] = static_cast<uint32_t>(
+                        getJpegBufferSize(outputStream->width, outputStream->height));
+            } else if (outputStream->data_space ==
+                    static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
+                bufferSizes[k] = outputStream->width * outputStream->height;
+            } else {
+                ALOGW("%s: Blob dataSpace %d not supported",
+                        __FUNCTION__, outputStream->data_space);
+            }
         }
     }
 
@@ -3270,7 +3279,15 @@
     std::vector<std::pair<int32_t, int32_t>> inflightKeys;
     mInterface->getInflightBufferKeys(&inflightKeys);
 
-    int32_t inputStreamId = (mInputStream != nullptr) ? mInputStream->getId() : -1;
+    // Inflight buffers for HAL buffer manager
+    std::vector<uint64_t> inflightRequestBufferKeys;
+    mInterface->getInflightRequestBufferKeys(&inflightRequestBufferKeys);
+
+    // (streamId, frameNumber, buffer_handle_t*) tuple for all inflight buffers.
+    // frameNumber will be -1 for buffers from HAL buffer manager
+    std::vector<std::tuple<int32_t, int32_t, buffer_handle_t*>> inflightBuffers;
+    inflightBuffers.reserve(inflightKeys.size() + inflightRequestBufferKeys.size());
+
     for (auto& pair : inflightKeys) {
         int32_t frameNumber = pair.first;
         int32_t streamId = pair.second;
@@ -3281,6 +3298,26 @@
                     __FUNCTION__, frameNumber, streamId);
             continue;
         }
+        inflightBuffers.push_back(std::make_tuple(streamId, frameNumber, buffer));
+    }
+
+    for (auto& bufferId : inflightRequestBufferKeys) {
+        int32_t streamId = -1;
+        buffer_handle_t* buffer = nullptr;
+        status_t res = mInterface->popInflightRequestBuffer(bufferId, &buffer, &streamId);
+        if (res != OK) {
+            ALOGE("%s: cannot find in-flight buffer %" PRIu64, __FUNCTION__, bufferId);
+            continue;
+        }
+        inflightBuffers.push_back(std::make_tuple(streamId, /*frameNumber*/-1, buffer));
+    }
+
+    int32_t inputStreamId = (mInputStream != nullptr) ? mInputStream->getId() : -1;
+    for (auto& tuple : inflightBuffers) {
+        status_t res = OK;
+        int32_t streamId = std::get<0>(tuple);
+        int32_t frameNumber = std::get<1>(tuple);
+        buffer_handle_t* buffer = std::get<2>(tuple);
 
         camera3_stream_buffer_t streamBuffer;
         streamBuffer.buffer = buffer;
@@ -4354,8 +4391,11 @@
             dst.status = BufferStatus::OK;
             dst.releaseFence = nullptr;
 
-            pushInflightBufferLocked(captureRequest->frameNumber, streamId,
-                    src->buffer, src->acquire_fence);
+            // Output buffers are empty when using HAL buffer manager
+            if (!mUseHalBufManager) {
+                pushInflightBufferLocked(captureRequest->frameNumber, streamId,
+                        src->buffer, src->acquire_fence);
+            }
         }
     }
     return OK;
@@ -4583,6 +4623,17 @@
     return;
 }
 
+void Camera3Device::HalInterface::getInflightRequestBufferKeys(
+        std::vector<uint64_t>* out) {
+    std::lock_guard<std::mutex> lock(mRequestedBuffersLock);
+    out->clear();
+    out->reserve(mRequestedBuffers.size());
+    for (auto& pair : mRequestedBuffers) {
+        out->push_back(pair.first);
+    }
+    return;
+}
+
 status_t Camera3Device::HalInterface::pushInflightBufferLocked(
         int32_t frameNumber, int32_t streamId, buffer_handle_t *buffer, int acquireFence) {
     uint64_t key = static_cast<uint64_t>(frameNumber) << 32 | static_cast<uint64_t>(streamId);
@@ -4610,9 +4661,9 @@
 }
 
 status_t Camera3Device::HalInterface::pushInflightRequestBuffer(
-        uint64_t bufferId, buffer_handle_t* buf) {
+        uint64_t bufferId, buffer_handle_t* buf, int32_t streamId) {
     std::lock_guard<std::mutex> lock(mRequestedBuffersLock);
-    auto pair = mRequestedBuffers.insert({bufferId, buf});
+    auto pair = mRequestedBuffers.insert({bufferId, {streamId, buf}});
     if (!pair.second) {
         ALOGE("%s: bufId %" PRIu64 " is already inflight!",
                 __FUNCTION__, bufferId);
@@ -4623,7 +4674,13 @@
 
 // Find and pop a buffer_handle_t based on bufferId
 status_t Camera3Device::HalInterface::popInflightRequestBuffer(
-        uint64_t bufferId, /*out*/ buffer_handle_t **buffer) {
+        uint64_t bufferId,
+        /*out*/ buffer_handle_t** buffer,
+        /*optional out*/ int32_t* streamId) {
+    if (buffer == nullptr) {
+        ALOGE("%s: buffer (%p) must not be null", __FUNCTION__, buffer);
+        return BAD_VALUE;
+    }
     std::lock_guard<std::mutex> lock(mRequestedBuffersLock);
     auto it = mRequestedBuffers.find(bufferId);
     if (it == mRequestedBuffers.end()) {
@@ -4631,7 +4688,10 @@
                 __FUNCTION__, bufferId);
         return BAD_VALUE;
     }
-    *buffer = it->second;
+    *buffer = it->second.second;
+    if (streamId != nullptr) {
+        *streamId = it->second.first;
+    }
     mRequestedBuffers.erase(it);
     return OK;
 }
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index b25d89d..d3bb212 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -320,16 +320,22 @@
         status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
                 /*out*/ buffer_handle_t **buffer);
 
-        // Register a bufId/buffer_handle_t to inflight request buffer
-        status_t pushInflightRequestBuffer(uint64_t bufferId, buffer_handle_t* buf);
+        // Register a bufId -> (streamId, buffer_handle_t) mapping as an inflight request buffer
+        status_t pushInflightRequestBuffer(
+                uint64_t bufferId, buffer_handle_t* buf, int32_t streamId);
 
         // Find a buffer_handle_t based on bufferId
-        status_t popInflightRequestBuffer(uint64_t bufferId, /*out*/ buffer_handle_t **buffer);
+        status_t popInflightRequestBuffer(uint64_t bufferId,
+                /*out*/ buffer_handle_t** buffer,
+                /*optional out*/ int32_t* streamId = nullptr);
 
         // Get a vector of (frameNumber, streamId) pair of currently inflight
         // buffers
         void getInflightBufferKeys(std::vector<std::pair<int32_t, int32_t>>* out);
 
+        // Get a vector of bufferId of currently inflight buffers
+        void getInflightRequestBufferKeys(std::vector<uint64_t>* out);
+
         static const uint64_t BUFFER_ID_NO_BUFFER = 0;
       private:
         // Always valid
@@ -398,7 +404,7 @@
 
         // Buffers given to HAL through requestStreamBuffer API
         std::mutex mRequestedBuffersLock;
-        std::unordered_map<uint64_t, buffer_handle_t*> mRequestedBuffers;
+        std::unordered_map<uint64_t, std::pair<int32_t, buffer_handle_t*>> mRequestedBuffers;
 
         uint32_t mNextStreamConfigCounter = 1;
 
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
index ca9143d..0f6be79 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
@@ -50,6 +50,10 @@
 
     virtual ::android::binder::Status onTorchStatusChanged(
         int32_t status, const ::android::String16& cameraId) override;
+    virtual binder::Status onCameraAccessPrioritiesChanged() {
+        // TODO: no implementation yet.
+        return binder::Status::ok();
+    }
 };
 
 } // implementation
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 65fcf40..661a475 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -6,7 +6,7 @@
 LOCAL_SRC_FILES := \
     MediaExtractorService.cpp
 
-LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
+LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils
 LOCAL_MODULE:= libmediaextractorservice
 include $(BUILD_SHARED_LIBRARY)
 
@@ -21,7 +21,7 @@
 
 LOCAL_SRC_FILES := main_extractorservice.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
-    liblog libbase libandroidicu libavservices_minijail
+    liblog libandroidicu libavservices_minijail
 LOCAL_STATIC_LIBRARIES := libicuandroid_utils
 LOCAL_MODULE:= mediaextractor
 LOCAL_INIT_RC := mediaextractor.rc