Merge "Revert^2 "Set use_vendor to false for media apex""
diff --git a/OWNERS b/OWNERS
index 9989bf0..7f523a2 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,5 +1,7 @@
+chz@google.com
 elaurent@google.com
 etalvala@google.com
+hkuang@google.com
 lajos@google.com
 marcone@google.com
 
diff --git a/apex/Android.bp b/apex/Android.bp
index f3e8a55..b314e5d 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -89,6 +89,9 @@
     binaries: [
         "mediaswcodec",
     ],
+    native_shared_libs: [
+        "libstagefright_foundation",
+    ],
     prebuilts: [
         "com.android.media.swcodec-mediaswcodec.rc",
         "com.android.media.swcodec-ld.config.txt",
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 2d54bd1..6b912f1 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -1957,7 +1957,10 @@
      * explicitly set ACAMERA_CONTROL_ZOOM_RATIO, its value defaults to 1.0.</p>
      * <p>One limitation of controlling zoom using zoomRatio is that the ACAMERA_SCALER_CROP_REGION
      * must only be used for letterboxing or pillarboxing of the sensor active array, and no
-     * FREEFORM cropping can be used with ACAMERA_CONTROL_ZOOM_RATIO other than 1.0.</p>
+     * FREEFORM cropping can be used with ACAMERA_CONTROL_ZOOM_RATIO other than 1.0. If
+     * ACAMERA_CONTROL_ZOOM_RATIO is not 1.0 and ACAMERA_SCALER_CROP_REGION is set to a
+     * windowboxing region, the camera framework will override ACAMERA_SCALER_CROP_REGION
+     * to be the active array.</p>
      *
      * @see ACAMERA_CONTROL_AE_REGIONS
      * @see ACAMERA_CONTROL_ZOOM_RATIO
@@ -3651,7 +3654,9 @@
      * </ol>
      * </li>
      * <li>Setting ACAMERA_CONTROL_ZOOM_RATIO to values different than 1.0 and
-     * ACAMERA_SCALER_CROP_REGION to be windowboxing at the same time is undefined behavior.</li>
+     * ACAMERA_SCALER_CROP_REGION to a windowboxing region at the same time is not supported.
+     * In this case, the camera framework will override ACAMERA_SCALER_CROP_REGION to be the
+     * active array.</li>
      * </ul>
      * <p>LEGACY capability devices will only support CENTER_ONLY cropping.</p>
      *
@@ -8517,10 +8522,10 @@
      * respective color channel provided in
      * ACAMERA_SENSOR_TEST_PATTERN_DATA.</p>
      * <p>For example:</p>
-     * <pre><code>android.testPatternData = [0, 0xFFFFFFFF, 0xFFFFFFFF, 0]
+     * <pre><code>android.sensor.testPatternData = [0, 0xFFFFFFFF, 0xFFFFFFFF, 0]
      * </code></pre>
      * <p>All green pixels are 100% green. All red/blue pixels are black.</p>
-     * <pre><code>android.testPatternData = [0xFFFFFFFF, 0, 0xFFFFFFFF, 0]
+     * <pre><code>android.control.testPatternData = [0xFFFFFFFF, 0, 0xFFFFFFFF, 0]
      * </code></pre>
      * <p>All red pixels are 100% red. Only the odd green pixels
      * are 100% green. All blue pixels are 100% black.</p>
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index f4b8164..e000633 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -272,7 +272,7 @@
 status_t SimplePlayer::onPrepare() {
     CHECK_EQ(mState, UNPREPARED);
 
-    mExtractor = new NuMediaExtractor;
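+    // tag the extractor's entry point as OTHER (command-line use rather than
+    // the SDK/NDK client paths)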
+    mExtractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
 
     status_t err = mExtractor->setDataSource(
             NULL /* httpService */, mPath.c_str());
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index c26e0b9..33c4663 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -79,7 +79,7 @@
 
     static int64_t kTimeout = 500ll;
 
-    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
     if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor.\n");
         return 1;
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index b894545..ca058ab 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -319,7 +319,8 @@
 
     static int64_t kTimeout = 500ll;
 
-    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
+
     if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor.\n");
         return 1;
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index 4a83a4a..bc7e41e 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -62,7 +62,7 @@
         int trimEndTimeMs,
         int rotationDegrees,
         MediaMuxer::OutputFormat container = MediaMuxer::OUTPUT_FORMAT_MPEG_4) {
-    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
     if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor. %s\n", path);
         return 1;
diff --git a/drm/TEST_MAPPING b/drm/TEST_MAPPING
index 9f6a532..aa8a7d8 100644
--- a/drm/TEST_MAPPING
+++ b/drm/TEST_MAPPING
@@ -1,5 +1,5 @@
 {
-  "presubmit": [
+  "presubmit-large": [
     // The following tests validate codec and drm path.
     {
       "name": "GtsMediaTestCases",
diff --git a/include/media/MicrophoneInfo.h b/include/media/MicrophoneInfo.h
index e89401a..a5045b9 100644
--- a/include/media/MicrophoneInfo.h
+++ b/include/media/MicrophoneInfo.h
@@ -208,6 +208,21 @@
     int32_t mDirectionality;
 };
 
+// Conversion routines, according to AidlConversion.h conventions.
+inline ConversionResult<MicrophoneInfo>
+aidl2legacy_MicrophoneInfo(const media::MicrophoneInfoData& aidl) {
+    MicrophoneInfo legacy;
+    RETURN_IF_ERROR(legacy.readFromParcelable(aidl));
+    return legacy;
+}
+
+inline ConversionResult<media::MicrophoneInfoData>
+legacy2aidl_MicrophoneInfo(const MicrophoneInfo& legacy) {
+    media::MicrophoneInfoData aidl;
+    RETURN_IF_ERROR(legacy.writeToParcelable(&aidl));
+    return aidl;
+}
+
 } // namespace media
 } // namespace android
 
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index 50facfb..80e0924 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -1,6 +1,6 @@
 // for frameworks/av/media
 {
-    "presubmit": [
+    "presubmit-large": [
         // runs whenever we change something in this tree
         {
             "name": "CtsMediaTestCases",
@@ -17,7 +17,9 @@
                     "include-filter": "android.media.cts.DecodeEditEncodeTest"
                 }
             ]
-        },
+        }
+    ],
+    "presubmit": [
         {
             "name": "GtsMediaTestCases",
             "options" : [
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 6111fea..1d2562e 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -39,6 +39,8 @@
 
     static constexpr size_t kMinAllocBytesForEviction = 1024*1024*15;
     static constexpr size_t kMinBufferCountForEviction = 25;
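+    // If more than kMaxUnusedBufferCount buffers sit cached but unused, cleanUp()
+    // evicts free buffers until the count drops to kUnusedBufferCountTarget,
+    // without waiting for the periodic clean-up interval.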
+    static constexpr size_t kMaxUnusedBufferCount = 64;
+    static constexpr size_t kUnusedBufferCountTarget = kMaxUnusedBufferCount - 16;
 
     static constexpr nsecs_t kEvictGranularityNs = 1000000000; // 1 sec
     static constexpr nsecs_t kEvictDurationNs = 5000000000; // 5 secs
@@ -724,9 +726,11 @@
 }
 
 void Accessor::Impl::BufferPool::cleanUp(bool clearCache) {
-    if (clearCache || mTimestampUs > mLastCleanUpUs + kCleanUpDurationUs) {
+    if (clearCache || mTimestampUs > mLastCleanUpUs + kCleanUpDurationUs ||
+            mStats.buffersNotInUse() > kMaxUnusedBufferCount) {
         mLastCleanUpUs = mTimestampUs;
-        if (mTimestampUs > mLastLogUs + kLogDurationUs) {
+        if (mTimestampUs > mLastLogUs + kLogDurationUs ||
+                mStats.buffersNotInUse() > kMaxUnusedBufferCount) {
             mLastLogUs = mTimestampUs;
             ALOGD("bufferpool2 %p : %zu(%zu size) total buffers - "
                   "%zu(%zu size) used buffers - %zu/%zu (recycle/alloc) - "
@@ -737,8 +741,9 @@
                   mStats.mTotalFetches, mStats.mTotalTransfers);
         }
         for (auto freeIt = mFreeBuffers.begin(); freeIt != mFreeBuffers.end();) {
-            if (!clearCache && (mStats.mSizeCached < kMinAllocBytesForEviction
-                    || mBuffers.size() < kMinBufferCountForEviction)) {
+            if (!clearCache && mStats.buffersNotInUse() <= kUnusedBufferCountTarget &&
+                    (mStats.mSizeCached < kMinAllocBytesForEviction ||
+                     mBuffers.size() < kMinBufferCountForEviction)) {
                 break;
             }
             auto it = mBuffers.find(*freeIt);
diff --git a/media/bufferpool/2.0/AccessorImpl.h b/media/bufferpool/2.0/AccessorImpl.h
index cd1b4d0..3d39941 100644
--- a/media/bufferpool/2.0/AccessorImpl.h
+++ b/media/bufferpool/2.0/AccessorImpl.h
@@ -193,6 +193,12 @@
                 : mSizeCached(0), mBuffersCached(0), mSizeInUse(0), mBuffersInUse(0),
                   mTotalAllocations(0), mTotalRecycles(0), mTotalTransfers(0), mTotalFetches(0) {}
 
+            /// # of currently unused buffers
+            size_t buffersNotInUse() const {
+                ALOG_ASSERT(mBuffersCached >= mBuffersInUse);
+                return mBuffersCached - mBuffersInUse;
+            }
+
             /// A new buffer is allocated on an allocation request.
             void onBufferAllocated(size_t allocSize) {
                 mSizeCached += allocSize;
diff --git a/media/bufferpool/2.0/BufferPoolClient.cpp b/media/bufferpool/2.0/BufferPoolClient.cpp
index 342fef6..9308b81 100644
--- a/media/bufferpool/2.0/BufferPoolClient.cpp
+++ b/media/bufferpool/2.0/BufferPoolClient.cpp
@@ -32,6 +32,8 @@
static constexpr int64_t kReceiveTimeoutUs = 1000000; // 1 sec
 static constexpr int kPostMaxRetry = 3;
 static constexpr int kCacheTtlUs = 1000000; // TODO: tune
+static constexpr size_t kMaxCachedBufferCount = 64;
+static constexpr size_t kCachedBufferCountTarget = kMaxCachedBufferCount - 16;
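+// When the client-side cache tracks more than kMaxCachedBufferCount buffers,
+// evictCaches() keeps evicting eligible entries until the count drops to
+// kCachedBufferCountTarget.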
 
 class BufferPoolClient::Impl
         : public std::enable_shared_from_this<BufferPoolClient::Impl> {
@@ -136,6 +138,10 @@
             --mActive;
             mLastChangeUs = getTimestampNow();
         }
+
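+        // number of buffers tracked by this cache that are not currently active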
+        int cachedBufferCount() const {
+            return mBuffers.size() - mActive;
+        }
     } mCache;
 
     // FMQ - release notifier
@@ -668,10 +674,12 @@
 // should have mCache.mLock
 void BufferPoolClient::Impl::evictCaches(bool clearCache) {
     int64_t now = getTimestampNow();
-    if (now >= mLastEvictCacheUs + kCacheTtlUs || clearCache) {
+    if (now >= mLastEvictCacheUs + kCacheTtlUs ||
+            clearCache || mCache.cachedBufferCount() > kMaxCachedBufferCount) {
         size_t evicted = 0;
         for (auto it = mCache.mBuffers.begin(); it != mCache.mBuffers.end();) {
-            if (!it->second->hasCache() && (it->second->expire() || clearCache)) {
+            if (!it->second->hasCache() && (it->second->expire() ||
+                        clearCache || mCache.cachedBufferCount() > kCachedBufferCountTarget)) {
                 it = mCache.mBuffers.erase(it);
                 ++evicted;
             } else {
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index fca3477..6ac4210 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -4,7 +4,9 @@
     // { "name": "codec2_core_param_test"},
     // TODO(b/155516524)
     // { "name": "codec2_vndk_interface_test"},
-    { "name": "codec2_vndk_test"},
+    { "name": "codec2_vndk_test"}
+  ],
+  "presubmit-large": [
     {
       "name": "CtsMediaTestCases",
       "options": [
diff --git a/media/codec2/components/aac/Android.bp b/media/codec2/components/aac/Android.bp
index 9eca585..50495a9 100644
--- a/media/codec2/components/aac/Android.bp
+++ b/media/codec2/components/aac/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_aacdec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -15,7 +15,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_aacenc",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/amr_nb_wb/Android.bp b/media/codec2/components/amr_nb_wb/Android.bp
index ce25bc9..b09a505 100644
--- a/media/codec2/components/amr_nb_wb/Android.bp
+++ b/media/codec2/components/amr_nb_wb/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_amrnbdec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -21,7 +21,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_amrwbdec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -40,7 +40,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_amrnbenc",
     defaults: [
         "libcodec2_soft-defaults",
@@ -58,7 +58,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_amrwbenc",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/aom/Android.bp b/media/codec2/components/aom/Android.bp
index 61dbd4c..fcc4552 100644
--- a/media/codec2/components/aom/Android.bp
+++ b/media/codec2/components/aom/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_av1dec_aom",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/avc/Android.bp b/media/codec2/components/avc/Android.bp
index 4021444..6b0e363 100644
--- a/media/codec2/components/avc/Android.bp
+++ b/media/codec2/components/avc/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_avcdec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -15,7 +15,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_avcenc",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index f10835f..3712564 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -1,6 +1,6 @@
 // DO NOT DEPEND ON THIS DIRECTLY
 // use libcodec2_soft-defaults instead
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_common",
     defaults: ["libcodec2-impl-defaults"],
     vendor_available: true,
@@ -96,7 +96,7 @@
 }
 
 // TEMP: used by cheets2 project - remove when no longer used
-cc_library_shared {
+cc_library {
     name: "libcodec2_simple_component",
     vendor_available: true,
 
diff --git a/media/codec2/components/flac/Android.bp b/media/codec2/components/flac/Android.bp
index 48cc51b..603c412 100644
--- a/media/codec2/components/flac/Android.bp
+++ b/media/codec2/components/flac/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_flacdec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -14,7 +14,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_flacenc",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/g711/Android.bp b/media/codec2/components/g711/Android.bp
index 0101b1a..c39df7b 100644
--- a/media/codec2/components/g711/Android.bp
+++ b/media/codec2/components/g711/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_g711alawdec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -14,7 +14,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_g711mlawdec",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/gav1/Android.bp b/media/codec2/components/gav1/Android.bp
index f374089..32aa98d 100644
--- a/media/codec2/components/gav1/Android.bp
+++ b/media/codec2/components/gav1/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_av1dec_gav1",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/gsm/Android.bp b/media/codec2/components/gsm/Android.bp
index 9330c01..7f54af8 100644
--- a/media/codec2/components/gsm/Android.bp
+++ b/media/codec2/components/gsm/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_gsmdec",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/hevc/Android.bp b/media/codec2/components/hevc/Android.bp
index 369bd78..2858212 100644
--- a/media/codec2/components/hevc/Android.bp
+++ b/media/codec2/components/hevc/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_hevcdec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -11,7 +11,7 @@
 
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_hevcenc",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/mp3/Android.bp b/media/codec2/components/mp3/Android.bp
index 66665ed..b4fb1b0 100644
--- a/media/codec2/components/mp3/Android.bp
+++ b/media/codec2/components/mp3/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_mp3dec",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/mpeg2/Android.bp b/media/codec2/components/mpeg2/Android.bp
index 841f0a9..666e697 100644
--- a/media/codec2/components/mpeg2/Android.bp
+++ b/media/codec2/components/mpeg2/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_mpeg2dec",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/mpeg4_h263/Android.bp b/media/codec2/components/mpeg4_h263/Android.bp
index 41e4f44..0673709 100644
--- a/media/codec2/components/mpeg4_h263/Android.bp
+++ b/media/codec2/components/mpeg4_h263/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_mpeg4dec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -15,7 +15,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_h263dec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -31,7 +31,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_mpeg4enc",
     defaults: [
         "libcodec2_soft-defaults",
@@ -49,7 +49,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_h263enc",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/opus/Android.bp b/media/codec2/components/opus/Android.bp
index 0ed141b..32e2bf8 100644
--- a/media/codec2/components/opus/Android.bp
+++ b/media/codec2/components/opus/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_opusdec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -9,7 +9,7 @@
 
     shared_libs: ["libopus"],
 }
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_opusenc",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/raw/Android.bp b/media/codec2/components/raw/Android.bp
index dc944da..d4fb8f8 100644
--- a/media/codec2/components/raw/Android.bp
+++ b/media/codec2/components/raw/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_rawdec",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/vorbis/Android.bp b/media/codec2/components/vorbis/Android.bp
index bc1c380..ff1183f 100644
--- a/media/codec2/components/vorbis/Android.bp
+++ b/media/codec2/components/vorbis/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_vorbisdec",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/vpx/Android.bp b/media/codec2/components/vpx/Android.bp
index 34f5753..72178aa 100644
--- a/media/codec2/components/vpx/Android.bp
+++ b/media/codec2/components/vpx/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_vp9dec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -14,7 +14,7 @@
     ],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_vp8dec",
     defaults: [
         "libcodec2_soft-defaults",
@@ -26,7 +26,7 @@
     shared_libs: ["libvpx"],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_vp9enc",
     defaults: [
         "libcodec2_soft-defaults",
@@ -43,7 +43,7 @@
     cflags: ["-DVP9"],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_vp8enc",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/components/xaac/Android.bp b/media/codec2/components/xaac/Android.bp
index 7795cc1..4889d78 100644
--- a/media/codec2/components/xaac/Android.bp
+++ b/media/codec2/components/xaac/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libcodec2_soft_xaacdec",
     defaults: [
         "libcodec2_soft-defaults",
diff --git a/media/codec2/core/Android.bp b/media/codec2/core/Android.bp
index 33fafa7..beeadb8 100644
--- a/media/codec2/core/Android.bp
+++ b/media/codec2/core/Android.bp
@@ -5,7 +5,7 @@
     export_include_dirs: ["include"],
 }
 
-cc_library_shared {
+cc_library {
     name: "libcodec2",
     vendor_available: true,
     min_sdk_version: "29",
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 12ed725..b520c17 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -734,7 +734,7 @@
             }
             if (timestampMax < timestamp) timestampMax = timestamp;
         }
-        timestampOffset = timestampMax;
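+        // offset the second decode loop one 30fps frame duration (33333us) past
+        // the last timestamp so it does not reuse the final timestamp of the first loop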
+        timestampOffset = timestampMax + 33333;
         eleInfo.close();
 
         // Reset Total frames before second decode loop
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 3ef454b..06464b5 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1403,6 +1403,7 @@
                 continue;
             }
             if (work->input.buffers.empty()
+                    || work->input.buffers.front() == nullptr
                     || work->input.buffers.front()->data().linearBlocks().empty()) {
                 ALOGD("[%s] no linear codec config data found", mName);
                 continue;
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 692da58..566a18f 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -96,6 +96,9 @@
                 int32_t vstride = int32_t(offsetDelta / stride);
                 newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
                 ALOGD("[%s] updating vstride = %d", mName, vstride);
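+                // start the client-visible buffer range at the first plane's
+                // offset reported by the updated image data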
+                buffer->setRange(
+                        img->mPlane[0].mOffset,
+                        buffer->size() - img->mPlane[0].mOffset);
             }
         }
         setFormat(newFormat);
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 25e7da9..19414a0 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -276,20 +276,22 @@
                             int32_t planeSize = 0;
                             for (uint32_t i = 0; i < layout.numPlanes; ++i) {
                                 const C2PlaneInfo &plane = layout.planes[i];
-                                ssize_t minOffset = plane.minOffset(mWidth, mHeight);
-                                ssize_t maxOffset = plane.maxOffset(mWidth, mHeight);
+                                int64_t planeStride = std::abs(plane.rowInc / plane.colInc);
+                                ssize_t minOffset = plane.minOffset(
+                                        mWidth / plane.colSampling, mHeight / plane.rowSampling);
+                                ssize_t maxOffset = plane.maxOffset(
+                                        mWidth / plane.colSampling, mHeight / plane.rowSampling);
                                 if (minPtr > mView.data()[i] + minOffset) {
                                     minPtr = mView.data()[i] + minOffset;
                                 }
                                 if (maxPtr < mView.data()[i] + maxOffset) {
                                     maxPtr = mView.data()[i] + maxOffset;
                                 }
-                                planeSize += std::abs(plane.rowInc) * align(mHeight, 64)
-                                        / plane.rowSampling / plane.colSampling
-                                        * divUp(mAllocatedDepth, 8u);
+                                planeSize += planeStride * divUp(mAllocatedDepth, 8u)
+                                        * align(mHeight, 64) / plane.rowSampling;
                             }
 
-                            if ((maxPtr - minPtr + 1) <= planeSize) {
+                            if (minPtr == mView.data()[0] && (maxPtr - minPtr + 1) <= planeSize) {
                                 // FIXME: this is risky as reading/writing data out of bound results
                                 //        in an undefined behavior, but gralloc does assume a
                                 //        contiguous mapping
diff --git a/media/codec2/sfplugin/InputSurfaceWrapper.h b/media/codec2/sfplugin/InputSurfaceWrapper.h
index bb35763..479acb1 100644
--- a/media/codec2/sfplugin/InputSurfaceWrapper.h
+++ b/media/codec2/sfplugin/InputSurfaceWrapper.h
@@ -61,24 +61,24 @@
     /// Input Surface configuration
     struct Config {
         // IN PARAMS (GBS)
-        float mMinFps; // minimum fps (repeat frame to achieve this)
-        float mMaxFps; // max fps (via frame drop)
-        float mCaptureFps; // capture fps
-        float mCodedFps;   // coded fps
-        bool mSuspended; // suspended
-        int64_t mTimeOffsetUs; // time offset (input => codec)
-        int64_t mSuspendAtUs; // suspend/resume time
-        int64_t mStartAtUs; // start time
-        bool mStopped; // stopped
-        int64_t mStopAtUs; // stop time
+        float mMinFps = 0.0; // minimum fps (repeat frame to achieve this)
+        float mMaxFps = 0.0; // max fps (via frame drop)
+        float mCaptureFps = 0.0; // capture fps
+        float mCodedFps = 0.0;   // coded fps
+        bool mSuspended = false; // suspended
+        int64_t mTimeOffsetUs = 0; // time offset (input => codec)
+        int64_t mSuspendAtUs = 0; // suspend/resume time
+        int64_t mStartAtUs = 0; // start time
+        bool mStopped = false; // stopped
+        int64_t mStopAtUs = 0; // stop time
 
         // OUT PARAMS (GBS)
-        int64_t mInputDelayUs; // delay between encoder input and surface input
+        int64_t mInputDelayUs = 0; // delay between encoder input and surface input
 
         // IN PARAMS (CODEC WRAPPER)
-        float mFixedAdjustedFps; // fixed fps via PTS manipulation
-        float mMinAdjustedFps; // minimum fps via PTS manipulation
-        uint64_t mUsage; // consumer usage
+        float mFixedAdjustedFps = 0.0; // fixed fps via PTS manipulation
+        float mMinAdjustedFps = 0.0; // minimum fps via PTS manipulation
+        uint64_t mUsage = 0; // consumer usage
     };
 
     /**
diff --git a/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp b/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
index 5bee605..ad8f6e5 100644
--- a/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
+++ b/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
@@ -18,22 +18,31 @@
 
 #include <gtest/gtest.h>
 
+#include <media/stagefright/foundation/AString.h>
 #include <media/stagefright/MediaCodecConstants.h>
 
+#include <C2BlockInternal.h>
 #include <C2PlatformSupport.h>
 
 namespace android {
 
+static std::shared_ptr<RawGraphicOutputBuffers> GetRawGraphicOutputBuffers(
+        int32_t width, int32_t height) {
+    std::shared_ptr<RawGraphicOutputBuffers> buffers =
+        std::make_shared<RawGraphicOutputBuffers>("test");
+    sp<AMessage> format{new AMessage};
+    format->setInt32(KEY_WIDTH, width);
+    format->setInt32(KEY_HEIGHT, height);
+    buffers->setFormat(format);
+    return buffers;
+}
+
 TEST(RawGraphicOutputBuffersTest, ChangeNumSlots) {
     constexpr int32_t kWidth = 3840;
     constexpr int32_t kHeight = 2160;
 
     std::shared_ptr<RawGraphicOutputBuffers> buffers =
-        std::make_shared<RawGraphicOutputBuffers>("test");
-    sp<AMessage> format{new AMessage};
-    format->setInt32("width", kWidth);
-    format->setInt32("height", kHeight);
-    buffers->setFormat(format);
+        GetRawGraphicOutputBuffers(kWidth, kHeight);
 
     std::shared_ptr<C2BlockPool> pool;
     ASSERT_EQ(OK, GetCodec2BlockPool(C2BlockPool::BASIC_GRAPHIC, nullptr, &pool));
@@ -96,4 +105,435 @@
     }
 }
 
+class TestGraphicAllocation : public C2GraphicAllocation {
+public:
+    TestGraphicAllocation(
+            uint32_t width,
+            uint32_t height,
+            const C2PlanarLayout &layout,
+            size_t capacity,
+            std::vector<size_t> offsets)
+        : C2GraphicAllocation(width, height),
+          mLayout(layout),
+          mMemory(capacity, 0xAA),
+          mOffsets(offsets) {
+    }
+
+    c2_status_t map(
+            C2Rect rect, C2MemoryUsage usage, C2Fence *fence,
+            C2PlanarLayout *layout, uint8_t **addr) override {
+        (void)rect;
+        (void)usage;
+        (void)fence;
+        *layout = mLayout;
+        for (size_t i = 0; i < mLayout.numPlanes; ++i) {
+            addr[i] = mMemory.data() + mOffsets[i];
+        }
+        return C2_OK;
+    }
+
+    c2_status_t unmap(uint8_t **, C2Rect, C2Fence *) override { return C2_OK; }
+
+    C2Allocator::id_t getAllocatorId() const override { return -1; }
+
+    const C2Handle *handle() const override { return nullptr; }
+
+    bool equals(const std::shared_ptr<const C2GraphicAllocation> &other) const override {
+        return other.get() == this;
+    }
+
+private:
+    C2PlanarLayout mLayout;
+    std::vector<uint8_t> mMemory;
+    std::vector<uint8_t *> mAddr;
+    std::vector<size_t> mOffsets;
+};
+
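+// Parameterized over <contiguous memory, plane order ("YUV", "YVU", ...),
+// planar vs. semi-planar layout, stride>.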
+class LayoutTest : public ::testing::TestWithParam<std::tuple<bool, std::string, bool, int32_t>> {
+private:
+    static C2PlanarLayout YUVPlanarLayout(int32_t stride) {
+        C2PlanarLayout layout = {
+            C2PlanarLayout::TYPE_YUV,
+            3,  /* numPlanes */
+            3,  /* rootPlanes */
+            {},  /* planes --- to be filled below */
+        };
+        layout.planes[C2PlanarLayout::PLANE_Y] = {
+            C2PlaneInfo::CHANNEL_Y,
+            1,  /* colInc */
+            stride,  /* rowInc */
+            1,  /* colSampling */
+            1,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_Y,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_U] = {
+            C2PlaneInfo::CHANNEL_CB,
+            1,  /* colInc */
+            stride / 2,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_U,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_V] = {
+            C2PlaneInfo::CHANNEL_CR,
+            1,  /* colInc */
+            stride / 2,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_V,  /* rootIx */
+            0,  /* offset */
+        };
+        return layout;
+    }
+
+    static C2PlanarLayout YUVSemiPlanarLayout(int32_t stride) {
+        C2PlanarLayout layout = {
+            C2PlanarLayout::TYPE_YUV,
+            3,  /* numPlanes */
+            2,  /* rootPlanes */
+            {},  /* planes --- to be filled below */
+        };
+        layout.planes[C2PlanarLayout::PLANE_Y] = {
+            C2PlaneInfo::CHANNEL_Y,
+            1,  /* colInc */
+            stride,  /* rowInc */
+            1,  /* colSampling */
+            1,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_Y,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_U] = {
+            C2PlaneInfo::CHANNEL_CB,
+            2,  /* colInc */
+            stride,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_U,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_V] = {
+            C2PlaneInfo::CHANNEL_CR,
+            2,  /* colInc */
+            stride,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_U,  /* rootIx */
+            1,  /* offset */
+        };
+        return layout;
+    }
+
+    static C2PlanarLayout YVUSemiPlanarLayout(int32_t stride) {
+        C2PlanarLayout layout = {
+            C2PlanarLayout::TYPE_YUV,
+            3,  /* numPlanes */
+            2,  /* rootPlanes */
+            {},  /* planes --- to be filled below */
+        };
+        layout.planes[C2PlanarLayout::PLANE_Y] = {
+            C2PlaneInfo::CHANNEL_Y,
+            1,  /* colInc */
+            stride,  /* rowInc */
+            1,  /* colSampling */
+            1,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_Y,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_U] = {
+            C2PlaneInfo::CHANNEL_CB,
+            2,  /* colInc */
+            stride,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_V,  /* rootIx */
+            1,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_V] = {
+            C2PlaneInfo::CHANNEL_CR,
+            2,  /* colInc */
+            stride,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_V,  /* rootIx */
+            0,  /* offset */
+        };
+        return layout;
+    }
+
+    static std::shared_ptr<C2GraphicBlock> CreateGraphicBlock(
+            uint32_t width,
+            uint32_t height,
+            const C2PlanarLayout &layout,
+            size_t capacity,
+            std::vector<size_t> offsets) {
+        std::shared_ptr<C2GraphicAllocation> alloc = std::make_shared<TestGraphicAllocation>(
+                width,
+                height,
+                layout,
+                capacity,
+                offsets);
+
+        return _C2BlockFactory::CreateGraphicBlock(alloc);
+    }
+
+    static constexpr uint8_t GetPixelValue(uint8_t value, uint32_t row, uint32_t col) {
+        return (uint32_t(value) * row + col) & 0xFF;
+    }
+
+    static void FillPlane(C2GraphicView &view, size_t index, uint8_t value) {
+        C2PlanarLayout layout = view.layout();
+
+        uint8_t *rowPtr = view.data()[index];
+        C2PlaneInfo plane = layout.planes[index];
+        for (uint32_t row = 0; row < view.height() / plane.rowSampling; ++row) {
+            uint8_t *colPtr = rowPtr;
+            for (uint32_t col = 0; col < view.width() / plane.colSampling; ++col) {
+                *colPtr = GetPixelValue(value, row, col);
+                colPtr += plane.colInc;
+            }
+            rowPtr += plane.rowInc;
+        }
+    }
+
+    static void FillBlock(const std::shared_ptr<C2GraphicBlock> &block) {
+        C2GraphicView view = block->map().get();
+
+        FillPlane(view, C2PlanarLayout::PLANE_Y, 'Y');
+        FillPlane(view, C2PlanarLayout::PLANE_U, 'U');
+        FillPlane(view, C2PlanarLayout::PLANE_V, 'V');
+    }
+
+    static bool VerifyPlane(
+            const MediaImage2 *mediaImage,
+            const uint8_t *base,
+            uint32_t index,
+            uint8_t value,
+            std::string *errorMsg) {
+        *errorMsg = "";
+        MediaImage2::PlaneInfo plane = mediaImage->mPlane[index];
+        const uint8_t *rowPtr = base + plane.mOffset;
+        for (uint32_t row = 0; row < mediaImage->mHeight / plane.mVertSubsampling; ++row) {
+            const uint8_t *colPtr = rowPtr;
+            for (uint32_t col = 0; col < mediaImage->mWidth / plane.mHorizSubsampling; ++col) {
+                if (GetPixelValue(value, row, col) != *colPtr) {
+                    *errorMsg = AStringPrintf("row=%u col=%u expected=%02x actual=%02x",
+                            row, col, GetPixelValue(value, row, col), *colPtr).c_str();
+                    return false;
+                }
+                colPtr += plane.mColInc;
+            }
+            rowPtr += plane.mRowInc;
+        }
+        return true;
+    }
+
+public:
+    static constexpr int32_t kWidth = 320;
+    static constexpr int32_t kHeight = 240;
+    static constexpr int32_t kGapLength = kWidth * kHeight * 10;
+
+    static std::shared_ptr<C2Buffer> CreateAndFillBufferFromParam(const ParamType &param) {
+        bool contiguous = std::get<0>(param);
+        std::string planeOrderStr = std::get<1>(param);
+        bool planar = std::get<2>(param);
+        int32_t stride = std::get<3>(param);
+
+        C2PlanarLayout::plane_index_t planeOrder[3];
+        C2PlanarLayout layout;
+
+        if (planeOrderStr.size() != 3) {
+            return nullptr;
+        }
+        for (size_t i = 0; i < 3; ++i) {
+            C2PlanarLayout::plane_index_t planeIndex;
+            switch (planeOrderStr[i]) {
+                case 'Y': planeIndex = C2PlanarLayout::PLANE_Y; break;
+                case 'U': planeIndex = C2PlanarLayout::PLANE_U; break;
+                case 'V': planeIndex = C2PlanarLayout::PLANE_V; break;
+                default:  return nullptr;
+            }
+            planeOrder[i] = planeIndex;
+        }
+
+        if (planar) {
+            layout = YUVPlanarLayout(stride);
+        } else {  // semi-planar
+            for (size_t i = 0; i < 3; ++i) {
+                if (planeOrder[i] == C2PlanarLayout::PLANE_U) {
+                    layout = YUVSemiPlanarLayout(stride);
+                    break;
+                }
+                if (planeOrder[i] == C2PlanarLayout::PLANE_V) {
+                    layout = YVUSemiPlanarLayout(stride);
+                    break;
+                }
+            }
+        }
+
+        size_t yPlaneSize = stride * kHeight;
+        size_t uvPlaneSize = stride * kHeight / 4;
+        size_t capacity = yPlaneSize + uvPlaneSize * 2;
+        std::vector<size_t> offsets(3);
+
+        if (!contiguous) {
+            if (planar) {
+                capacity += kGapLength * 2;
+            } else {  // semi-planar
+                capacity += kGapLength;
+            }
+        }
+
+        offsets[planeOrder[0]] = 0;
+        size_t planeSize = (planeOrder[0] == C2PlanarLayout::PLANE_Y) ? yPlaneSize : uvPlaneSize;
+        for (size_t i = 1; i < 3; ++i) {
+            offsets[planeOrder[i]] = offsets[planeOrder[i - 1]] + planeSize;
+            if (!contiguous) {
+                offsets[planeOrder[i]] += kGapLength;
+            }
+            planeSize = (planeOrder[i] == C2PlanarLayout::PLANE_Y) ? yPlaneSize : uvPlaneSize;
+            if (!planar  // semi-planar
+                    && planeOrder[i - 1] != C2PlanarLayout::PLANE_Y
+                    && planeOrder[i] != C2PlanarLayout::PLANE_Y) {
+                offsets[planeOrder[i]] = offsets[planeOrder[i - 1]] + 1;
+                planeSize = uvPlaneSize * 2 - 1;
+            }
+        }
+
+        std::shared_ptr<C2GraphicBlock> block = CreateGraphicBlock(
+                kWidth,
+                kHeight,
+                layout,
+                capacity,
+                offsets);
+        FillBlock(block);
+        return C2Buffer::CreateGraphicBuffer(
+                block->share(block->crop(), C2Fence()));
+    }
+
+    static bool VerifyClientBuffer(
+            const sp<MediaCodecBuffer> &buffer, std::string *errorMsg) {
+        *errorMsg = "";
+        sp<ABuffer> imageData;
+        if (!buffer->format()->findBuffer("image-data", &imageData)) {
+            *errorMsg = "Missing image data";
+            return false;
+        }
+        MediaImage2 *mediaImage = (MediaImage2 *)imageData->data();
+        if (mediaImage->mType != MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
+            *errorMsg = AStringPrintf("Unexpected type: %d", mediaImage->mType).c_str();
+            return false;
+        }
+        std::string planeErrorMsg;
+        if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::Y, 'Y', &planeErrorMsg)) {
+            *errorMsg = "Y plane does not match: " + planeErrorMsg;
+            return false;
+        }
+        if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::U, 'U', &planeErrorMsg)) {
+            *errorMsg = "U plane does not match: " + planeErrorMsg;
+            return false;
+        }
+        if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::V, 'V', &planeErrorMsg)) {
+            *errorMsg = "V plane does not match: " + planeErrorMsg;
+            return false;
+        }
+
+        int32_t width, height, stride;
+        buffer->format()->findInt32(KEY_WIDTH, &width);
+        buffer->format()->findInt32(KEY_HEIGHT, &height);
+        buffer->format()->findInt32(KEY_STRIDE, &stride);
+
+        MediaImage2 legacyYLayout = {
+            MediaImage2::MEDIA_IMAGE_TYPE_Y,
+            1,  // mNumPlanes
+            uint32_t(width),
+            uint32_t(height),
+            8,
+            8,
+            {},  // mPlane
+        };
+        legacyYLayout.mPlane[MediaImage2::Y] = {
+            0,  // mOffset
+            1,  // mColInc
+            stride,  // mRowInc
+            1,  // mHorizSubsampling
+            1,  // mVertSubsampling
+        };
+        if (!VerifyPlane(&legacyYLayout, buffer->data(), MediaImage2::Y, 'Y', &planeErrorMsg)) {
+            *errorMsg = "Y plane by legacy layout does not match: " + planeErrorMsg;
+            return false;
+        }
+        return true;
+    }
+
+};
+
+TEST_P(LayoutTest, VerifyLayout) {
+    std::shared_ptr<RawGraphicOutputBuffers> buffers =
+        GetRawGraphicOutputBuffers(kWidth, kHeight);
+
+    std::shared_ptr<C2Buffer> c2Buffer = CreateAndFillBufferFromParam(GetParam());
+    ASSERT_NE(nullptr, c2Buffer);
+    sp<MediaCodecBuffer> clientBuffer;
+    size_t index;
+    ASSERT_EQ(OK, buffers->registerBuffer(c2Buffer, &index, &clientBuffer));
+    ASSERT_NE(nullptr, clientBuffer);
+    std::string errorMsg;
+    ASSERT_TRUE(VerifyClientBuffer(clientBuffer, &errorMsg)) << errorMsg;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        RawGraphicOutputBuffersTest,
+        LayoutTest,
+        ::testing::Combine(
+            ::testing::Bool(),  /* contiguous */
+            ::testing::Values("YUV", "YVU", "UVY", "VUY"),
+            ::testing::Bool(),  /* planar */
+            ::testing::Values(320, 512)),
+        [](const ::testing::TestParamInfo<LayoutTest::ParamType> &info) {
+            std::string contiguous = std::get<0>(info.param) ? "Contiguous" : "Noncontiguous";
+            std::string planar = std::get<2>(info.param) ? "Planar" : "SemiPlanar";
+            return contiguous
+                    + std::get<1>(info.param)
+                    + planar
+                    + std::to_string(std::get<3>(info.param));
+        });
+
 } // namespace android
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index 6287221..e7dc92a 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libsfplugin_ccodec_utils",
     vendor_available: true,
     min_sdk_version: "29",
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index 60f4736..19afccf 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -13,7 +13,7 @@
 
 // !!!DO NOT DEPEND ON THIS SHARED LIBRARY DIRECTLY!!!
 // use libcodec2-impl-defaults instead
-cc_library_shared {
+cc_library {
     name: "libcodec2_vndk",
     vendor_available: true,
     min_sdk_version: "29",
diff --git a/media/codecs/m4v_h263/dec/src/vop.cpp b/media/codecs/m4v_h263/dec/src/vop.cpp
index 335846c..7b32498 100644
--- a/media/codecs/m4v_h263/dec/src/vop.cpp
+++ b/media/codecs/m4v_h263/dec/src/vop.cpp
@@ -497,6 +497,13 @@
                 }
                 while ((qmat[*(zigzag_inv+i)] != 0) && (++i < 64));
 
+                /* qmatrix must have at least one non-zero value, which means
+                   i would be non-zero in valid cases */
+                if (i == 0)
+                {
+                    return PV_FAIL;
+                }
+
                 for (j = i; j < 64; j++)
                     qmat[*(zigzag_inv+j)] = qmat[*(zigzag_inv+i-1)];
             }
@@ -520,6 +527,13 @@
                 }
                 while ((qmat[*(zigzag_inv+i)] != 0) && (++i < 64));
 
+                /* qmatrix must have at least one non-zero value, which means
+                   i would be non-zero in valid cases */
+                if (i == 0)
+                {
+                    return PV_FAIL;
+                }
+
                 for (j = i; j < 64; j++)
                     qmat[*(zigzag_inv+j)] = qmat[*(zigzag_inv+i-1)];
             }
diff --git a/media/codecs/mp3dec/Android.bp b/media/codecs/mp3dec/Android.bp
index 1acf0a6..f84da21 100644
--- a/media/codecs/mp3dec/Android.bp
+++ b/media/codecs/mp3dec/Android.bp
@@ -1,3 +1,18 @@
+cc_library_headers {
+    name: "libstagefright_mp3dec_headers",
+    vendor_available: true,
+    min_sdk_version: "29",
+    host_supported: true,
+    export_include_dirs: [
+        "include",
+        "src",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
+}
+
 cc_library_static {
     name: "libstagefright_mp3dec",
     vendor_available: true,
@@ -65,10 +80,8 @@
 
     include_dirs: ["frameworks/av/media/libstagefright/include"],
 
-    export_include_dirs: [
-        "include",
-        "src",
-    ],
+    header_libs: ["libstagefright_mp3dec_headers"],
+    export_header_lib_headers: ["libstagefright_mp3dec_headers"],
 
     cflags: [
         "-DOSCL_UNUSED_ARG(x)=(void)(x)",
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
index 0617e88..ec7cb24 100644
--- a/media/extractors/flac/FLACExtractor.cpp
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -561,6 +561,8 @@
         AMediaFormat_setString(mFileMetadata,
                 AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_FLAC);
     }
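+    // worst-case decoded block: max block size x channel count x output sample size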
+    mMaxBufferSize = getMaxBlockSize() * getChannels() * getOutputSampleSize();
+    AMediaFormat_setInt32(mTrackMetadata, AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, mMaxBufferSize);
     return OK;
 }
 
@@ -568,8 +570,6 @@
 {
     CHECK(mGroup == NULL);
     mGroup = group;
-    mMaxBufferSize = getMaxBlockSize() * getChannels() * getOutputSampleSize();
-    AMediaFormat_setInt32(mTrackMetadata, AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, mMaxBufferSize);
     mGroup->add_buffer(mMaxBufferSize);
 }
 
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index e81ab06..7796ed5 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -32,6 +32,6 @@
 cc_library_headers {
     name: "libaaudio_headers",
     export_include_dirs: ["include"],
-    export_header_lib_headers: ["aaudio-aidl-cpp"],
-    header_libs: ["aaudio-aidl-cpp"],
+    export_shared_lib_headers: ["aaudio-aidl-cpp"],
+    shared_libs: ["aaudio-aidl-cpp"],
 }
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
index 2b2fe6d..6e3a1c8 100644
--- a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
@@ -15,10 +15,12 @@
  */
 
 #include <binding/AAudioBinderAdapter.h>
+#include <media/AidlConversionUtil.h>
 #include <utility/AAudioUtilities.h>
 
 namespace aaudio {
 
+using android::aidl_utils::statusTFromBinderStatus;
 using android::binder::Status;
 
 AAudioBinderAdapter::AAudioBinderAdapter(IAAudioService* delegate)
@@ -36,7 +38,7 @@
                                           &params,
                                           &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     config = params;
     return result;
@@ -46,7 +48,7 @@
     aaudio_result_t result;
     Status status = mDelegate->closeStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -59,7 +61,7 @@
                                                     &endpoint,
                                                     &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     endpointOut = std::move(endpoint);
     return result;
@@ -69,7 +71,7 @@
     aaudio_result_t result;
     Status status = mDelegate->startStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -78,7 +80,7 @@
     aaudio_result_t result;
     Status status = mDelegate->pauseStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -87,7 +89,7 @@
     aaudio_result_t result;
     Status status = mDelegate->stopStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -96,7 +98,7 @@
     aaudio_result_t result;
     Status status = mDelegate->flushStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -107,7 +109,7 @@
     aaudio_result_t result;
     Status status = mDelegate->registerAudioThread(streamHandle, clientThreadId, periodNanoseconds, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -117,7 +119,7 @@
     aaudio_result_t result;
     Status status = mDelegate->unregisterAudioThread(streamHandle, clientThreadId, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1d036d0..af8ff19 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -558,7 +558,7 @@
         if (status < 0) { // a non-negative value is the volume shaper id.
             ALOGE("applyVolumeShaper() failed with status %d", status);
         }
-        return binder::Status::fromStatusT(status);
+        return aidl_utils::binderStatusFromStatusT(status);
     } else {
         ALOGD("applyVolumeShaper()"
                       " no AudioTrack for volume control from IPlayer");
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index f11f184..7e5eba3 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -117,56 +117,58 @@
 
 ConversionResult<Direction> direction(media::AudioPortRole role, media::AudioPortType type) {
     switch (type) {
+        case media::AudioPortType::NONE:
+        case media::AudioPortType::SESSION:
+            break;  // must be listed  -Werror,-Wswitch
         case media::AudioPortType::DEVICE:
             switch (role) {
+                case media::AudioPortRole::NONE:
+                     break;  // must be listed  -Werror,-Wswitch
                 case media::AudioPortRole::SOURCE:
                     return Direction::INPUT;
                 case media::AudioPortRole::SINK:
                     return Direction::OUTPUT;
-                default:
-                    break;
             }
             break;
         case media::AudioPortType::MIX:
             switch (role) {
+                case media::AudioPortRole::NONE:
+                     break;  // must be listed  -Werror,-Wswitch
                 case media::AudioPortRole::SOURCE:
                     return Direction::OUTPUT;
                 case media::AudioPortRole::SINK:
                     return Direction::INPUT;
-                default:
-                    break;
             }
             break;
-        default:
-            break;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<Direction> direction(audio_port_role_t role, audio_port_type_t type) {
     switch (type) {
+        case AUDIO_PORT_TYPE_NONE:
+        case AUDIO_PORT_TYPE_SESSION:
+            break;  // must be listed  -Werror,-Wswitch
         case AUDIO_PORT_TYPE_DEVICE:
             switch (role) {
+                case AUDIO_PORT_ROLE_NONE:
+                     break;  // must be listed  -Werror,-Wswitch
                 case AUDIO_PORT_ROLE_SOURCE:
                     return Direction::INPUT;
                 case AUDIO_PORT_ROLE_SINK:
                     return Direction::OUTPUT;
-                default:
-                    break;
             }
             break;
         case AUDIO_PORT_TYPE_MIX:
             switch (role) {
+                case AUDIO_PORT_ROLE_NONE:
+                     break;  // must be listed  -Werror,-Wswitch
                 case AUDIO_PORT_ROLE_SOURCE:
                     return Direction::OUTPUT;
                 case AUDIO_PORT_ROLE_SINK:
                     return Direction::INPUT;
-                default:
-                    break;
             }
             break;
-        default:
-            break;
     }
     return unexpected(BAD_VALUE);
 }
@@ -236,6 +238,14 @@
     return convertReinterpret<int32_t>(legacy);
 }
 
+ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl) {
+    return convertReinterpret<audio_hw_sync_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy) {
+    return convertReinterpret<int32_t>(legacy);
+}
+
 ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl) {
     return convertReinterpret<pid_t>(aidl);
 }
@@ -268,8 +278,9 @@
     return std::string(legacy.c_str());
 }
 
-// The legacy enum is unnamed. Thus, we use int.
-ConversionResult<int> aidl2legacy_AudioPortConfigType(media::AudioPortConfigType aidl) {
+// The legacy enum is unnamed. Thus, we use int32_t.
+ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
+        media::AudioPortConfigType aidl) {
     switch (aidl) {
         case media::AudioPortConfigType::SAMPLE_RATE:
             return AUDIO_PORT_CONFIG_SAMPLE_RATE;
@@ -285,8 +296,9 @@
     return unexpected(BAD_VALUE);
 }
 
-// The legacy enum is unnamed. Thus, we use int.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_AudioPortConfigType(int legacy) {
+// The legacy enum is unnamed. Thus, we use int32_t.
+ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
+        int32_t legacy) {
     switch (legacy) {
         case AUDIO_PORT_CONFIG_SAMPLE_RATE:
             return media::AudioPortConfigType::SAMPLE_RATE;
@@ -304,7 +316,7 @@
 
 ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl) {
     return convertBitmask<unsigned int, int32_t, int, media::AudioPortConfigType>(
-            aidl, aidl2legacy_AudioPortConfigType,
+            aidl, aidl2legacy_AudioPortConfigType_int32_t,
             // AudioPortConfigType enum is index-based.
             index2enum_index<media::AudioPortConfigType>,
             // AUDIO_PORT_CONFIG_* flags are mask-based.
@@ -313,7 +325,7 @@
 
 ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy) {
     return convertBitmask<int32_t, unsigned int, media::AudioPortConfigType, int>(
-            legacy, legacy2aidl_AudioPortConfigType,
+            legacy, legacy2aidl_int32_t_AudioPortConfigType,
             // AUDIO_PORT_CONFIG_* flags are mask-based.
             index2enum_bitmask<unsigned>,
             // AudioPortConfigType enum is index-based.
@@ -355,9 +367,8 @@
             return AUDIO_INPUT_CONFIG_CHANGED;
         case media::AudioIoConfigEvent::CLIENT_STARTED:
             return AUDIO_CLIENT_STARTED;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
@@ -381,9 +392,8 @@
             return media::AudioIoConfigEvent::INPUT_CONFIG_CHANGED;
         case AUDIO_CLIENT_STARTED:
             return media::AudioIoConfigEvent::CLIENT_STARTED;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_port_role_t> aidl2legacy_AudioPortRole_audio_port_role_t(
@@ -395,9 +405,8 @@
             return AUDIO_PORT_ROLE_SOURCE;
         case media::AudioPortRole::SINK:
             return AUDIO_PORT_ROLE_SINK;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioPortRole> legacy2aidl_audio_port_role_t_AudioPortRole(
@@ -409,9 +418,8 @@
             return media::AudioPortRole::SOURCE;
         case AUDIO_PORT_ROLE_SINK:
             return media::AudioPortRole::SINK;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_port_type_t> aidl2legacy_AudioPortType_audio_port_type_t(
@@ -425,9 +433,8 @@
             return AUDIO_PORT_TYPE_MIX;
         case media::AudioPortType::SESSION:
             return AUDIO_PORT_TYPE_SESSION;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioPortType> legacy2aidl_audio_port_type_t_AudioPortType(
@@ -441,9 +448,8 @@
             return media::AudioPortType::MIX;
         case AUDIO_PORT_TYPE_SESSION:
             return media::AudioPortType::SESSION;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
@@ -468,9 +474,8 @@
             return AUDIO_GAIN_MODE_CHANNELS;
         case media::AudioGainMode::RAMP:
             return AUDIO_GAIN_MODE_RAMP;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy) {
@@ -481,9 +486,8 @@
             return media::AudioGainMode::CHANNELS;
         case AUDIO_GAIN_MODE_RAMP:
             return media::AudioGainMode::RAMP;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
@@ -495,7 +499,7 @@
             enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
 }
 
-ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_mask_int32_t(audio_gain_mode_t legacy) {
+ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy) {
     return convertBitmask<int32_t, audio_gain_mode_t, media::AudioGainMode, audio_gain_mode_t>(
             legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
             // AUDIO_GAIN_MODE_* constants are mask-based.
@@ -540,7 +544,7 @@
         const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type) {
     media::AudioGainConfig aidl;
     aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
-    aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_mask_int32_t(legacy.mode));
+    aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
     aidl.channelMask =
             VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
     const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
@@ -575,14 +579,15 @@
             return AUDIO_INPUT_FLAG_HW_AV_SYNC;
         case media::AudioInputFlags::DIRECT:
             return AUDIO_INPUT_FLAG_DIRECT;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
         audio_input_flags_t legacy) {
     switch (legacy) {
+        case AUDIO_INPUT_FLAG_NONE:
+            break; // shouldn't get here; listed to satisfy -Werror,-Wswitch
         case AUDIO_INPUT_FLAG_FAST:
             return media::AudioInputFlags::FAST;
         case AUDIO_INPUT_FLAG_HW_HOTWORD:
@@ -599,9 +604,8 @@
             return media::AudioInputFlags::HW_AV_SYNC;
         case AUDIO_INPUT_FLAG_DIRECT:
             return media::AudioInputFlags::DIRECT;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
@@ -637,14 +641,17 @@
             return AUDIO_OUTPUT_FLAG_VOIP_RX;
         case media::AudioOutputFlags::INCALL_MUSIC:
             return AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
-        default:
-            return unexpected(BAD_VALUE);
+        case media::AudioOutputFlags::GAPLESS_OFFLOAD:
+            return AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD;
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
         audio_output_flags_t legacy) {
     switch (legacy) {
+        case AUDIO_OUTPUT_FLAG_NONE:
+            break; // shouldn't get here; listed to satisfy -Werror,-Wswitch
         case AUDIO_OUTPUT_FLAG_DIRECT:
             return media::AudioOutputFlags::DIRECT;
         case AUDIO_OUTPUT_FLAG_PRIMARY:
@@ -675,12 +682,14 @@
             return media::AudioOutputFlags::VOIP_RX;
         case AUDIO_OUTPUT_FLAG_INCALL_MUSIC:
             return media::AudioOutputFlags::INCALL_MUSIC;
-        default:
-            return unexpected(BAD_VALUE);
+        case AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD:
+            return media::AudioOutputFlags::GAPLESS_OFFLOAD;
     }
+    return unexpected(BAD_VALUE);
 }
 
-ConversionResult<audio_input_flags_t> aidl2legacy_audio_input_flags_mask(int32_t aidl) {
+ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
+        int32_t aidl) {
     using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
 
     LegacyMask converted = VALUE_OR_RETURN(
@@ -691,7 +700,8 @@
     return static_cast<audio_input_flags_t>(converted);
 }
 
-ConversionResult<int32_t> legacy2aidl_audio_input_flags_mask(audio_input_flags_t legacy) {
+ConversionResult<int32_t> legacy2aidl_audio_input_flags_t_int32_t_mask(
+        audio_input_flags_t legacy) {
     using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
 
     LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
@@ -701,7 +711,8 @@
             enumToMask_index<int32_t, media::AudioInputFlags>);
 }
 
-ConversionResult<audio_output_flags_t> aidl2legacy_audio_output_flags_mask(int32_t aidl) {
+ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
+        int32_t aidl) {
     return convertBitmask<audio_output_flags_t,
                           int32_t,
                           audio_output_flags_t,
@@ -711,7 +722,8 @@
             enumToMask_bitmask<audio_output_flags_t, audio_output_flags_t>);
 }
 
-ConversionResult<int32_t> legacy2aidl_audio_output_flags_mask(audio_output_flags_t legacy) {
+ConversionResult<int32_t> legacy2aidl_audio_output_flags_t_int32_t_mask(
+        audio_output_flags_t legacy) {
     using LegacyMask = std::underlying_type_t<audio_output_flags_t>;
 
     LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
@@ -728,13 +740,15 @@
     switch (dir) {
         case Direction::INPUT: {
             legacy.input = VALUE_OR_RETURN(
-                    aidl2legacy_audio_input_flags_mask(VALUE_OR_RETURN(UNION_GET(aidl, input))));
+                    aidl2legacy_int32_t_audio_input_flags_t_mask(
+                            VALUE_OR_RETURN(UNION_GET(aidl, input))));
         }
             break;
 
         case Direction::OUTPUT: {
             legacy.output = VALUE_OR_RETURN(
-                    aidl2legacy_audio_output_flags_mask(VALUE_OR_RETURN(UNION_GET(aidl, output))));
+                    aidl2legacy_int32_t_audio_output_flags_t_mask(
+                            VALUE_OR_RETURN(UNION_GET(aidl, output))));
         }
             break;
     }
@@ -750,17 +764,20 @@
     switch (dir) {
         case Direction::INPUT:
             UNION_SET(aidl, input,
-                      VALUE_OR_RETURN(legacy2aidl_audio_input_flags_mask(legacy.input)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(
+                              legacy.input)));
             break;
         case Direction::OUTPUT:
             UNION_SET(aidl, output,
-                      VALUE_OR_RETURN(legacy2aidl_audio_output_flags_mask(legacy.output)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(
+                              legacy.output)));
             break;
     }
     return aidl;
 }
 
-ConversionResult<audio_port_config_device_ext> aidl2legacy_AudioPortConfigDeviceExt(
+ConversionResult<audio_port_config_device_ext>
+aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
         const media::AudioPortConfigDeviceExt& aidl) {
     audio_port_config_device_ext legacy;
     legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
@@ -769,7 +786,8 @@
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigDeviceExt> legacy2aidl_AudioPortConfigDeviceExt(
+ConversionResult<media::AudioPortConfigDeviceExt>
+legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
         const audio_port_config_device_ext& legacy) {
     media::AudioPortConfigDeviceExt aidl;
     aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
@@ -814,9 +832,8 @@
             return AUDIO_STREAM_PATCH;
         case media::AudioStreamType::CALL_ASSISTANT:
             return AUDIO_STREAM_CALL_ASSISTANT;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
@@ -854,9 +871,8 @@
             return media::AudioStreamType::PATCH;
         case AUDIO_STREAM_CALL_ASSISTANT:
             return media::AudioStreamType::CALL_ASSISTANT;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
@@ -893,9 +909,8 @@
             return AUDIO_SOURCE_FM_TUNER;
         case media::AudioSourceType::HOTWORD:
             return AUDIO_SOURCE_HOTWORD;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
@@ -931,9 +946,8 @@
             return media::AudioSourceType::FM_TUNER;
         case AUDIO_SOURCE_HOTWORD:
             return media::AudioSourceType::HOTWORD;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl) {
@@ -955,24 +969,21 @@
         case media::AudioPortRole::NONE:
             // Just verify that the union is empty.
             VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
-            break;
+            return legacy;
 
         case media::AudioPortRole::SOURCE:
             // This is not a bug. A SOURCE role corresponds to the stream field.
             legacy.stream = VALUE_OR_RETURN(aidl2legacy_AudioStreamType_audio_stream_type_t(
                     VALUE_OR_RETURN(UNION_GET(aidl, stream))));
-            break;
+            return legacy;
 
         case media::AudioPortRole::SINK:
             // This is not a bug. A SINK role corresponds to the source field.
             legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(
                     VALUE_OR_RETURN(UNION_GET(aidl, source))));
-            break;
-
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            return legacy;
     }
-    return legacy;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // unreachable; -Werror,-Wswitch flags missing cases
 }
 
 ConversionResult<media::AudioPortConfigMixExtUseCase> legacy2aidl_AudioPortConfigMixExtUseCase(
@@ -982,21 +993,19 @@
     switch (role) {
         case AUDIO_PORT_ROLE_NONE:
             UNION_SET(aidl, unspecified, false);
-            break;
+            return aidl;
         case AUDIO_PORT_ROLE_SOURCE:
             // This is not a bug. A SOURCE role corresponds to the stream field.
             UNION_SET(aidl, stream, VALUE_OR_RETURN(
                     legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream)));
-            break;
+            return aidl;
         case AUDIO_PORT_ROLE_SINK:
             // This is not a bug. A SINK role corresponds to the source field.
             UNION_SET(aidl, source,
                       VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source)));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            return aidl;
     }
-    return aidl;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // unreachable; -Werror,-Wswitch flags missing cases
 }
 
 ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
@@ -1017,14 +1026,16 @@
     return aidl;
 }
 
-ConversionResult<audio_port_config_session_ext> aidl2legacy_AudioPortConfigSessionExt(
+ConversionResult<audio_port_config_session_ext>
+aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
         const media::AudioPortConfigSessionExt& aidl) {
     audio_port_config_session_ext legacy;
     legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigSessionExt> legacy2aidl_AudioPortConfigSessionExt(
+ConversionResult<media::AudioPortConfigSessionExt>
+legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
         const audio_port_config_session_ext& legacy) {
     media::AudioPortConfigSessionExt aidl;
     aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
@@ -1042,23 +1053,24 @@
         case media::AudioPortType::NONE:
             // Just verify that the union is empty.
             VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
-            break;
+            return legacy;
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigDeviceExt(VALUE_OR_RETURN(UNION_GET(aidl, device))));
-            break;
+                    aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, device))));
+            return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortConfigMixExt(VALUE_OR_RETURN(UNION_GET(aidl, mix)), role));
-            break;
+            return legacy;
         case media::AudioPortType::SESSION:
-            legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigSessionExt(
-                    VALUE_OR_RETURN(UNION_GET(aidl, session))));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            legacy.session = VALUE_OR_RETURN(
+                    aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, session))));
+            return legacy;
+
     }
-    return legacy;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // unreachable; -Werror,-Wswitch flags missing cases
 }
 
 ConversionResult<media::AudioPortConfigExt> legacy2aidl_AudioPortConfigExt(
@@ -1068,23 +1080,25 @@
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
             UNION_SET(aidl, unspecified, false);
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_DEVICE:
             UNION_SET(aidl, device,
-                      VALUE_OR_RETURN(legacy2aidl_AudioPortConfigDeviceExt(legacy.device)));
-            break;
+                      VALUE_OR_RETURN(
+                        legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
+                          legacy.device)));
+            return aidl;
         case AUDIO_PORT_TYPE_MIX:
             UNION_SET(aidl, mix,
                       VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExt(legacy.mix, role)));
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_SESSION:
             UNION_SET(aidl, session,
-                      VALUE_OR_RETURN(legacy2aidl_AudioPortConfigSessionExt(legacy.session)));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+                      VALUE_OR_RETURN(
+                        legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+                          legacy.session)));
+            return aidl;
     }
-    return aidl;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // unreachable; -Werror,-Wswitch flags missing cases
 }
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
@@ -1223,7 +1237,8 @@
     return aidl;
 }
 
-ConversionResult<AudioClient> aidl2legacy_AudioClient(const media::AudioClient& aidl) {
+ConversionResult<AudioClient> aidl2legacy_AudioClient_AudioClient(
+        const media::AudioClient& aidl) {
     AudioClient legacy;
     legacy.clientUid = VALUE_OR_RETURN(aidl2legacy_int32_t_uid_t(aidl.clientUid));
     legacy.clientPid = VALUE_OR_RETURN(aidl2legacy_int32_t_pid_t(aidl.clientPid));
@@ -1232,7 +1247,8 @@
     return legacy;
 }
 
-ConversionResult<media::AudioClient> legacy2aidl_AudioClient(const AudioClient& legacy) {
+ConversionResult<media::AudioClient> legacy2aidl_AudioClient_AudioClient(
+        const AudioClient& legacy) {
     media::AudioClient aidl;
     aidl.clientUid = VALUE_OR_RETURN(legacy2aidl_uid_t_int32_t(legacy.clientUid));
     aidl.clientPid = VALUE_OR_RETURN(legacy2aidl_pid_t_int32_t(legacy.clientPid));
@@ -1660,7 +1676,7 @@
 }
 
 ConversionResult<AudioTimestamp>
-aidl2legacy_AudioTimestamp(const media::AudioTimestampInternal& aidl) {
+aidl2legacy_AudioTimestampInternal_AudioTimestamp(const media::AudioTimestampInternal& aidl) {
     AudioTimestamp legacy;
     legacy.mPosition = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.position));
     legacy.mTime.tv_sec = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sec));
@@ -1669,7 +1685,7 @@
 }
 
 ConversionResult<media::AudioTimestampInternal>
-legacy2aidl_AudioTimestamp(const AudioTimestamp& legacy) {
+legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy) {
     media::AudioTimestampInternal aidl;
     aidl.position = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.mPosition));
     aidl.sec = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.mTime.tv_sec));
@@ -1900,25 +1916,24 @@
         case media::AudioPortType::NONE:
             // Just verify that the union is empty.
             VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
-            break;
+            return legacy;
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
                             VALUE_OR_RETURN(UNION_GET(aidl, device))));
-            break;
+            return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
                             VALUE_OR_RETURN(UNION_GET(aidl, mix))));
-            break;
+            return legacy;
         case media::AudioPortType::SESSION:
             legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortSessionExt_audio_port_session_ext(
                     VALUE_OR_RETURN(UNION_GET(aidl, session))));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            return legacy;
+
     }
-    return legacy;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // unreachable; -Werror,-Wswitch flags missing cases
 }
 
 ConversionResult<media::AudioPortExt> legacy2aidl_AudioPortExt(
@@ -1927,25 +1942,23 @@
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
             UNION_SET(aidl, unspecified, false);
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_DEVICE:
             UNION_SET(aidl, device,
                       VALUE_OR_RETURN(
                               legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy.device)));
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_MIX:
             UNION_SET(aidl, mix,
                       VALUE_OR_RETURN(legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy.mix)));
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_SESSION:
             UNION_SET(aidl, session,
                       VALUE_OR_RETURN(legacy2aidl_audio_port_session_ext_AudioPortSessionExt(
                               legacy.session)));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            return aidl;
     }
-    return aidl;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // unreachable; -Werror,-Wswitch flags missing cases
 }
 
 ConversionResult<audio_profile>
@@ -2012,7 +2025,7 @@
 ConversionResult<media::AudioGain>
 legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy) {
     media::AudioGain aidl;
-    aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_mask_int32_t(legacy.mode));
+    aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
     aidl.channelMask = VALUE_OR_RETURN(
             legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
     aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
@@ -2082,4 +2095,96 @@
     return aidl;
 }
 
+ConversionResult<audio_mode_t>
+aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl) {
+    switch (aidl) {
+        case media::AudioMode::INVALID:
+            return AUDIO_MODE_INVALID;
+        case media::AudioMode::CURRENT:
+            return AUDIO_MODE_CURRENT;
+        case media::AudioMode::NORMAL:
+            return AUDIO_MODE_NORMAL;
+        case media::AudioMode::RINGTONE:
+            return AUDIO_MODE_RINGTONE;
+        case media::AudioMode::IN_CALL:
+            return AUDIO_MODE_IN_CALL;
+        case media::AudioMode::IN_COMMUNICATION:
+            return AUDIO_MODE_IN_COMMUNICATION;
+        case media::AudioMode::CALL_SCREEN:
+            return AUDIO_MODE_CALL_SCREEN;
+    }
+    return unexpected(BAD_VALUE);
+}
+
+ConversionResult<media::AudioMode>
+legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy) {
+    switch (legacy) {
+        case AUDIO_MODE_INVALID:
+            return media::AudioMode::INVALID;
+        case AUDIO_MODE_CURRENT:
+            return media::AudioMode::CURRENT;
+        case AUDIO_MODE_NORMAL:
+            return media::AudioMode::NORMAL;
+        case AUDIO_MODE_RINGTONE:
+            return media::AudioMode::RINGTONE;
+        case AUDIO_MODE_IN_CALL:
+            return media::AudioMode::IN_CALL;
+        case AUDIO_MODE_IN_COMMUNICATION:
+            return media::AudioMode::IN_COMMUNICATION;
+        case AUDIO_MODE_CALL_SCREEN:
+            return media::AudioMode::CALL_SCREEN;
+        case AUDIO_MODE_CNT:
+            break;
+    }
+    return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_unique_id_use_t>
+aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(media::AudioUniqueIdUse aidl) {
+    switch (aidl) {
+        case media::AudioUniqueIdUse::UNSPECIFIED:
+            return AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
+        case media::AudioUniqueIdUse::SESSION:
+            return AUDIO_UNIQUE_ID_USE_SESSION;
+        case media::AudioUniqueIdUse::MODULE:
+            return AUDIO_UNIQUE_ID_USE_MODULE;
+        case media::AudioUniqueIdUse::EFFECT:
+            return AUDIO_UNIQUE_ID_USE_EFFECT;
+        case media::AudioUniqueIdUse::PATCH:
+            return AUDIO_UNIQUE_ID_USE_PATCH;
+        case media::AudioUniqueIdUse::OUTPUT:
+            return AUDIO_UNIQUE_ID_USE_OUTPUT;
+        case media::AudioUniqueIdUse::INPUT:
+            return AUDIO_UNIQUE_ID_USE_INPUT;
+        case media::AudioUniqueIdUse::CLIENT:
+            return AUDIO_UNIQUE_ID_USE_CLIENT;
+    }
+    return unexpected(BAD_VALUE);
+}
+
+ConversionResult<media::AudioUniqueIdUse>
+legacy2aidl_audio_unique_id_use_t_AudioUniqueIdUse(audio_unique_id_use_t legacy) {
+    switch (legacy) {
+        case AUDIO_UNIQUE_ID_USE_UNSPECIFIED:
+            return media::AudioUniqueIdUse::UNSPECIFIED;
+        case AUDIO_UNIQUE_ID_USE_SESSION:
+            return media::AudioUniqueIdUse::SESSION;
+        case AUDIO_UNIQUE_ID_USE_MODULE:
+            return media::AudioUniqueIdUse::MODULE;
+        case AUDIO_UNIQUE_ID_USE_EFFECT:
+            return media::AudioUniqueIdUse::EFFECT;
+        case AUDIO_UNIQUE_ID_USE_PATCH:
+            return media::AudioUniqueIdUse::PATCH;
+        case AUDIO_UNIQUE_ID_USE_OUTPUT:
+            return media::AudioUniqueIdUse::OUTPUT;
+        case AUDIO_UNIQUE_ID_USE_INPUT:
+            return media::AudioUniqueIdUse::INPUT;
+        case AUDIO_UNIQUE_ID_USE_CLIENT:
+            return media::AudioUniqueIdUse::CLIENT;
+        case AUDIO_UNIQUE_ID_USE_MAX:
+            break;
+    }
+    return unexpected(BAD_VALUE);
+}
+
 }  // namespace android
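
The recurring change throughout AidlConversion.cpp above is to list every enumerator explicitly, drop the default: label, and move the error return (or LOG_ALWAYS_FATAL) after the switch, so that -Werror,-Wswitch turns any enumerator added later into a build failure instead of a silent runtime BAD_VALUE. A minimal, self-contained sketch of the idiom, using hypothetical Color/LegacyColor enums rather than the real AIDL and audio types:

    #include <cstdio>
    #include <optional>

    enum class Color { RED, GREEN, BLUE };                      // hypothetical AIDL-side enum
    enum LegacyColor { LEGACY_RED, LEGACY_GREEN, LEGACY_BLUE }; // hypothetical legacy enum

    // Every enumerator is listed and there is no default:. If a new Color value is added,
    // -Werror,-Wswitch turns the missing case into a compile error instead of letting the
    // call fall through to the error path below.
    std::optional<LegacyColor> toLegacy(Color aidl) {
        switch (aidl) {
            case Color::RED:   return LEGACY_RED;
            case Color::GREEN: return LEGACY_GREEN;
            case Color::BLUE:  return LEGACY_BLUE;
        }
        return std::nullopt;  // reached only for out-of-range input; the BAD_VALUE analogue
    }

    int main() {
        if (auto legacy = toLegacy(Color::GREEN)) {
            std::printf("converted to %d\n", static_cast<int>(*legacy));
        }
        return 0;
    }
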
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index fa67898..64ec145 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -347,7 +347,9 @@
         "aidl/android/media/OpenInputResponse.aidl",
         "aidl/android/media/OpenOutputRequest.aidl",
         "aidl/android/media/OpenOutputResponse.aidl",
+        "aidl/android/media/RenderPosition.aidl",
 
+        "aidl/android/media/IAudioFlingerService.aidl",
         "aidl/android/media/IAudioFlingerClient.aidl",
         "aidl/android/media/IAudioRecord.aidl",
         "aidl/android/media/IAudioTrack.aidl",
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index ae899c0..79ea1bb 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -30,7 +30,7 @@
 #include <utils/Log.h>
 
 namespace android {
-
+using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
 
 namespace {
@@ -262,7 +262,7 @@
             bs = mIEffect->disable(&status);
         }
         if (!bs.isOk()) {
-            status = bs.transactionError();
+            status = statusTFromBinderStatus(bs);
         }
         if (status == NO_ERROR) {
             mEnabled = enabled;
@@ -303,7 +303,7 @@
 
     Status bs = mIEffect->command(cmdCode, data, *replySize, &response, &status);
     if (!bs.isOk()) {
-        status = bs.transactionError();
+        status = statusTFromBinderStatus(bs);
     }
     if (status == NO_ERROR) {
         memcpy(replyData, response.data(), response.size());
@@ -351,7 +351,7 @@
                                   &response,
                                   &status);
     if (!bs.isOk()) {
-        status = bs.transactionError();
+        status = statusTFromBinderStatus(bs);
         return status;
     }
     assert(response.size() == sizeof(int));
@@ -410,7 +410,7 @@
                                   &response,
                                   &status);
     if (!bs.isOk()) {
-        status = bs.transactionError();
+        status = statusTFromBinderStatus(bs);
     }
     return status;
 }
@@ -441,7 +441,7 @@
 
     Status bs = mIEffect->command(EFFECT_CMD_GET_PARAM, cmd, psize, &response, &status);
     if (!bs.isOk()) {
-        status = bs.transactionError();
+        status = statusTFromBinderStatus(bs);
         return status;
     }
     memcpy(param, response.data(), response.size());
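
The AudioEffect.cpp hunks above (and the AudioRecord.cpp ones below) stop deriving a status_t from bs.transactionError() alone and instead go through aidl_utils::statusTFromBinderStatus, which is expected to fold the whole binder::Status, service-specific errors as well as transport failures, into one status_t. A hedged, self-contained sketch of that folding idea with a hypothetical FakeStatus type; this is an illustration, not the aidl_utils implementation:

    #include <cstdio>

    using status_t = int;            // stand-in for Android's status_t
    constexpr status_t OK = 0;

    // Hypothetical miniature of binder::Status: a transport error, a service-specific
    // error and an exception code can each be set independently.
    struct FakeStatus {
        status_t transactionError = 0;   // non-zero: the binder transaction itself failed
        int serviceSpecificError = 0;    // non-zero: the remote service reported an error
        int exceptionCode = 0;           // non-zero: an exception crossed the interface
        bool isOk() const {
            return transactionError == 0 && serviceSpecificError == 0 && exceptionCode == 0;
        }
    };

    // Fold everything into a single status_t so legacy call sites keep one error path.
    status_t statusFromFakeStatus(const FakeStatus& s) {
        if (s.isOk()) return OK;
        if (s.serviceSpecificError != 0) return s.serviceSpecificError;
        if (s.transactionError != 0) return s.transactionError;
        return s.exceptionCode;          // fall back to (a remapping of) the exception code
    }

    int main() {
        FakeStatus bs;
        bs.serviceSpecificError = -22;   // e.g. a BAD_VALUE-like code from the service
        std::printf("folded status = %d\n", statusFromFakeStatus(bs));
        return 0;
    }
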
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index f01b1d0..112cb67 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -47,6 +47,8 @@
 #define WAIT_PERIOD_MS          10
 
 namespace android {
+using aidl_utils::statusTFromBinderStatus;
+
 // ---------------------------------------------------------------------------
 
 // static
@@ -450,7 +452,7 @@
     mActive = true;
 
     if (!(flags & CBLK_INVALID)) {
-        status = mAudioRecord->start(event, triggerSession).transactionError();
+        status = statusTFromBinderStatus(mAudioRecord->start(event, triggerSession));
         if (status == DEAD_OBJECT) {
             flags |= CBLK_INVALID;
         }
@@ -1439,8 +1441,8 @@
         if (mActive) {
             // callback thread or sync event hasn't changed
             // FIXME this fails if we have a new AudioFlinger instance
-            result = mAudioRecord->start(
-                AudioSystem::SYNC_EVENT_SAME, AUDIO_SESSION_NONE).transactionError();
+            result = statusTFromBinderStatus(mAudioRecord->start(
+                AudioSystem::SYNC_EVENT_SAME, AUDIO_SESSION_NONE));
         }
         mFramesReadServerOffset = mFramesRead; // server resets to zero so we need an offset.
     }
@@ -1531,7 +1533,7 @@
 {
     AutoMutex lock(mLock);
     std::vector<media::MicrophoneInfoData> mics;
-    status_t status = mAudioRecord->getActiveMicrophones(&mics).transactionError();
+    status_t status = statusTFromBinderStatus(mAudioRecord->getActiveMicrophones(&mics));
     activeMicrophones->resize(mics.size());
     for (size_t i = 0; status == OK && i < mics.size(); ++i) {
         status = activeMicrophones->at(i).readFromParcelable(mics[i]);
@@ -1552,7 +1554,7 @@
         // the internal AudioRecord hasn't be created yet, so just stash the attribute.
         return OK;
     } else {
-        return mAudioRecord->setPreferredMicrophoneDirection(direction).transactionError();
+        return statusTFromBinderStatus(mAudioRecord->setPreferredMicrophoneDirection(direction));
     }
 }
 
@@ -1568,7 +1570,7 @@
         // the internal AudioRecord hasn't be created yet, so just stash the attribute.
         return OK;
     } else {
-        return mAudioRecord->setPreferredMicrophoneFieldDimension(zoom).transactionError();
+        return statusTFromBinderStatus(mAudioRecord->setPreferredMicrophoneFieldDimension(zoom));
     }
 }
 
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index d163feb..09fcc66 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -35,7 +35,7 @@
 
 #define VALUE_OR_RETURN_BINDER_STATUS(x) \
     ({ auto _tmp = (x); \
-       if (!_tmp.ok()) return Status::fromStatusT(_tmp.error()); \
+       if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
 // ----------------------------------------------------------------------------
@@ -71,7 +71,7 @@
             sp<IServiceManager> sm = defaultServiceManager();
             sp<IBinder> binder;
             do {
-                binder = sm->getService(String16("media.audio_flinger"));
+                binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
                 if (binder != 0)
                     break;
                 ALOGW("AudioFlinger not published, waiting...");
@@ -83,7 +83,8 @@
                 reportNoError = true;
             }
             binder->linkToDeath(gAudioFlingerClient);
-            gAudioFlinger = interface_cast<IAudioFlinger>(binder);
+            gAudioFlinger = new AudioFlingerClientAdapter(
+                    interface_cast<media::IAudioFlingerService>(binder));
             LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
             afc = gAudioFlingerClient;
             // Make sure callbacks can be received by gAudioFlingerClient
@@ -1187,18 +1188,18 @@
     return aps->setAllowedCapturePolicy(uid, flags);
 }
 
-bool AudioSystem::isOffloadSupported(const audio_offload_info_t& info)
+audio_offload_mode_t AudioSystem::getOffloadSupport(const audio_offload_info_t& info)
 {
-    ALOGV("isOffloadSupported()");
+    ALOGV("%s", __func__);
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return false;
-    return aps->isOffloadSupported(info);
+    if (aps == 0) return AUDIO_OFFLOAD_NOT_SUPPORTED;
+    return aps->getOffloadSupport(info);
 }
 
 status_t AudioSystem::listAudioPorts(audio_port_role_t role,
                                      audio_port_type_t type,
                                      unsigned int *num_ports,
-                                     struct audio_port *ports,
+                                     struct audio_port_v7 *ports,
                                      unsigned int *generation)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -1206,7 +1207,7 @@
     return aps->listAudioPorts(role, type, num_ports, ports, generation);
 }
 
-status_t AudioSystem::getAudioPort(struct audio_port *port)
+status_t AudioSystem::getAudioPort(struct audio_port_v7 *port)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return PERMISSION_DENIED;
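
The AudioSystem.cpp hunk above widens isOffloadSupported() from a bool into getOffloadSupport() returning an audio_offload_mode_t, with the unreachable-service path collapsing to AUDIO_OFFLOAD_NOT_SUPPORTED. A small sketch of that wrapper shape, using a hypothetical OffloadMode enum and FakePolicyService instead of the real audio_offload_mode_t and policy service interface:

    #include <cstdio>

    // Hypothetical tri-state in the spirit of audio_offload_mode_t; the real values
    // live in the system audio headers and may differ.
    enum OffloadMode { OFFLOAD_NOT_SUPPORTED = 0, OFFLOAD_SUPPORTED, OFFLOAD_GAPLESS_SUPPORTED };

    struct FakePolicyService {           // hypothetical stand-in for the policy service
        OffloadMode getOffloadSupport(int /*format*/) const { return OFFLOAD_GAPLESS_SUPPORTED; }
    };

    // Mirrors the wrapper shape above: when the service cannot be reached, report
    // "not supported" rather than a bare false, so callers branch on the richer enum.
    OffloadMode getOffloadSupport(const FakePolicyService* service, int format) {
        if (service == nullptr) return OFFLOAD_NOT_SUPPORTED;
        return service->getOffloadSupport(format);
    }

    int main() {
        FakePolicyService service;
        std::printf("with service: %d\n", getOffloadSupport(&service, /*format=*/1));
        std::printf("no service:   %d\n", getOffloadSupport(nullptr, /*format=*/1));
        return 0;
    }
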
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 8c53c5b..1b1e143 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -2819,7 +2819,7 @@
         media::AudioTimestampInternal ts;
         mAudioTrack->getTimestamp(&ts, &status);
         if (status == OK) {
-            timestamp = VALUE_OR_FATAL(aidl2legacy_AudioTimestamp(ts));
+            timestamp = VALUE_OR_FATAL(aidl2legacy_AudioTimestampInternal_AudioTimestamp(ts));
         }
     } else {
         // read timestamp from shared memory
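
In the IAudioFlinger.cpp diff that follows, the hand-rolled BpAudioFlinger proxy (manual Parcel marshalling per transaction) is replaced by AudioFlingerClientAdapter, which keeps the legacy IAudioFlinger surface for callers while delegating to the AIDL-generated media::IAudioFlingerService and converting arguments and statuses at the boundary. A minimal sketch of that adapter shape, with hypothetical ILegacyVolume / IVolumeService interfaces standing in for the real ones:

    #include <cstdio>
    #include <memory>
    #include <utility>

    using status_t = int;                  // stand-in for Android's status_t
    constexpr status_t OK = 0;
    constexpr status_t DEAD_OBJECT = -32;  // hypothetical value, for the sketch only

    // Hypothetical legacy C++ interface that existing callers keep using unchanged.
    struct ILegacyVolume {
        virtual ~ILegacyVolume() = default;
        virtual status_t setMasterVolume(float value) = 0;
    };

    // Hypothetical AIDL-style interface with a different calling convention.
    struct IVolumeService {
        virtual ~IVolumeService() = default;
        virtual bool setMasterVolume(float value) = 0;  // true: call and service both succeeded
    };

    // Adapter in the spirit of AudioFlingerClientAdapter: implements the legacy interface
    // by delegating to the AIDL-style service and converting the result back to status_t.
    class VolumeClientAdapter : public ILegacyVolume {
    public:
        explicit VolumeClientAdapter(std::shared_ptr<IVolumeService> delegate)
                : mDelegate(std::move(delegate)) {}
        status_t setMasterVolume(float value) override {
            return mDelegate->setMasterVolume(value) ? OK : DEAD_OBJECT;
        }
    private:
        std::shared_ptr<IVolumeService> mDelegate;
    };

    struct FakeVolumeService : IVolumeService {
        bool setMasterVolume(float value) override {
            std::printf("service received volume %.2f\n", value);
            return true;
        }
    };

    int main() {
        std::shared_ptr<ILegacyVolume> af =
                std::make_shared<VolumeClientAdapter>(std::make_shared<FakeVolumeService>());
        return af->setMasterVolume(0.5f) == OK ? 0 : 1;
    }
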
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 786af53..20124df 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -28,19 +28,41 @@
 
 namespace android {
 
+using aidl_utils::statusTFromBinderStatus;
+using binder::Status;
+
 #define MAX_ITEMS_PER_LIST 1024
 
+#define VALUE_OR_RETURN_BINDER(x)                                 \
+    ({                                                            \
+       auto _tmp = (x);                                           \
+       if (!_tmp.ok()) return Status::fromStatusT(_tmp.error());  \
+       std::move(_tmp.value()); \
+     })
+
+#define RETURN_STATUS_IF_ERROR(x)    \
+    {                                \
+       auto _tmp = (x);              \
+       if (_tmp != OK) return _tmp;  \
+    }
+
+#define RETURN_BINDER_IF_ERROR(x)                         \
+    {                                                     \
+       auto _tmp = (x);                                   \
+       if (_tmp != OK) return Status::fromStatusT(_tmp);  \
+    }
+
 ConversionResult<media::CreateTrackRequest> IAudioFlinger::CreateTrackInput::toAidl() const {
     media::CreateTrackRequest aidl;
     aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
     aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(config));
-    aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient(clientInfo));
+    aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
     aidl.sharedBuffer = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(sharedBuffer));
     aidl.notificationsPerBuffer = VALUE_OR_RETURN(convertIntegral<int32_t>(notificationsPerBuffer));
     aidl.speed = speed;
     aidl.audioTrackCallback = audioTrackCallback;
     aidl.opPackageName = opPackageName;
-    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_output_flags_mask(flags));
+    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
     aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
     aidl.selectedDeviceId = VALUE_OR_RETURN(
@@ -54,14 +76,14 @@
     IAudioFlinger::CreateTrackInput legacy;
     legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
     legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.config));
-    legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient(aidl.clientInfo));
+    legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
     legacy.sharedBuffer = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.sharedBuffer));
     legacy.notificationsPerBuffer = VALUE_OR_RETURN(
             convertIntegral<uint32_t>(aidl.notificationsPerBuffer));
     legacy.speed = aidl.speed;
     legacy.audioTrackCallback = aidl.audioTrackCallback;
     legacy.opPackageName = aidl.opPackageName;
-    legacy.flags = VALUE_OR_RETURN(aidl2legacy_audio_output_flags_mask(aidl.flags));
+    legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_output_flags_t_mask(aidl.flags));
     legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
     legacy.notificationFrameCount = VALUE_OR_RETURN(
             convertIntegral<size_t>(aidl.notificationFrameCount));
@@ -74,7 +96,7 @@
 ConversionResult<media::CreateTrackResponse>
 IAudioFlinger::CreateTrackOutput::toAidl() const {
     media::CreateTrackResponse aidl;
-    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_output_flags_mask(flags));
+    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
     aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
     aidl.selectedDeviceId = VALUE_OR_RETURN(
@@ -94,7 +116,7 @@
 IAudioFlinger::CreateTrackOutput::fromAidl(
         const media::CreateTrackResponse& aidl) {
     IAudioFlinger::CreateTrackOutput legacy;
-    legacy.flags = VALUE_OR_RETURN(aidl2legacy_audio_output_flags_mask(aidl.flags));
+    legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_output_flags_t_mask(aidl.flags));
     legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
     legacy.notificationFrameCount = VALUE_OR_RETURN(
             convertIntegral<size_t>(aidl.notificationFrameCount));
@@ -116,10 +138,10 @@
     media::CreateRecordRequest aidl;
     aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
     aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(config));
-    aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient(clientInfo));
+    aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
     aidl.opPackageName = VALUE_OR_RETURN(legacy2aidl_String16_string(opPackageName));
     aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(riid));
-    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_mask(flags));
+    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
     aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
     aidl.selectedDeviceId = VALUE_OR_RETURN(
@@ -134,10 +156,10 @@
     IAudioFlinger::CreateRecordInput legacy;
     legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
     legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
-    legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient(aidl.clientInfo));
+    legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
     legacy.opPackageName = VALUE_OR_RETURN(aidl2legacy_string_view_String16(aidl.opPackageName));
     legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
-    legacy.flags = VALUE_OR_RETURN(aidl2legacy_audio_input_flags_mask(aidl.flags));
+    legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_input_flags_t_mask(aidl.flags));
     legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
     legacy.notificationFrameCount = VALUE_OR_RETURN(
             convertIntegral<size_t>(aidl.notificationFrameCount));
@@ -150,7 +172,7 @@
 ConversionResult<media::CreateRecordResponse>
 IAudioFlinger::CreateRecordOutput::toAidl() const {
     media::CreateRecordResponse aidl;
-    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_mask(flags));
+    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
     aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
     aidl.selectedDeviceId = VALUE_OR_RETURN(
@@ -169,7 +191,7 @@
 IAudioFlinger::CreateRecordOutput::fromAidl(
         const media::CreateRecordResponse& aidl) {
     IAudioFlinger::CreateRecordOutput legacy;
-    legacy.flags = VALUE_OR_RETURN(aidl2legacy_audio_input_flags_mask(aidl.flags));
+    legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_input_flags_t_mask(aidl.flags));
     legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
     legacy.notificationFrameCount = VALUE_OR_RETURN(
             convertIntegral<size_t>(aidl.notificationFrameCount));
@@ -185,1275 +207,975 @@
     return legacy;
 }
 
-class BpAudioFlinger : public BpInterface<IAudioFlinger>
-{
-public:
-    explicit BpAudioFlinger(const sp<IBinder>& impl)
-        : BpInterface<IAudioFlinger>(impl)
-    {
-    }
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// AudioFlingerClientAdapter
 
-    virtual status_t createTrack(const media::CreateTrackRequest& input,
-                                 media::CreateTrackResponse& output)
-    {
-        Parcel data, reply;
-        status_t status;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeParcelable(input);
+AudioFlingerClientAdapter::AudioFlingerClientAdapter(
+        const sp<media::IAudioFlingerService> delegate) : mDelegate(delegate) {}
 
-        status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
-        if (lStatus != NO_ERROR) {
-            ALOGE("createTrack transaction error %d", lStatus);
-            return DEAD_OBJECT;
-        }
-        status = reply.readInt32();
-        if (status != NO_ERROR) {
-            ALOGE("createTrack returned error %d", status);
-            return status;
-        }
-        output.readFromParcel(&reply);
-        if (output.audioTrack == 0) {
-            ALOGE("createTrack returned an NULL IAudioTrack with status OK");
-            return DEAD_OBJECT;
-        }
-        return OK;
-    }
-
-    virtual status_t createRecord(const media::CreateRecordRequest& input,
-                                  media::CreateRecordResponse& output)
-    {
-        Parcel data, reply;
-        status_t status;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-
-        data.writeParcelable(input);
-
-        status_t lStatus = remote()->transact(CREATE_RECORD, data, &reply);
-        if (lStatus != NO_ERROR) {
-            ALOGE("createRecord transaction error %d", lStatus);
-            return DEAD_OBJECT;
-        }
-        status = reply.readInt32();
-        if (status != NO_ERROR) {
-            ALOGE("createRecord returned error %d", status);
-            return status;
-        }
-
-        output.readFromParcel(&reply);
-        if (output.audioRecord == 0) {
-            ALOGE("createRecord returned a NULL IAudioRecord with status OK");
-            return DEAD_OBJECT;
-        }
-        return OK;
-    }
-
-    virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) ioHandle);
-        remote()->transact(SAMPLE_RATE, data, &reply);
-        return reply.readInt32();
-    }
-
-    // RESERVED for channelCount()
-
-    virtual audio_format_t format(audio_io_handle_t output) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output);
-        remote()->transact(FORMAT, data, &reply);
-        return (audio_format_t) reply.readInt32();
-    }
-
-    virtual size_t frameCount(audio_io_handle_t ioHandle) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) ioHandle);
-        remote()->transact(FRAME_COUNT, data, &reply);
-        return reply.readInt64();
-    }
-
-    virtual uint32_t latency(audio_io_handle_t output) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output);
-        remote()->transact(LATENCY, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t setMasterVolume(float value)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeFloat(value);
-        remote()->transact(SET_MASTER_VOLUME, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t setMasterMute(bool muted)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(muted);
-        remote()->transact(SET_MASTER_MUTE, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual float masterVolume() const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        remote()->transact(MASTER_VOLUME, data, &reply);
-        return reply.readFloat();
-    }
-
-    virtual bool masterMute() const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        remote()->transact(MASTER_MUTE, data, &reply);
-        return reply.readInt32();
-    }
-
-    status_t setMasterBalance(float balance) override
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeFloat(balance);
-        status_t status = remote()->transact(SET_MASTER_BALANCE, data, &reply);
-        if (status != NO_ERROR) {
-            return status;
-        }
-        return reply.readInt32();
-    }
-
-    status_t getMasterBalance(float *balance) const override
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_MASTER_BALANCE, data, &reply);
-        if (status != NO_ERROR) {
-            return status;
-        }
-        status = (status_t)reply.readInt32();
-        if (status != NO_ERROR) {
-            return status;
-        }
-        *balance = reply.readFloat();
-        return NO_ERROR;
-    }
-
-    virtual status_t setStreamVolume(audio_stream_type_t stream, float value,
-            audio_io_handle_t output)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) stream);
-        data.writeFloat(value);
-        data.writeInt32((int32_t) output);
-        remote()->transact(SET_STREAM_VOLUME, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t setStreamMute(audio_stream_type_t stream, bool muted)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) stream);
-        data.writeInt32(muted);
-        remote()->transact(SET_STREAM_MUTE, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual float streamVolume(audio_stream_type_t stream, audio_io_handle_t output) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) stream);
-        data.writeInt32((int32_t) output);
-        remote()->transact(STREAM_VOLUME, data, &reply);
-        return reply.readFloat();
-    }
-
-    virtual bool streamMute(audio_stream_type_t stream) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) stream);
-        remote()->transact(STREAM_MUTE, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t setMode(audio_mode_t mode)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(mode);
-        remote()->transact(SET_MODE, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t setMicMute(bool state)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(state);
-        remote()->transact(SET_MIC_MUTE, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual bool getMicMute() const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        remote()->transact(GET_MIC_MUTE, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual void setRecordSilenced(audio_port_handle_t portId, bool silenced)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(portId);
-        data.writeInt32(silenced ? 1 : 0);
-        remote()->transact(SET_RECORD_SILENCED, data, &reply);
-    }
-
-    virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) ioHandle);
-        data.writeString8(keyValuePairs);
-        remote()->transact(SET_PARAMETERS, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) ioHandle);
-        data.writeString8(keys);
-        remote()->transact(GET_PARAMETERS, data, &reply);
-        return reply.readString8();
-    }
-
-    virtual void registerClient(const sp<media::IAudioFlingerClient>& client)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(client));
-        remote()->transact(REGISTER_CLIENT, data, &reply);
-    }
-
-    virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
-            audio_channel_mask_t channelMask) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(sampleRate);
-        data.writeInt32(format);
-        data.writeInt32(channelMask);
-        remote()->transact(GET_INPUTBUFFERSIZE, data, &reply);
-        return reply.readInt64();
-    }
-
-    virtual status_t openOutput(const media::OpenOutputRequest& request,
-                                media::OpenOutputResponse* response)
-    {
-        status_t status;
-        Parcel data, reply;
-        return data.writeParcelable(request)
-                ?: remote()->transact(OPEN_OUTPUT, data, &reply)
-                ?: data.readInt32(&status)
-                ?: status
-                ?: data.readParcelable(response);
-    }
-
-    virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
-            audio_io_handle_t output2)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output1);
-        data.writeInt32((int32_t) output2);
-        remote()->transact(OPEN_DUPLICATE_OUTPUT, data, &reply);
-        return (audio_io_handle_t) reply.readInt32();
-    }
-
-    virtual status_t closeOutput(audio_io_handle_t output)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output);
-        remote()->transact(CLOSE_OUTPUT, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t suspendOutput(audio_io_handle_t output)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output);
-        remote()->transact(SUSPEND_OUTPUT, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t restoreOutput(audio_io_handle_t output)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output);
-        remote()->transact(RESTORE_OUTPUT, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t openInput(const media::OpenInputRequest& request,
-                               media::OpenInputResponse* response)
-    {
-        Parcel data, reply;
-        status_t status;
-        return data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor())
-            ?: data.writeParcelable(request)
-            ?: remote()->transact(OPEN_INPUT, data, &reply)
-            ?: reply.readInt32(&status)
-            ?: status
-            ?: reply.readParcelable(response);
-    }
-
-    virtual status_t closeInput(int input)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(input);
-        remote()->transact(CLOSE_INPUT, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t invalidateStream(audio_stream_type_t stream)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) stream);
-        remote()->transact(INVALIDATE_STREAM, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t setVoiceVolume(float volume)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeFloat(volume);
-        remote()->transact(SET_VOICE_VOLUME, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
-            audio_io_handle_t output) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) output);
-        remote()->transact(GET_RENDER_POSITION, data, &reply);
-        status_t status = reply.readInt32();
-        if (status == NO_ERROR) {
-            uint32_t tmp = reply.readInt32();
-            if (halFrames != NULL) {
-                *halFrames = tmp;
-            }
-            tmp = reply.readInt32();
-            if (dspFrames != NULL) {
-                *dspFrames = tmp;
-            }
-        }
-        return status;
-    }
-
-    virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) ioHandle);
-        status_t status = remote()->transact(GET_INPUT_FRAMES_LOST, data, &reply);
-        if (status != NO_ERROR) {
-            return 0;
-        }
-        return (uint32_t) reply.readInt32();
-    }
-
-    virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) use);
-        status_t status = remote()->transact(NEW_AUDIO_UNIQUE_ID, data, &reply);
-        audio_unique_id_t id = AUDIO_UNIQUE_ID_ALLOCATE;
-        if (status == NO_ERROR) {
-            id = reply.readInt32();
-        }
-        return id;
-    }
-
-    void acquireAudioSessionId(audio_session_t audioSession, pid_t pid, uid_t uid) override
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(audioSession);
-        data.writeInt32((int32_t)pid);
-        data.writeInt32((int32_t)uid);
-        remote()->transact(ACQUIRE_AUDIO_SESSION_ID, data, &reply);
-    }
-
-    virtual void releaseAudioSessionId(audio_session_t audioSession, int pid)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(audioSession);
-        data.writeInt32(pid);
-        remote()->transact(RELEASE_AUDIO_SESSION_ID, data, &reply);
-    }
-
-    virtual status_t queryNumberEffects(uint32_t *numEffects) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        status_t status = remote()->transact(QUERY_NUM_EFFECTS, data, &reply);
-        if (status != NO_ERROR) {
-            return status;
-        }
-        status = reply.readInt32();
-        if (status != NO_ERROR) {
-            return status;
-        }
-        if (numEffects != NULL) {
-            *numEffects = (uint32_t)reply.readInt32();
-        }
-        return NO_ERROR;
-    }
-
-    virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const
-    {
-        if (pDescriptor == NULL) {
-            return BAD_VALUE;
-        }
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(index);
-        status_t status = remote()->transact(QUERY_EFFECT, data, &reply);
-        if (status != NO_ERROR) {
-            return status;
-        }
-        status = reply.readInt32();
-        if (status != NO_ERROR) {
-            return status;
-        }
-        reply.read(pDescriptor, sizeof(effect_descriptor_t));
-        return NO_ERROR;
-    }
-
-    virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid,
-                                         const effect_uuid_t *pType,
-                                         uint32_t preferredTypeFlag,
-                                         effect_descriptor_t *pDescriptor) const
-    {
-        if (pUuid == NULL || pType == NULL || pDescriptor == NULL) {
-            return BAD_VALUE;
-        }
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.write(pUuid, sizeof(effect_uuid_t));
-        data.write(pType, sizeof(effect_uuid_t));
-        data.writeUint32(preferredTypeFlag);
-        status_t status = remote()->transact(GET_EFFECT_DESCRIPTOR, data, &reply);
-        if (status != NO_ERROR) {
-            return status;
-        }
-        status = reply.readInt32();
-        if (status != NO_ERROR) {
-            return status;
-        }
-        reply.read(pDescriptor, sizeof(effect_descriptor_t));
-        return NO_ERROR;
-    }
-
-    virtual status_t createEffect(const media::CreateEffectRequest& request,
-                                  media::CreateEffectResponse* response)
-    {
-        Parcel data, reply;
-        sp<media::IEffect> effect;
-        if (response == nullptr) {
-            return BAD_VALUE;
-        }
-        status_t status;
-        status_t lStatus = data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor())
-                           ?: data.writeParcelable(request)
-                           ?: remote()->transact(CREATE_EFFECT, data, &reply)
-                           ?: reply.readInt32(&status)
-                           ?: reply.readParcelable(response)
-                           ?: status;
-        if (lStatus != NO_ERROR) {
-            ALOGE("createEffect error: %s", strerror(-lStatus));
-        }
-        return lStatus;
-    }
-
-    virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
-            audio_io_handle_t dstOutput)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(session);
-        data.writeInt32((int32_t) srcOutput);
-        data.writeInt32((int32_t) dstOutput);
-        remote()->transact(MOVE_EFFECTS, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual void setEffectSuspended(int effectId,
-                                    audio_session_t sessionId,
-                                    bool suspended)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(effectId);
-        data.writeInt32(sessionId);
-        data.writeInt32(suspended ? 1 : 0);
-        remote()->transact(SET_EFFECT_SUSPENDED, data, &reply);
-    }
-
-    virtual audio_module_handle_t loadHwModule(const char *name)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeCString(name);
-        remote()->transact(LOAD_HW_MODULE, data, &reply);
-        return (audio_module_handle_t) reply.readInt32();
-    }
-
-    virtual uint32_t getPrimaryOutputSamplingRate()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        remote()->transact(GET_PRIMARY_OUTPUT_SAMPLING_RATE, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual size_t getPrimaryOutputFrameCount()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        remote()->transact(GET_PRIMARY_OUTPUT_FRAME_COUNT, data, &reply);
-        return reply.readInt64();
-    }
-
-    virtual status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override
-    {
-        Parcel data, reply;
-
-        static_assert(NO_ERROR == 0, "NO_ERROR must be 0");
-        return data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor())
-                ?: data.writeInt32((int) isLowRamDevice)
-                ?: data.writeInt64(totalMemory)
-                ?: remote()->transact(SET_LOW_RAM_DEVICE, data, &reply)
-                ?: reply.readInt32();
-    }
-
-    virtual status_t listAudioPorts(unsigned int *num_ports,
-                                    struct audio_port *ports)
-    {
-        if (num_ports == NULL || *num_ports == 0 || ports == NULL) {
-            return BAD_VALUE;
-        }
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(*num_ports);
-        status_t status = remote()->transact(LIST_AUDIO_PORTS, data, &reply);
-        if (status != NO_ERROR ||
-                (status = (status_t)reply.readInt32()) != NO_ERROR) {
-            return status;
-        }
-        *num_ports = (unsigned int)reply.readInt32();
-        reply.read(ports, *num_ports * sizeof(struct audio_port));
-        return status;
-    }
-    virtual status_t getAudioPort(struct audio_port_v7 *port)
-    {
-        if (port == nullptr) {
-            return BAD_VALUE;
-        }
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.write(port, sizeof(struct audio_port_v7));
-        status_t status = remote()->transact(GET_AUDIO_PORT, data, &reply);
-        if (status != NO_ERROR ||
-                (status = (status_t)reply.readInt32()) != NO_ERROR) {
-            return status;
-        }
-        reply.read(port, sizeof(struct audio_port_v7));
-        return status;
-    }
-    virtual status_t createAudioPatch(const struct audio_patch *patch,
-                                       audio_patch_handle_t *handle)
-    {
-        if (patch == NULL || handle == NULL) {
-            return BAD_VALUE;
-        }
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.write(patch, sizeof(struct audio_patch));
-        data.write(handle, sizeof(audio_patch_handle_t));
-        status_t status = remote()->transact(CREATE_AUDIO_PATCH, data, &reply);
-        if (status != NO_ERROR ||
-                (status = (status_t)reply.readInt32()) != NO_ERROR) {
-            return status;
-        }
-        reply.read(handle, sizeof(audio_patch_handle_t));
-        return status;
-    }
-    virtual status_t releaseAudioPatch(audio_patch_handle_t handle)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.write(&handle, sizeof(audio_patch_handle_t));
-        status_t status = remote()->transact(RELEASE_AUDIO_PATCH, data, &reply);
-        if (status != NO_ERROR) {
-            status = (status_t)reply.readInt32();
-        }
-        return status;
-    }
-    virtual status_t listAudioPatches(unsigned int *num_patches,
-                                      struct audio_patch *patches)
-    {
-        if (num_patches == NULL || *num_patches == 0 || patches == NULL) {
-            return BAD_VALUE;
-        }
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(*num_patches);
-        status_t status = remote()->transact(LIST_AUDIO_PATCHES, data, &reply);
-        if (status != NO_ERROR ||
-                (status = (status_t)reply.readInt32()) != NO_ERROR) {
-            return status;
-        }
-        *num_patches = (unsigned int)reply.readInt32();
-        reply.read(patches, *num_patches * sizeof(struct audio_patch));
-        return status;
-    }
-    virtual status_t setAudioPortConfig(const struct audio_port_config *config)
-    {
-        if (config == NULL) {
-            return BAD_VALUE;
-        }
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.write(config, sizeof(struct audio_port_config));
-        status_t status = remote()->transact(SET_AUDIO_PORT_CONFIG, data, &reply);
-        if (status != NO_ERROR) {
-            status = (status_t)reply.readInt32();
-        }
-        return status;
-    }
-    virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(sessionId);
-        status_t status = remote()->transact(GET_AUDIO_HW_SYNC_FOR_SESSION, data, &reply);
-        if (status != NO_ERROR) {
-            return AUDIO_HW_SYNC_INVALID;
-        }
-        return (audio_hw_sync_t)reply.readInt32();
-    }
-    virtual status_t systemReady()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        return remote()->transact(SYSTEM_READY, data, &reply, IBinder::FLAG_ONEWAY);
-    }
-    virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32((int32_t) ioHandle);
-        status_t status = remote()->transact(FRAME_COUNT_HAL, data, &reply);
-        if (status != NO_ERROR) {
-            return 0;
-        }
-        return reply.readInt64();
-    }
-    virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        status_t status = remote()->transact(GET_MICROPHONES, data, &reply);
-        if (status != NO_ERROR ||
-                (status = (status_t)reply.readInt32()) != NO_ERROR) {
-            return status;
-        }
-        status = reply.readParcelableVector(microphones);
-        return status;
-    }
-    virtual status_t setAudioHalPids(const std::vector<pid_t>& pids)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
-        data.writeInt32(pids.size());
-        for (auto pid : pids) {
-            data.writeInt32(pid);
-        }
-        status_t status = remote()->transact(SET_AUDIO_HAL_PIDS, data, &reply);
-        if (status != NO_ERROR) {
-            return status;
-        }
-        return static_cast <status_t> (reply.readInt32());
-    }
-};
-
-IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
-
-// ----------------------------------------------------------------------
-
-status_t BnAudioFlinger::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    switch (code) {
-        case CREATE_TRACK: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-
-            media::CreateTrackRequest input;
-            if (data.readParcelable(&input) != NO_ERROR) {
-                reply->writeInt32(DEAD_OBJECT);
-                return NO_ERROR;
-            }
-
-            status_t status;
-            media::CreateTrackResponse output;
-
-            status = createTrack(input, output);
-
-            LOG_ALWAYS_FATAL_IF((output.audioTrack != 0) != (status == NO_ERROR));
-            reply->writeInt32(status);
-            if (status != NO_ERROR) {
-                return NO_ERROR;
-            }
-            output.writeToParcel(reply);
-            return NO_ERROR;
-        } break;
-        case CREATE_RECORD: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-
-            media::CreateRecordRequest input;
-            if (data.readParcelable(&input) != NO_ERROR) {
-                reply->writeInt32(DEAD_OBJECT);
-                return NO_ERROR;
-            }
-
-            status_t status;
-            media::CreateRecordResponse output;
-
-            status = createRecord(input, output);
-
-            LOG_ALWAYS_FATAL_IF((output.audioRecord != 0) != (status == NO_ERROR));
-            reply->writeInt32(status);
-            if (status != NO_ERROR) {
-                return NO_ERROR;
-            }
-            output.writeToParcel(reply);
-            return NO_ERROR;
-        } break;
-        case SAMPLE_RATE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( sampleRate((audio_io_handle_t) data.readInt32()) );
-            return NO_ERROR;
-        } break;
-
-        // RESERVED for channelCount()
-
-        case FORMAT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( format((audio_io_handle_t) data.readInt32()) );
-            return NO_ERROR;
-        } break;
-        case FRAME_COUNT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt64( frameCount((audio_io_handle_t) data.readInt32()) );
-            return NO_ERROR;
-        } break;
-        case LATENCY: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( latency((audio_io_handle_t) data.readInt32()) );
-            return NO_ERROR;
-        } break;
-        case SET_MASTER_VOLUME: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( setMasterVolume(data.readFloat()) );
-            return NO_ERROR;
-        } break;
-        case SET_MASTER_MUTE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( setMasterMute(data.readInt32()) );
-            return NO_ERROR;
-        } break;
-        case MASTER_VOLUME: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeFloat( masterVolume() );
-            return NO_ERROR;
-        } break;
-        case MASTER_MUTE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( masterMute() );
-            return NO_ERROR;
-        } break;
-        case SET_MASTER_BALANCE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( setMasterBalance(data.readFloat()) );
-            return NO_ERROR;
-        } break;
-        case GET_MASTER_BALANCE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            float f;
-            const status_t status = getMasterBalance(&f);
-            reply->writeInt32((int32_t)status);
-            if (status == NO_ERROR) {
-                (void)reply->writeFloat(f);
-            }
-            return NO_ERROR;
-        } break;
-        case SET_STREAM_VOLUME: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int stream = data.readInt32();
-            float volume = data.readFloat();
-            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
-            reply->writeInt32( setStreamVolume((audio_stream_type_t) stream, volume, output) );
-            return NO_ERROR;
-        } break;
-        case SET_STREAM_MUTE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int stream = data.readInt32();
-            reply->writeInt32( setStreamMute((audio_stream_type_t) stream, data.readInt32()) );
-            return NO_ERROR;
-        } break;
-        case STREAM_VOLUME: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int stream = data.readInt32();
-            int output = data.readInt32();
-            reply->writeFloat( streamVolume((audio_stream_type_t) stream, output) );
-            return NO_ERROR;
-        } break;
-        case STREAM_MUTE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int stream = data.readInt32();
-            reply->writeInt32( streamMute((audio_stream_type_t) stream) );
-            return NO_ERROR;
-        } break;
-        case SET_MODE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_mode_t mode = (audio_mode_t) data.readInt32();
-            reply->writeInt32( setMode(mode) );
-            return NO_ERROR;
-        } break;
-        case SET_MIC_MUTE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int state = data.readInt32();
-            reply->writeInt32( setMicMute(state) );
-            return NO_ERROR;
-        } break;
-        case GET_MIC_MUTE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32( getMicMute() );
-            return NO_ERROR;
-        } break;
-        case SET_RECORD_SILENCED: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_port_handle_t portId = data.readInt32();
-            bool silenced = data.readInt32() == 1;
-            setRecordSilenced(portId, silenced);
-            return NO_ERROR;
-        } break;
-        case SET_PARAMETERS: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
-            String8 keyValuePairs(data.readString8());
-            reply->writeInt32(setParameters(ioHandle, keyValuePairs));
-            return NO_ERROR;
-        } break;
-        case GET_PARAMETERS: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
-            String8 keys(data.readString8());
-            reply->writeString8(getParameters(ioHandle, keys));
-            return NO_ERROR;
-        } break;
-
-        case REGISTER_CLIENT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            sp<media::IAudioFlingerClient> client = interface_cast<media::IAudioFlingerClient>(
-                    data.readStrongBinder());
-            registerClient(client);
-            return NO_ERROR;
-        } break;
-        case GET_INPUTBUFFERSIZE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            uint32_t sampleRate = data.readInt32();
-            audio_format_t format = (audio_format_t) data.readInt32();
-            audio_channel_mask_t channelMask = (audio_channel_mask_t) data.readInt32();
-            reply->writeInt64( getInputBufferSize(sampleRate, format, channelMask) );
-            return NO_ERROR;
-        } break;
-        case OPEN_OUTPUT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            status_t status;
-            media::OpenOutputRequest request;
-            media::OpenOutputResponse response;
-            return data.readParcelable(&request)
-                ?: (status = openOutput(request, &response), OK)
-                ?: reply->writeInt32(status)
-                ?: reply->writeParcelable(response);
-        } break;
-        case OPEN_DUPLICATE_OUTPUT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_io_handle_t output1 = (audio_io_handle_t) data.readInt32();
-            audio_io_handle_t output2 = (audio_io_handle_t) data.readInt32();
-            reply->writeInt32((int32_t) openDuplicateOutput(output1, output2));
-            return NO_ERROR;
-        } break;
-        case CLOSE_OUTPUT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(closeOutput((audio_io_handle_t) data.readInt32()));
-            return NO_ERROR;
-        } break;
-        case SUSPEND_OUTPUT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(suspendOutput((audio_io_handle_t) data.readInt32()));
-            return NO_ERROR;
-        } break;
-        case RESTORE_OUTPUT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(restoreOutput((audio_io_handle_t) data.readInt32()));
-            return NO_ERROR;
-        } break;
-        case OPEN_INPUT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            media::OpenInputRequest request;
-            media::OpenInputResponse response;
-            status_t status;
-            return data.readParcelable(&request)
-                ?: (status = openInput(request, &response), OK)
-                ?: reply->writeInt32(status)
-                ?: reply->writeParcelable(response);
-        } break;
-        case CLOSE_INPUT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(closeInput((audio_io_handle_t) data.readInt32()));
-            return NO_ERROR;
-        } break;
-        case INVALIDATE_STREAM: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
-            reply->writeInt32(invalidateStream(stream));
-            return NO_ERROR;
-        } break;
-        case SET_VOICE_VOLUME: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            float volume = data.readFloat();
-            reply->writeInt32( setVoiceVolume(volume) );
-            return NO_ERROR;
-        } break;
-        case GET_RENDER_POSITION: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
-            uint32_t halFrames = 0;
-            uint32_t dspFrames = 0;
-            status_t status = getRenderPosition(&halFrames, &dspFrames, output);
-            reply->writeInt32(status);
-            if (status == NO_ERROR) {
-                reply->writeInt32(halFrames);
-                reply->writeInt32(dspFrames);
-            }
-            return NO_ERROR;
-        }
-        case GET_INPUT_FRAMES_LOST: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
-            reply->writeInt32((int32_t) getInputFramesLost(ioHandle));
-            return NO_ERROR;
-        } break;
-        case NEW_AUDIO_UNIQUE_ID: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(newAudioUniqueId((audio_unique_id_use_t) data.readInt32()));
-            return NO_ERROR;
-        } break;
-        case ACQUIRE_AUDIO_SESSION_ID: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_session_t audioSession = (audio_session_t) data.readInt32();
-            const pid_t pid = (pid_t)data.readInt32();
-            const uid_t uid = (uid_t)data.readInt32();
-            acquireAudioSessionId(audioSession, pid, uid);
-            return NO_ERROR;
-        } break;
-        case RELEASE_AUDIO_SESSION_ID: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_session_t audioSession = (audio_session_t) data.readInt32();
-            int pid = data.readInt32();
-            releaseAudioSessionId(audioSession, pid);
-            return NO_ERROR;
-        } break;
-        case QUERY_NUM_EFFECTS: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            uint32_t numEffects = 0;
-            status_t status = queryNumberEffects(&numEffects);
-            reply->writeInt32(status);
-            if (status == NO_ERROR) {
-                reply->writeInt32((int32_t)numEffects);
-            }
-            return NO_ERROR;
-        }
-        case QUERY_EFFECT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            effect_descriptor_t desc = {};
-            status_t status = queryEffect(data.readInt32(), &desc);
-            reply->writeInt32(status);
-            if (status == NO_ERROR) {
-                reply->write(&desc, sizeof(effect_descriptor_t));
-            }
-            return NO_ERROR;
-        }
-        case GET_EFFECT_DESCRIPTOR: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            effect_uuid_t uuid = {};
-            if (data.read(&uuid, sizeof(effect_uuid_t)) != NO_ERROR) {
-                android_errorWriteLog(0x534e4554, "139417189");
-            }
-            effect_uuid_t type = {};
-            if (data.read(&type, sizeof(effect_uuid_t)) != NO_ERROR) {
-                android_errorWriteLog(0x534e4554, "139417189");
-            }
-            uint32_t preferredTypeFlag = data.readUint32();
-            effect_descriptor_t desc = {};
-            status_t status = getEffectDescriptor(&uuid, &type, preferredTypeFlag, &desc);
-            reply->writeInt32(status);
-            if (status == NO_ERROR) {
-                reply->write(&desc, sizeof(effect_descriptor_t));
-            }
-            return NO_ERROR;
-        }
-        case CREATE_EFFECT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-
-            media::CreateEffectRequest request;
-            media::CreateEffectResponse response;
-
-            return data.readParcelable(&request)
-                ?: reply->writeInt32(createEffect(request, &response))
-                ?: reply->writeParcelable(response);
-        } break;
-        case MOVE_EFFECTS: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_session_t session = (audio_session_t) data.readInt32();
-            audio_io_handle_t srcOutput = (audio_io_handle_t) data.readInt32();
-            audio_io_handle_t dstOutput = (audio_io_handle_t) data.readInt32();
-            reply->writeInt32(moveEffects(session, srcOutput, dstOutput));
-            return NO_ERROR;
-        } break;
-        case SET_EFFECT_SUSPENDED: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int effectId = data.readInt32();
-            audio_session_t sessionId = (audio_session_t) data.readInt32();
-            bool suspended = data.readInt32() == 1;
-            setEffectSuspended(effectId, sessionId, suspended);
-            return NO_ERROR;
-        } break;
-        case LOAD_HW_MODULE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(loadHwModule(data.readCString()));
-            return NO_ERROR;
-        } break;
-        case GET_PRIMARY_OUTPUT_SAMPLING_RATE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(getPrimaryOutputSamplingRate());
-            return NO_ERROR;
-        } break;
-        case GET_PRIMARY_OUTPUT_FRAME_COUNT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt64(getPrimaryOutputFrameCount());
-            return NO_ERROR;
-        } break;
-        case SET_LOW_RAM_DEVICE: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            int32_t isLowRamDevice;
-            int64_t totalMemory;
-            const status_t status =
-                    data.readInt32(&isLowRamDevice) ?:
-                    data.readInt64(&totalMemory) ?:
-                    setLowRamDevice(isLowRamDevice != 0, totalMemory);
-            (void)reply->writeInt32(status);
-            return NO_ERROR;
-        } break;
-        case LIST_AUDIO_PORTS: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            unsigned int numPortsReq = data.readInt32();
-            if (numPortsReq > MAX_ITEMS_PER_LIST) {
-                numPortsReq = MAX_ITEMS_PER_LIST;
-            }
-            unsigned int numPorts = numPortsReq;
-            struct audio_port *ports =
-                    (struct audio_port *)calloc(numPortsReq,
-                                                           sizeof(struct audio_port));
-            if (ports == NULL) {
-                reply->writeInt32(NO_MEMORY);
-                reply->writeInt32(0);
-                return NO_ERROR;
-            }
-            status_t status = listAudioPorts(&numPorts, ports);
-            reply->writeInt32(status);
-            reply->writeInt32(numPorts);
-            if (status == NO_ERROR) {
-                if (numPortsReq > numPorts) {
-                    numPortsReq = numPorts;
-                }
-                reply->write(ports, numPortsReq * sizeof(struct audio_port));
-            }
-            free(ports);
-            return NO_ERROR;
-        } break;
-        case GET_AUDIO_PORT: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            struct audio_port_v7 port = {};
-            status_t status = data.read(&port, sizeof(struct audio_port_v7));
-            if (status != NO_ERROR) {
-                ALOGE("b/23905951");
-                return status;
-            }
-            status = getAudioPort(&port);
-            reply->writeInt32(status);
-            if (status == NO_ERROR) {
-                reply->write(&port, sizeof(struct audio_port_v7));
-            }
-            return NO_ERROR;
-        } break;
-        case CREATE_AUDIO_PATCH: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            struct audio_patch patch;
-            status_t status = data.read(&patch, sizeof(struct audio_patch));
-            if (status != NO_ERROR) {
-                return status;
-            }
-            audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
-            status = data.read(&handle, sizeof(audio_patch_handle_t));
-            if (status != NO_ERROR) {
-                ALOGE("b/23905951");
-                return status;
-            }
-            status = createAudioPatch(&patch, &handle);
-            reply->writeInt32(status);
-            if (status == NO_ERROR) {
-                reply->write(&handle, sizeof(audio_patch_handle_t));
-            }
-            return NO_ERROR;
-        } break;
-        case RELEASE_AUDIO_PATCH: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            audio_patch_handle_t handle;
-            data.read(&handle, sizeof(audio_patch_handle_t));
-            status_t status = releaseAudioPatch(handle);
-            reply->writeInt32(status);
-            return NO_ERROR;
-        } break;
-        case LIST_AUDIO_PATCHES: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            unsigned int numPatchesReq = data.readInt32();
-            if (numPatchesReq > MAX_ITEMS_PER_LIST) {
-                numPatchesReq = MAX_ITEMS_PER_LIST;
-            }
-            unsigned int numPatches = numPatchesReq;
-            struct audio_patch *patches =
-                    (struct audio_patch *)calloc(numPatchesReq,
-                                                 sizeof(struct audio_patch));
-            if (patches == NULL) {
-                reply->writeInt32(NO_MEMORY);
-                reply->writeInt32(0);
-                return NO_ERROR;
-            }
-            status_t status = listAudioPatches(&numPatches, patches);
-            reply->writeInt32(status);
-            reply->writeInt32(numPatches);
-            if (status == NO_ERROR) {
-                if (numPatchesReq > numPatches) {
-                    numPatchesReq = numPatches;
-                }
-                reply->write(patches, numPatchesReq * sizeof(struct audio_patch));
-            }
-            free(patches);
-            return NO_ERROR;
-        } break;
-        case SET_AUDIO_PORT_CONFIG: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            struct audio_port_config config;
-            status_t status = data.read(&config, sizeof(struct audio_port_config));
-            if (status != NO_ERROR) {
-                return status;
-            }
-            status = setAudioPortConfig(&config);
-            reply->writeInt32(status);
-            return NO_ERROR;
-        } break;
-        case GET_AUDIO_HW_SYNC_FOR_SESSION: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt32(getAudioHwSyncForSession((audio_session_t) data.readInt32()));
-            return NO_ERROR;
-        } break;
-        case SYSTEM_READY: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            systemReady();
-            return NO_ERROR;
-        } break;
-        case FRAME_COUNT_HAL: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            reply->writeInt64( frameCountHAL((audio_io_handle_t) data.readInt32()) );
-            return NO_ERROR;
-        } break;
-        case GET_MICROPHONES: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            std::vector<media::MicrophoneInfo> microphones;
-            status_t status = getMicrophones(&microphones);
-            reply->writeInt32(status);
-            if (status == NO_ERROR) {
-                reply->writeParcelableVector(microphones);
-            }
-            return NO_ERROR;
-        }
-        case SET_AUDIO_HAL_PIDS: {
-            CHECK_INTERFACE(IAudioFlinger, data, reply);
-            std::vector<pid_t> pids;
-            int32_t size;
-            status_t status = data.readInt32(&size);
-            if (status != NO_ERROR) {
-                return status;
-            }
-            if (size < 0) {
-                return BAD_VALUE;
-            }
-            if (size > MAX_ITEMS_PER_LIST) {
-                size = MAX_ITEMS_PER_LIST;
-            }
-            for (int32_t i = 0; i < size; i++) {
-                int32_t pid;
-                status =  data.readInt32(&pid);
-                if (status != NO_ERROR) {
-                    return status;
-                }
-                pids.push_back(pid);
-            }
-            reply->writeInt32(setAudioHalPids(pids));
-            return NO_ERROR;
-        }
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
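+// The adapter below replaces the hand-rolled Parcel marshalling above: each legacy
+// IAudioFlinger-style call is forwarded to a binder delegate (mDelegate), with legacy
+// types converted to their AIDL counterparts by the legacy2aidl_* helpers on the way in
+// and back by the aidl2legacy_* helpers on the way out.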
+status_t AudioFlingerClientAdapter::createTrack(const media::CreateTrackRequest& input,
+                                                media::CreateTrackResponse& output) {
+    return statusTFromBinderStatus(mDelegate->createTrack(input, &output));
 }
 
-// ----------------------------------------------------------------------------
+status_t AudioFlingerClientAdapter::createRecord(const media::CreateRecordRequest& input,
+                                                 media::CreateRecordResponse& output) {
+    return statusTFromBinderStatus(mDelegate->createRecord(input, &output));
+}
+
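+// Methods whose legacy signature returns a plain value (and so has no error channel) wrap
+// the conversions and the delegate call in an immediately invoked lambda returning
+// ConversionResult<T>; any conversion or binder failure short-circuits out of the lambda,
+// and result.value_or(...) then substitutes a neutral default (0, false,
+// AUDIO_FORMAT_INVALID, ...).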
+uint32_t AudioFlingerClientAdapter::sampleRate(audio_io_handle_t ioHandle) const {
+    auto result = [&]() -> ConversionResult<uint32_t> {
+        int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->sampleRate(ioHandleAidl, &aidlRet)));
+        return convertIntegral<uint32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+audio_format_t AudioFlingerClientAdapter::format(audio_io_handle_t output) const {
+    auto result = [&]() -> ConversionResult<audio_format_t> {
+        int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
+        media::audio::common::AudioFormat aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
+        return aidl2legacy_AudioFormat_audio_format_t(aidlRet);
+    }();
+    return result.value_or(AUDIO_FORMAT_INVALID);
+}
+
+size_t AudioFlingerClientAdapter::frameCount(audio_io_handle_t ioHandle) const {
+    auto result = [&]() -> ConversionResult<size_t> {
+        int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+        int64_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->frameCount(ioHandleAidl, &aidlRet)));
+        return convertIntegral<size_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+uint32_t AudioFlingerClientAdapter::latency(audio_io_handle_t output) const {
+    auto result = [&]() -> ConversionResult<uint32_t> {
+        int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->latency(outputAidl, &aidlRet)));
+        return convertIntegral<uint32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+status_t AudioFlingerClientAdapter::setMasterVolume(float value) {
+    return statusTFromBinderStatus(mDelegate->setMasterVolume(value));
+}
+
+status_t AudioFlingerClientAdapter::setMasterMute(bool muted) {
+    return statusTFromBinderStatus(mDelegate->setMasterMute(muted));
+}
+
+float AudioFlingerClientAdapter::masterVolume() const {
+    auto result = [&]() -> ConversionResult<float> {
+        float aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->masterVolume(&aidlRet)));
+        return aidlRet;
+    }();
+    // Failure is ignored.
+    return result.value_or(0.f);
+}
+
+bool AudioFlingerClientAdapter::masterMute() const {
+    auto result = [&]() -> ConversionResult<bool> {
+        bool aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->masterMute(&aidlRet)));
+        return aidlRet;
+    }();
+    // Failure is ignored.
+    return result.value_or(false);
+}
+
+status_t AudioFlingerClientAdapter::setMasterBalance(float balance) {
+    return statusTFromBinderStatus(mDelegate->setMasterBalance(balance));
+}
+
+status_t AudioFlingerClientAdapter::getMasterBalance(float* balance) const {
+    return statusTFromBinderStatus(mDelegate->getMasterBalance(balance));
+}
+
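+// Methods that do return a status_t instead convert their arguments with
+// VALUE_OR_RETURN_STATUS / RETURN_STATUS_IF_ERROR, so a failed conversion or binder call
+// is propagated to the caller as an error code rather than being swallowed.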
+status_t AudioFlingerClientAdapter::setStreamVolume(audio_stream_type_t stream, float value,
+                                                    audio_io_handle_t output) {
+    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+    int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+    return statusTFromBinderStatus(mDelegate->setStreamVolume(streamAidl, value, outputAidl));
+}
+
+status_t AudioFlingerClientAdapter::setStreamMute(audio_stream_type_t stream, bool muted) {
+    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+    return statusTFromBinderStatus(mDelegate->setStreamMute(streamAidl, muted));
+}
+
+float AudioFlingerClientAdapter::streamVolume(audio_stream_type_t stream,
+                                              audio_io_handle_t output) const {
+    auto result = [&]() -> ConversionResult<float> {
+        media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+        int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+        float aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->streamVolume(streamAidl, outputAidl, &aidlRet)));
+        return aidlRet;
+    }();
+    // Failure is ignored.
+    return result.value_or(0.f);
+}
+
+bool AudioFlingerClientAdapter::streamMute(audio_stream_type_t stream) const {
+    auto result = [&]() -> ConversionResult<bool> {
+        media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+        bool aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->streamMute(streamAidl, &aidlRet)));
+        return aidlRet;
+    }();
+    // Failure is ignored.
+    return result.value_or(false);
+}
+
+status_t AudioFlingerClientAdapter::setMode(audio_mode_t mode) {
+    media::AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
+    return statusTFromBinderStatus(mDelegate->setMode(modeAidl));
+}
+
+status_t AudioFlingerClientAdapter::setMicMute(bool state) {
+    return statusTFromBinderStatus(mDelegate->setMicMute(state));
+}
+
+bool AudioFlingerClientAdapter::getMicMute() const {
+    auto result = [&]() -> ConversionResult<bool> {
+        bool aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getMicMute(&aidlRet)));
+        return aidlRet;
+    }();
+    // Failure is ignored.
+    return result.value_or(false);
+}
+
+void AudioFlingerClientAdapter::setRecordSilenced(audio_port_handle_t portId, bool silenced) {
+    auto result = [&]() -> status_t {
+        int32_t portIdAidl = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_port_handle_t_int32_t(portId));
+        return statusTFromBinderStatus(mDelegate->setRecordSilenced(portIdAidl, silenced));
+    }();
+    // Failure is ignored.
+    (void) result;
+}
+
+status_t AudioFlingerClientAdapter::setParameters(audio_io_handle_t ioHandle,
+                                                  const String8& keyValuePairs) {
+    int32_t ioHandleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+    std::string keyValuePairsAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_String8_string(keyValuePairs));
+    return statusTFromBinderStatus(mDelegate->setParameters(ioHandleAidl, keyValuePairsAidl));
+}
+
+String8 AudioFlingerClientAdapter::getParameters(audio_io_handle_t ioHandle, const String8& keys)
+const {
+    auto result = [&]() -> ConversionResult<String8> {
+        int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+        std::string keysAidl = VALUE_OR_RETURN(legacy2aidl_String8_string(keys));
+        std::string aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getParameters(ioHandleAidl, keysAidl, &aidlRet)));
+        return aidl2legacy_string_view_String8(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(String8());
+}
+
+void AudioFlingerClientAdapter::registerClient(const sp<media::IAudioFlingerClient>& client) {
+    mDelegate->registerClient(client);
+    // Failure is ignored.
+}
+
+size_t AudioFlingerClientAdapter::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+                                                     audio_channel_mask_t channelMask) const {
+    auto result = [&]() -> ConversionResult<size_t> {
+        int32_t sampleRateAidl = VALUE_OR_RETURN(convertIntegral<int32_t>(sampleRate));
+        media::audio::common::AudioFormat formatAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_format_t_AudioFormat(format));
+        int32_t channelMaskAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_channel_mask_t_int32_t(channelMask));
+        int64_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getInputBufferSize(sampleRateAidl, formatAidl, channelMaskAidl,
+                                              &aidlRet)));
+        return convertIntegral<size_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+status_t AudioFlingerClientAdapter::openOutput(const media::OpenOutputRequest& request,
+                                               media::OpenOutputResponse* response) {
+    return statusTFromBinderStatus(mDelegate->openOutput(request, response));
+}
+
+audio_io_handle_t AudioFlingerClientAdapter::openDuplicateOutput(audio_io_handle_t output1,
+                                                                 audio_io_handle_t output2) {
+    auto result = [&]() -> ConversionResult<audio_io_handle_t> {
+        int32_t output1Aidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output1));
+        int32_t output2Aidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output2));
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->openDuplicateOutput(output1Aidl, output2Aidl, &aidlRet)));
+        return aidl2legacy_int32_t_audio_io_handle_t(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+status_t AudioFlingerClientAdapter::closeOutput(audio_io_handle_t output) {
+    int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+    return statusTFromBinderStatus(mDelegate->closeOutput(outputAidl));
+}
+
+status_t AudioFlingerClientAdapter::suspendOutput(audio_io_handle_t output) {
+    int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+    return statusTFromBinderStatus(mDelegate->suspendOutput(outputAidl));
+}
+
+status_t AudioFlingerClientAdapter::restoreOutput(audio_io_handle_t output) {
+    int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+    return statusTFromBinderStatus(mDelegate->restoreOutput(outputAidl));
+}
+
+status_t AudioFlingerClientAdapter::openInput(const media::OpenInputRequest& request,
+                                              media::OpenInputResponse* response) {
+    return statusTFromBinderStatus(mDelegate->openInput(request, response));
+}
+
+status_t AudioFlingerClientAdapter::closeInput(audio_io_handle_t input) {
+    int32_t inputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
+    return statusTFromBinderStatus(mDelegate->closeInput(inputAidl));
+}
+
+status_t AudioFlingerClientAdapter::invalidateStream(audio_stream_type_t stream) {
+    media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
+    return statusTFromBinderStatus(mDelegate->invalidateStream(streamAidl));
+}
+
+status_t AudioFlingerClientAdapter::setVoiceVolume(float volume) {
+    return statusTFromBinderStatus(mDelegate->setVoiceVolume(volume));
+}
+
+status_t AudioFlingerClientAdapter::getRenderPosition(uint32_t* halFrames, uint32_t* dspFrames,
+                                                      audio_io_handle_t output) const {
+    int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
+    media::RenderPosition aidlRet;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->getRenderPosition(outputAidl, &aidlRet)));
+    if (halFrames != nullptr) {
+        *halFrames = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(aidlRet.halFrames));
+    }
+    if (dspFrames != nullptr) {
+        *dspFrames = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(aidlRet.dspFrames));
+    }
+    return OK;
+}
+
+uint32_t AudioFlingerClientAdapter::getInputFramesLost(audio_io_handle_t ioHandle) const {
+    auto result = [&]() -> ConversionResult<uint32_t> {
+        int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getInputFramesLost(ioHandleAidl, &aidlRet)));
+        return convertIntegral<uint32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+audio_unique_id_t AudioFlingerClientAdapter::newAudioUniqueId(audio_unique_id_use_t use) {
+    auto result = [&]() -> ConversionResult<audio_unique_id_t> {
+        media::AudioUniqueIdUse useAidl = VALUE_OR_RETURN(
+                legacy2aidl_audio_unique_id_use_t_AudioUniqueIdUse(use));
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->newAudioUniqueId(useAidl, &aidlRet)));
+        return aidl2legacy_int32_t_audio_unique_id_t(aidlRet);
+    }();
+    return result.value_or(AUDIO_UNIQUE_ID_ALLOCATE);
+}
+
+void AudioFlingerClientAdapter::acquireAudioSessionId(audio_session_t audioSession, pid_t pid,
+                                                      uid_t uid) {
+    [&]() -> status_t {
+        int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_session_t_int32_t(audioSession));
+        int32_t pidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(pid));
+        int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(uid));
+        return statusTFromBinderStatus(
+                mDelegate->acquireAudioSessionId(audioSessionAidl, pidAidl, uidAidl));
+    }();
+    // Failure is ignored.
+}
+
+void AudioFlingerClientAdapter::releaseAudioSessionId(audio_session_t audioSession, pid_t pid) {
+    [&]() -> status_t {
+        int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_session_t_int32_t(audioSession));
+        int32_t pidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(pid));
+        return statusTFromBinderStatus(
+                mDelegate->releaseAudioSessionId(audioSessionAidl, pidAidl));
+    }();
+    // Failure is ignored.
+}
+
+status_t AudioFlingerClientAdapter::queryNumberEffects(uint32_t* numEffects) const {
+    int32_t aidlRet;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->queryNumberEffects(&aidlRet)));
+    if (numEffects != nullptr) {
+        *numEffects = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(aidlRet));
+    }
+    return OK;
+}
+
+status_t
+AudioFlingerClientAdapter::queryEffect(uint32_t index, effect_descriptor_t* pDescriptor) const {
+    int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
+    media::EffectDescriptor aidlRet;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->queryEffect(indexAidl, &aidlRet)));
+    if (pDescriptor != nullptr) {
+        *pDescriptor = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_EffectDescriptor_effect_descriptor_t(aidlRet));
+    }
+    return OK;
+}
+
+status_t AudioFlingerClientAdapter::getEffectDescriptor(const effect_uuid_t* pEffectUUID,
+                                                        const effect_uuid_t* pTypeUUID,
+                                                        uint32_t preferredTypeFlag,
+                                                        effect_descriptor_t* pDescriptor) const {
+    media::AudioUuid effectUuidAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_uuid_t_AudioUuid(*pEffectUUID));
+    media::AudioUuid typeUuidAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_uuid_t_AudioUuid(*pTypeUUID));
+    int32_t preferredTypeFlagAidl = VALUE_OR_RETURN_STATUS(
+            convertReinterpret<int32_t>(preferredTypeFlag));
+    media::EffectDescriptor aidlRet;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->getEffectDescriptor(effectUuidAidl, typeUuidAidl, preferredTypeFlagAidl,
+                                           &aidlRet)));
+    if (pDescriptor != nullptr) {
+        *pDescriptor = VALUE_OR_RETURN_STATUS(
+                aidl2legacy_EffectDescriptor_effect_descriptor_t(aidlRet));
+    }
+    return OK;
+}
+
+status_t AudioFlingerClientAdapter::createEffect(const media::CreateEffectRequest& request,
+                                                 media::CreateEffectResponse* response) {
+    return statusTFromBinderStatus(mDelegate->createEffect(request, response));
+}
+
+status_t
+AudioFlingerClientAdapter::moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
+                                       audio_io_handle_t dstOutput) {
+    int32_t sessionAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_session_t_int32_t(session));
+    int32_t srcOutputAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_io_handle_t_int32_t(srcOutput));
+    int32_t dstOutputAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_io_handle_t_int32_t(dstOutput));
+    return statusTFromBinderStatus(
+            mDelegate->moveEffects(sessionAidl, srcOutputAidl, dstOutputAidl));
+}
+
+void AudioFlingerClientAdapter::setEffectSuspended(int effectId,
+                                                   audio_session_t sessionId,
+                                                   bool suspended) {
+    [&]() -> status_t {
+        int32_t effectIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(effectId));
+        int32_t sessionIdAidl = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_session_t_int32_t(sessionId));
+        return statusTFromBinderStatus(
+                mDelegate->setEffectSuspended(effectIdAidl, sessionIdAidl, suspended));
+    }();
+    // Failure is ignored.
+}
+
+audio_module_handle_t AudioFlingerClientAdapter::loadHwModule(const char* name) {
+    auto result = [&]() -> ConversionResult<audio_module_handle_t> {
+        std::string nameAidl(name);
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->loadHwModule(nameAidl, &aidlRet)));
+        return aidl2legacy_int32_t_audio_module_handle_t(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+uint32_t AudioFlingerClientAdapter::getPrimaryOutputSamplingRate() {
+    auto result = [&]() -> ConversionResult<uint32_t> {
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getPrimaryOutputSamplingRate(&aidlRet)));
+        return convertIntegral<uint32_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+size_t AudioFlingerClientAdapter::getPrimaryOutputFrameCount() {
+    auto result = [&]() -> ConversionResult<size_t> {
+        int64_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getPrimaryOutputFrameCount(&aidlRet)));
+        return convertIntegral<size_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+status_t AudioFlingerClientAdapter::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) {
+    return statusTFromBinderStatus(mDelegate->setLowRamDevice(isLowRamDevice, totalMemory));
+}
+
+status_t AudioFlingerClientAdapter::getAudioPort(struct audio_port_v7* port) {
+    media::AudioPort portAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_v7_AudioPort(*port));
+    media::AudioPort aidlRet;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->getAudioPort(portAidl, &aidlRet)));
+    *port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPort_audio_port_v7(aidlRet));
+    return OK;
+}
+
+status_t AudioFlingerClientAdapter::createAudioPatch(const struct audio_patch* patch,
+                                                     audio_patch_handle_t* handle) {
+    media::AudioPatch patchAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_AudioPatch(*patch));
+    int32_t aidlRet;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->createAudioPatch(patchAidl, &aidlRet)));
+    if (handle != nullptr) {
+        *handle = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_patch_handle_t(aidlRet));
+    }
+    return OK;
+}
+
+status_t AudioFlingerClientAdapter::releaseAudioPatch(audio_patch_handle_t handle) {
+    int32_t handleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_handle_t_int32_t(handle));
+    return statusTFromBinderStatus(mDelegate->releaseAudioPatch(handleAidl));
+}
+
+status_t AudioFlingerClientAdapter::listAudioPatches(unsigned int* num_patches,
+                                                     struct audio_patch* patches) {
+    std::vector<media::AudioPatch> aidlRet;
+    int32_t maxPatches = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->listAudioPatches(maxPatches, &aidlRet)));
+    *num_patches = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(aidlRet.size()));
+    return convertRange(aidlRet.begin(), aidlRet.end(), patches,
+                        aidl2legacy_AudioPatch_audio_patch);
+}
+
+status_t AudioFlingerClientAdapter::setAudioPortConfig(const struct audio_port_config* config) {
+    media::AudioPortConfig configAidl = VALUE_OR_RETURN_STATUS(
+            legacy2aidl_audio_port_config_AudioPortConfig(*config));
+    return statusTFromBinderStatus(mDelegate->setAudioPortConfig(configAidl));
+}
+
+audio_hw_sync_t AudioFlingerClientAdapter::getAudioHwSyncForSession(audio_session_t sessionId) {
+    auto result = [&]() -> ConversionResult<audio_hw_sync_t> {
+        int32_t sessionIdAidl = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(sessionId));
+        int32_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getAudioHwSyncForSession(sessionIdAidl, &aidlRet)));
+        return aidl2legacy_int32_t_audio_hw_sync_t(aidlRet);
+    }();
+    return result.value_or(AUDIO_HW_SYNC_INVALID);
+}
+
+status_t AudioFlingerClientAdapter::systemReady() {
+    return statusTFromBinderStatus(mDelegate->systemReady());
+}
+
+size_t AudioFlingerClientAdapter::frameCountHAL(audio_io_handle_t ioHandle) const {
+    auto result = [&]() -> ConversionResult<size_t> {
+        int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
+        int64_t aidlRet;
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->frameCountHAL(ioHandleAidl, &aidlRet)));
+        return convertIntegral<size_t>(aidlRet);
+    }();
+    // Failure is ignored.
+    return result.value_or(0);
+}
+
+status_t
+AudioFlingerClientAdapter::getMicrophones(std::vector<media::MicrophoneInfo>* microphones) {
+    std::vector<media::MicrophoneInfoData> aidlRet;
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->getMicrophones(&aidlRet)));
+    if (microphones != nullptr) {
+        *microphones = VALUE_OR_RETURN_STATUS(
+                convertContainer<std::vector<media::MicrophoneInfo>>(aidlRet,
+                         media::aidl2legacy_MicrophoneInfo));
+    }
+    return OK;
+}
+
+status_t AudioFlingerClientAdapter::setAudioHalPids(const std::vector<pid_t>& pids) {
+    std::vector<int32_t> pidsAidl = VALUE_OR_RETURN_STATUS(
+            convertContainer<std::vector<int32_t>>(pids, legacy2aidl_pid_t_int32_t));
+    return statusTFromBinderStatus(mDelegate->setAudioHalPids(pidsAidl));
+}
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// AudioFlingerServerAdapter
+AudioFlingerServerAdapter::AudioFlingerServerAdapter(
+        const sp<AudioFlingerServerAdapter::Delegate>& delegate) : mDelegate(delegate) {}
+
+status_t AudioFlingerServerAdapter::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+                                               uint32_t flags) {
+    return mDelegate->onPreTransact(static_cast<Delegate::TransactionCode>(code), data, flags)
+           ?: BnAudioFlingerService::onTransact(code, data, reply, flags);
+}
+
+status_t AudioFlingerServerAdapter::dump(int fd, const Vector<String16>& args) {
+    return mDelegate->dump(fd, args);
+}
+
+Status AudioFlingerServerAdapter::createTrack(const media::CreateTrackRequest& request,
+                                              media::CreateTrackResponse* _aidl_return) {
+    return Status::fromStatusT(mDelegate->createTrack(request, *_aidl_return));
+}
+
+Status AudioFlingerServerAdapter::createRecord(const media::CreateRecordRequest& request,
+                                               media::CreateRecordResponse* _aidl_return) {
+    return Status::fromStatusT(mDelegate->createRecord(request, *_aidl_return));
+}
+
+Status AudioFlingerServerAdapter::sampleRate(int32_t ioHandle, int32_t* _aidl_return) {
+    audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->sampleRate(ioHandleLegacy)));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::format(int32_t output,
+                                         media::audio::common::AudioFormat* _aidl_return) {
+    audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            legacy2aidl_audio_format_t_AudioFormat(mDelegate->format(outputLegacy)));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::frameCount(int32_t ioHandle, int64_t* _aidl_return) {
+    audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int64_t>(mDelegate->frameCount(ioHandleLegacy)));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::latency(int32_t output, int32_t* _aidl_return) {
+    audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->latency(outputLegacy)));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setMasterVolume(float value) {
+    return Status::fromStatusT(mDelegate->setMasterVolume(value));
+}
+
+Status AudioFlingerServerAdapter::setMasterMute(bool muted) {
+    return Status::fromStatusT(mDelegate->setMasterMute(muted));
+}
+
+Status AudioFlingerServerAdapter::masterVolume(float* _aidl_return) {
+    *_aidl_return = mDelegate->masterVolume();
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::masterMute(bool* _aidl_return) {
+    *_aidl_return = mDelegate->masterMute();
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setMasterBalance(float balance) {
+    return Status::fromStatusT(mDelegate->setMasterBalance(balance));
+}
+
+Status AudioFlingerServerAdapter::getMasterBalance(float* _aidl_return) {
+    return Status::fromStatusT(mDelegate->getMasterBalance(_aidl_return));
+}
+
+Status AudioFlingerServerAdapter::setStreamVolume(media::AudioStreamType stream, float value,
+                                                  int32_t output) {
+    audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+    audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output));
+    return Status::fromStatusT(mDelegate->setStreamVolume(streamLegacy, value, outputLegacy));
+}
+
+Status AudioFlingerServerAdapter::setStreamMute(media::AudioStreamType stream, bool muted) {
+    audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+    return Status::fromStatusT(mDelegate->setStreamMute(streamLegacy, muted));
+}
+
+Status AudioFlingerServerAdapter::streamVolume(media::AudioStreamType stream, int32_t output,
+                                               float* _aidl_return) {
+    audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+    audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output));
+    *_aidl_return = mDelegate->streamVolume(streamLegacy, outputLegacy);
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::streamMute(media::AudioStreamType stream, bool* _aidl_return) {
+    audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+    *_aidl_return = mDelegate->streamMute(streamLegacy);
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setMode(media::AudioMode mode) {
+    audio_mode_t modeLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioMode_audio_mode_t(mode));
+    return Status::fromStatusT(mDelegate->setMode(modeLegacy));
+}
+
+Status AudioFlingerServerAdapter::setMicMute(bool state) {
+    return Status::fromStatusT(mDelegate->setMicMute(state));
+}
+
+Status AudioFlingerServerAdapter::getMicMute(bool* _aidl_return) {
+    *_aidl_return = mDelegate->getMicMute();
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setRecordSilenced(int32_t portId, bool silenced) {
+    audio_port_handle_t portIdLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_port_handle_t(portId));
+    mDelegate->setRecordSilenced(portIdLegacy, silenced);
+    return Status::ok();
+}
+
+Status
+AudioFlingerServerAdapter::setParameters(int32_t ioHandle, const std::string& keyValuePairs) {
+    audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+    String8 keyValuePairsLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_string_view_String8(keyValuePairs));
+    return Status::fromStatusT(mDelegate->setParameters(ioHandleLegacy, keyValuePairsLegacy));
+}
+
+Status AudioFlingerServerAdapter::getParameters(int32_t ioHandle, const std::string& keys,
+                                                std::string* _aidl_return) {
+    audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+    String8 keysLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_string_view_String8(keys));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            legacy2aidl_String8_string(mDelegate->getParameters(ioHandleLegacy, keysLegacy)));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::registerClient(const sp<media::IAudioFlingerClient>& client) {
+    mDelegate->registerClient(client);
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getInputBufferSize(int32_t sampleRate,
+                                                     media::audio::common::AudioFormat format,
+                                                     int32_t channelMask, int64_t* _aidl_return) {
+    uint32_t sampleRateLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(sampleRate));
+    audio_format_t formatLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioFormat_audio_format_t(format));
+    audio_channel_mask_t channelMaskLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_channel_mask_t(channelMask));
+    size_t size = mDelegate->getInputBufferSize(sampleRateLegacy, formatLegacy, channelMaskLegacy);
+    *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int64_t>(size));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::openOutput(const media::OpenOutputRequest& request,
+                                             media::OpenOutputResponse* _aidl_return) {
+    return Status::fromStatusT(mDelegate->openOutput(request, _aidl_return));
+}
+
+Status AudioFlingerServerAdapter::openDuplicateOutput(int32_t output1, int32_t output2,
+                                                      int32_t* _aidl_return) {
+    audio_io_handle_t output1Legacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output1));
+    audio_io_handle_t output2Legacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output2));
+    audio_io_handle_t result = mDelegate->openDuplicateOutput(output1Legacy, output2Legacy);
+    *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_io_handle_t_int32_t(result));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::closeOutput(int32_t output) {
+    audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output));
+    return Status::fromStatusT(mDelegate->closeOutput(outputLegacy));
+}
+
+Status AudioFlingerServerAdapter::suspendOutput(int32_t output) {
+    audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output));
+    return Status::fromStatusT(mDelegate->suspendOutput(outputLegacy));
+}
+
+Status AudioFlingerServerAdapter::restoreOutput(int32_t output) {
+    audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output));
+    return Status::fromStatusT(mDelegate->restoreOutput(outputLegacy));
+}
+
+Status AudioFlingerServerAdapter::openInput(const media::OpenInputRequest& request,
+                                            media::OpenInputResponse* _aidl_return) {
+    return Status::fromStatusT(mDelegate->openInput(request, _aidl_return));
+}
+
+Status AudioFlingerServerAdapter::closeInput(int32_t input) {
+    audio_io_handle_t inputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(input));
+    return Status::fromStatusT(mDelegate->closeInput(inputLegacy));
+}
+
+Status AudioFlingerServerAdapter::invalidateStream(media::AudioStreamType stream) {
+    audio_stream_type_t streamLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioStreamType_audio_stream_type_t(stream));
+    return Status::fromStatusT(mDelegate->invalidateStream(streamLegacy));
+}
+
+Status AudioFlingerServerAdapter::setVoiceVolume(float volume) {
+    return Status::fromStatusT(mDelegate->setVoiceVolume(volume));
+}
+
+Status
+AudioFlingerServerAdapter::getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) {
+    audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(output));
+    uint32_t halFramesLegacy;
+    uint32_t dspFramesLegacy;
+    RETURN_BINDER_IF_ERROR(
+            mDelegate->getRenderPosition(&halFramesLegacy, &dspFramesLegacy, outputLegacy));
+    _aidl_return->halFrames = VALUE_OR_RETURN_BINDER(convertIntegral<int32_t>(halFramesLegacy));
+    _aidl_return->dspFrames = VALUE_OR_RETURN_BINDER(convertIntegral<int32_t>(dspFramesLegacy));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) {
+    audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+    uint32_t result = mDelegate->getInputFramesLost(ioHandleLegacy);
+    *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int32_t>(result));
+    return Status::ok();
+}
+
+Status
+AudioFlingerServerAdapter::newAudioUniqueId(media::AudioUniqueIdUse use, int32_t* _aidl_return) {
+    audio_unique_id_use_t useLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(use));
+    audio_unique_id_t result = mDelegate->newAudioUniqueId(useLegacy);
+    *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_unique_id_t_int32_t(result));
+    return Status::ok();
+}
+
+Status
+AudioFlingerServerAdapter::acquireAudioSessionId(int32_t audioSession, int32_t pid, int32_t uid) {
+    audio_session_t audioSessionLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_session_t(audioSession));
+    pid_t pidLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_int32_t_pid_t(pid));
+    uid_t uidLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_int32_t_uid_t(uid));
+    mDelegate->acquireAudioSessionId(audioSessionLegacy, pidLegacy, uidLegacy);
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::releaseAudioSessionId(int32_t audioSession, int32_t pid) {
+    audio_session_t audioSessionLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_session_t(audioSession));
+    pid_t pidLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_int32_t_pid_t(pid));
+    mDelegate->releaseAudioSessionId(audioSessionLegacy, pidLegacy);
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::queryNumberEffects(int32_t* _aidl_return) {
+    uint32_t result;
+    RETURN_BINDER_IF_ERROR(mDelegate->queryNumberEffects(&result));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int32_t>(result));
+    return Status::ok();
+}
+
+Status
+AudioFlingerServerAdapter::queryEffect(int32_t index, media::EffectDescriptor* _aidl_return) {
+    uint32_t indexLegacy = VALUE_OR_RETURN_BINDER(convertIntegral<uint32_t>(index));
+    effect_descriptor_t result;
+    RETURN_BINDER_IF_ERROR(mDelegate->queryEffect(indexLegacy, &result));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            legacy2aidl_effect_descriptor_t_EffectDescriptor(result));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getEffectDescriptor(const media::AudioUuid& effectUUID,
+                                                      const media::AudioUuid& typeUUID,
+                                                      int32_t preferredTypeFlag,
+                                                      media::EffectDescriptor* _aidl_return) {
+    effect_uuid_t effectUuidLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioUuid_audio_uuid_t(effectUUID));
+    effect_uuid_t typeUuidLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioUuid_audio_uuid_t(typeUUID));
+    uint32_t preferredTypeFlagLegacy = VALUE_OR_RETURN_BINDER(
+            convertReinterpret<uint32_t>(preferredTypeFlag));
+    effect_descriptor_t result;
+    RETURN_BINDER_IF_ERROR(mDelegate->getEffectDescriptor(&effectUuidLegacy, &typeUuidLegacy,
+                                                          preferredTypeFlagLegacy, &result));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            legacy2aidl_effect_descriptor_t_EffectDescriptor(result));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::createEffect(const media::CreateEffectRequest& request,
+                                               media::CreateEffectResponse* _aidl_return) {
+    return Status::fromStatusT(mDelegate->createEffect(request, _aidl_return));
+}
+
+Status
+AudioFlingerServerAdapter::moveEffects(int32_t session, int32_t srcOutput, int32_t dstOutput) {
+    audio_session_t sessionLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_session_t(session));
+    audio_io_handle_t srcOutputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(srcOutput));
+    audio_io_handle_t dstOutputLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(dstOutput));
+    return Status::fromStatusT(
+            mDelegate->moveEffects(sessionLegacy, srcOutputLegacy, dstOutputLegacy));
+}
+
+Status AudioFlingerServerAdapter::setEffectSuspended(int32_t effectId, int32_t sessionId,
+                                                     bool suspended) {
+    int effectIdLegacy = VALUE_OR_RETURN_BINDER(convertReinterpret<int>(effectId));
+    audio_session_t sessionIdLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_session_t(sessionId));
+    mDelegate->setEffectSuspended(effectIdLegacy, sessionIdLegacy, suspended);
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::loadHwModule(const std::string& name, int32_t* _aidl_return) {
+    audio_module_handle_t result = mDelegate->loadHwModule(name.c_str());
+    *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_module_handle_t_int32_t(result));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getPrimaryOutputSamplingRate(int32_t* _aidl_return) {
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int32_t>(mDelegate->getPrimaryOutputSamplingRate()));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getPrimaryOutputFrameCount(int64_t* _aidl_return) {
+    *_aidl_return = VALUE_OR_RETURN_BINDER(
+            convertIntegral<int64_t>(mDelegate->getPrimaryOutputFrameCount()));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) {
+    return Status::fromStatusT(mDelegate->setLowRamDevice(isLowRamDevice, totalMemory));
+}
+
+Status AudioFlingerServerAdapter::getAudioPort(const media::AudioPort& port,
+                                               media::AudioPort* _aidl_return) {
+    audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPort_audio_port_v7(port));
+    RETURN_BINDER_IF_ERROR(mDelegate->getAudioPort(&portLegacy));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_port_v7_AudioPort(portLegacy));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::createAudioPatch(const media::AudioPatch& patch,
+                                                   int32_t* _aidl_return) {
+    audio_patch patchLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPatch_audio_patch(patch));
+    audio_patch_handle_t handleLegacy;
+    RETURN_BINDER_IF_ERROR(mDelegate->createAudioPatch(&patchLegacy, &handleLegacy));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_patch_handle_t_int32_t(handleLegacy));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::releaseAudioPatch(int32_t handle) {
+    audio_patch_handle_t handleLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_patch_handle_t(handle));
+    return Status::fromStatusT(mDelegate->releaseAudioPatch(handleLegacy));
+}
+
+Status AudioFlingerServerAdapter::listAudioPatches(int32_t maxCount,
+                            std::vector<media::AudioPatch>* _aidl_return) {
+    unsigned int count = VALUE_OR_RETURN_BINDER(convertIntegral<unsigned int>(maxCount));
+    count = std::min(count, static_cast<unsigned int>(MAX_ITEMS_PER_LIST));
+    std::unique_ptr<audio_patch[]> patchesLegacy(new audio_patch[count]);
+    RETURN_BINDER_IF_ERROR(mDelegate->listAudioPatches(&count, patchesLegacy.get()));
+    RETURN_BINDER_IF_ERROR(convertRange(&patchesLegacy[0],
+                           &patchesLegacy[count],
+                           std::back_inserter(*_aidl_return),
+                           legacy2aidl_audio_patch_AudioPatch));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setAudioPortConfig(const media::AudioPortConfig& config) {
+    audio_port_config configLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_AudioPortConfig_audio_port_config(config));
+    return Status::fromStatusT(mDelegate->setAudioPortConfig(&configLegacy));
+}
+
+Status AudioFlingerServerAdapter::getAudioHwSyncForSession(int32_t sessionId,
+                                                           int32_t* _aidl_return) {
+    audio_session_t sessionIdLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_session_t(sessionId));
+    audio_hw_sync_t result = mDelegate->getAudioHwSyncForSession(sessionIdLegacy);
+    *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_hw_sync_t_int32_t(result));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::systemReady() {
+    return Status::fromStatusT(mDelegate->systemReady());
+}
+
+Status AudioFlingerServerAdapter::frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) {
+    audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
+            aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
+    size_t result = mDelegate->frameCountHAL(ioHandleLegacy);
+    *_aidl_return = VALUE_OR_RETURN_BINDER(convertIntegral<int64_t>(result));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::getMicrophones(
+        std::vector<media::MicrophoneInfoData>* _aidl_return) {
+    std::vector<media::MicrophoneInfo> resultLegacy;
+    RETURN_BINDER_IF_ERROR(mDelegate->getMicrophones(&resultLegacy));
+    *_aidl_return = VALUE_OR_RETURN_BINDER(convertContainer<std::vector<media::MicrophoneInfoData>>(
+            resultLegacy, media::legacy2aidl_MicrophoneInfo));
+    return Status::ok();
+}
+
+Status AudioFlingerServerAdapter::setAudioHalPids(const std::vector<int32_t>& pids) {
+    std::vector<pid_t> pidsLegacy = VALUE_OR_RETURN_BINDER(
+            convertContainer<std::vector<pid_t>>(pids, aidl2legacy_int32_t_pid_t));
+    RETURN_BINDER_IF_ERROR(mDelegate->setAudioHalPids(pidsLegacy));
+    return Status::ok();
+}
 
 } // namespace android
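
The two adapters above are mirror images: AudioFlingerClientAdapter converts legacy arguments to AIDL types and forwards each call over binder, while AudioFlingerServerAdapter converts AIDL arguments back to legacy types before invoking its Delegate. A minimal usage sketch, not part of this change — the AudioFlingerClientAdapter constructor taking the AIDL proxy is assumed from the mDelegate usage above, and the header path is an assumption:

    // Sketch only: querying AudioFlinger through the client adapter.
    #include <media/IAudioFlinger.h>   // header path assumed

    using namespace android;

    size_t halFrameCount(const sp<media::IAudioFlingerService>& af, audio_io_handle_t io) {
        AudioFlingerClientAdapter flinger(af);  // assumed constructor signature
        // Conversion or binder failures collapse to the value_or(0) fallback shown above.
        return flinger.frameCountHAL(io);
    }
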
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 4132244..2828bcb 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -69,7 +69,7 @@
     QUERY_DEFAULT_PRE_PROCESSING,
     SET_EFFECT_ENABLED,
     IS_STREAM_ACTIVE_REMOTELY,
-    IS_OFFLOAD_SUPPORTED,
+    GET_OFFLOAD_MODE_SUPPORTED,
     IS_DIRECT_OUTPUT_SUPPORTED,
     LIST_AUDIO_PORTS,
     GET_AUDIO_PORT,
@@ -666,13 +666,13 @@
         return reply.readInt32();
     }
 
-    virtual bool isOffloadSupported(const audio_offload_info_t& info)
+    virtual audio_offload_mode_t getOffloadSupport(const audio_offload_info_t& info)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.write(&info, sizeof(audio_offload_info_t));
-        remote()->transact(IS_OFFLOAD_SUPPORTED, data, &reply);
-        return reply.readInt32();
+        remote()->transact(GET_OFFLOAD_MODE_SUPPORTED, data, &reply);
+        return static_cast<audio_offload_mode_t>(reply.readInt32());
     }
 
     virtual bool isDirectOutputSupported(const audio_config_base_t& config,
@@ -688,7 +688,7 @@
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
                                     unsigned int *num_ports,
-                                    struct audio_port *ports,
+                                    struct audio_port_v7 *ports,
                                     unsigned int *generation)
     {
         if (num_ports == NULL || (*num_ports != 0 && ports == NULL) ||
@@ -711,27 +711,27 @@
                 numPortsReq = *num_ports;
             }
             if (numPortsReq > 0) {
-                reply.read(ports, numPortsReq * sizeof(struct audio_port));
+                reply.read(ports, numPortsReq * sizeof(struct audio_port_v7));
             }
             *generation = reply.readInt32();
         }
         return status;
     }
 
-    virtual status_t getAudioPort(struct audio_port *port)
+    virtual status_t getAudioPort(struct audio_port_v7 *port)
     {
         if (port == NULL) {
             return BAD_VALUE;
         }
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-        data.write(port, sizeof(struct audio_port));
+        data.write(port, sizeof(struct audio_port_v7));
         status_t status = remote()->transact(GET_AUDIO_PORT, data, &reply);
         if (status != NO_ERROR ||
                 (status = (status_t)reply.readInt32()) != NO_ERROR) {
             return status;
         }
-        reply.read(port, sizeof(struct audio_port));
+        reply.read(port, sizeof(struct audio_port_v7));
         return status;
     }
 
@@ -1591,6 +1591,7 @@
         case REGISTER_EFFECT:
         case UNREGISTER_EFFECT:
         case SET_EFFECT_ENABLED:
+        case GET_STRATEGY_FOR_STREAM:
         case GET_OUTPUT_FOR_ATTR:
         case MOVE_EFFECTS_TO_IO:
             ALOGW("%s: transaction %d received from PID %d",
@@ -1619,6 +1620,14 @@
         case INIT_STREAM_VOLUME:
         case SET_STREAM_VOLUME:
         case SET_VOLUME_ATTRIBUTES:
+        case GET_STREAM_VOLUME:
+        case GET_VOLUME_ATTRIBUTES:
+        case GET_MIN_VOLUME_FOR_ATTRIBUTES:
+        case GET_MAX_VOLUME_FOR_ATTRIBUTES:
+        case IS_STREAM_ACTIVE:
+        case IS_STREAM_ACTIVE_REMOTELY:
+        case IS_SOURCE_ACTIVE:
+        case GET_DEVICES_FOR_STREAM:
         case REGISTER_POLICY_MIXES:
         case SET_MASTER_MONO:
         case GET_SURROUND_FORMATS:
@@ -2140,12 +2149,11 @@
             return status;
         }
 
-        case IS_OFFLOAD_SUPPORTED: {
+        case GET_OFFLOAD_MODE_SUPPORTED: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_offload_info_t info = {};
             data.read(&info, sizeof(audio_offload_info_t));
-            bool isSupported = isOffloadSupported(info);
-            reply->writeInt32(isSupported);
+            reply->writeInt32(static_cast<int32_t>(getOffloadSupport(info)));
             return NO_ERROR;
         }
 
@@ -2174,8 +2182,8 @@
                 numPortsReq = MAX_ITEMS_PER_LIST;
             }
             unsigned int numPorts = numPortsReq;
-            struct audio_port *ports =
-                    (struct audio_port *)calloc(numPortsReq, sizeof(struct audio_port));
+            struct audio_port_v7 *ports =
+                    (struct audio_port_v7 *)calloc(numPortsReq, sizeof(struct audio_port_v7));
             if (ports == NULL) {
                 reply->writeInt32(NO_MEMORY);
                 reply->writeInt32(0);
@@ -2190,7 +2198,7 @@
                 if (numPortsReq > numPorts) {
                     numPortsReq = numPorts;
                 }
-                reply->write(ports, numPortsReq * sizeof(struct audio_port));
+                reply->write(ports, numPortsReq * sizeof(struct audio_port_v7));
                 reply->writeInt32(generation);
             }
             free(ports);
@@ -2199,8 +2207,8 @@
 
         case GET_AUDIO_PORT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
-            struct audio_port port = {};
-            status_t status = data.read(&port, sizeof(struct audio_port));
+            struct audio_port_v7 port = {};
+            status_t status = data.read(&port, sizeof(struct audio_port_v7));
             if (status != NO_ERROR) {
                 ALOGE("b/23912202");
                 return status;
@@ -2211,7 +2219,7 @@
             }
             reply->writeInt32(status);
             if (status == NO_ERROR) {
-                reply->write(&port, sizeof(struct audio_port));
+                reply->write(&port, sizeof(struct audio_port_v7));
             }
             return NO_ERROR;
         }
diff --git a/media/libaudioclient/PlayerBase.cpp b/media/libaudioclient/PlayerBase.cpp
index c443865..9e7d89e 100644
--- a/media/libaudioclient/PlayerBase.cpp
+++ b/media/libaudioclient/PlayerBase.cpp
@@ -15,13 +15,14 @@
  */
 
 #include <binder/IServiceManager.h>
+#include <media/AidlConversionUtil.h>
 #include <media/PlayerBase.h>
 
 #define max(a, b) ((a) > (b) ? (a) : (b))
 #define min(a, b) ((a) < (b) ? (a) : (b))
 
 namespace android {
-
+using aidl_utils::binderStatusFromStatusT;
 using media::VolumeShaperConfiguration;
 using media::VolumeShaperOperation;
 
@@ -150,7 +151,7 @@
     if (status != NO_ERROR) {
         ALOGW("PlayerBase::setVolume() error %d", status);
     }
-    return binder::Status::fromStatusT(status);
+    return binderStatusFromStatusT(status);
 }
 
 binder::Status PlayerBase::setPan(float pan) {
@@ -170,7 +171,7 @@
     if (status != NO_ERROR) {
         ALOGW("PlayerBase::setPan() error %d", status);
     }
-    return binder::Status::fromStatusT(status);
+    return binderStatusFromStatusT(status);
 }
 
 binder::Status PlayerBase::setStartDelayMs(int32_t delayMs __unused) {
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index ee78a2d..c9f3ab9 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -17,6 +17,8 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ToneGenerator"
 
+#include <utility>
+
 #include <math.h>
 #include <utils/Log.h>
 #include <cutils/properties.h>
@@ -740,6 +742,11 @@
                         { .duration = 0 , .waveFreq = { 0 }, 0, 0}},
           .repeatCnt = ToneGenerator::TONEGEN_INF,
           .repeatSegment = 0 },                              // TONE_JAPAN_RADIO_ACK
+        { .segments = { { .duration = 1000, .waveFreq = { 400, 0 }, 0, 0 },
+                        { .duration = 2000, .waveFreq = { 0 }, 0, 0 },
+                        { .duration = 0 , .waveFreq = { 0 }, 0, 0}},
+          .repeatCnt = ToneGenerator::TONEGEN_INF,
+          .repeatSegment = 0 },                              // TONE_JAPAN_RINGTONE
         { .segments = { { .duration = 375, .waveFreq = { 400, 0 }, 0, 0 },
                         { .duration = 375, .waveFreq = { 0 }, 0, 0 },
                         { .duration = 0 , .waveFreq = { 0 }, 0, 0}},
@@ -881,7 +888,7 @@
             TONE_SUP_RADIO_NOTAVAIL,     // TONE_SUP_RADIO_NOTAVAIL
             TONE_SUP_ERROR,              // TONE_SUP_ERROR
             TONE_SUP_CALL_WAITING,       // TONE_SUP_CALL_WAITING
-            TONE_SUP_RINGTONE            // TONE_SUP_RINGTONE
+            TONE_JAPAN_RINGTONE          // TONE_SUP_RINGTONE
         },
         {   // GB
             TONE_ANSI_DIAL,              // TONE_SUP_DIAL
@@ -979,7 +986,9 @@
 //        none
 //
 ////////////////////////////////////////////////////////////////////////////////
-ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava) {
+ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava,
+        std::string opPackageName)
+        : mOpPackageName(std::move(opPackageName)) {
 
     ALOGV("ToneGenerator constructor: streamType=%d, volume=%f", streamType, volume);
 
@@ -1250,7 +1259,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 bool ToneGenerator::initAudioTrack() {
     // Open audio track in mono, PCM 16bit, default sampling rate.
-    mpAudioTrack = new AudioTrack();
+    mpAudioTrack = new AudioTrack(mOpPackageName);
     ALOGV("AudioTrack(%p) created", mpAudioTrack.get());
 
     audio_attributes_t attr;
diff --git a/media/libaudioclient/TrackPlayerBase.cpp b/media/libaudioclient/TrackPlayerBase.cpp
index e571838..372b7c3 100644
--- a/media/libaudioclient/TrackPlayerBase.cpp
+++ b/media/libaudioclient/TrackPlayerBase.cpp
@@ -17,7 +17,7 @@
 #include <media/TrackPlayerBase.h>
 
 namespace android {
-
+using aidl_utils::binderStatusFromStatusT;
 using media::VolumeShaper;
 
 //--------------------------------------------------------------------------------------------------
@@ -115,7 +115,7 @@
     status_t s = spConfiguration->readFromParcelable(configuration)
             ?: spOperation->readFromParcelable(operation);
     if (s != OK) {
-        return binder::Status::fromStatusT(s);
+        return binderStatusFromStatusT(s);
     }
 
     if (mAudioTrack != 0) {
@@ -124,7 +124,7 @@
         if (status < 0) { // a non-negative value is the volume shaper id.
             ALOGE("TrackPlayerBase::applyVolumeShaper() failed with status %d", status);
         }
-        return binder::Status::fromStatusT(status);
+        return binderStatusFromStatusT(status);
     } else {
         ALOGD("TrackPlayerBase::applyVolumeShaper()"
               " no AudioTrack for volume control from IPlayer");
diff --git a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
index cf11528..cebd8f0 100644
--- a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
@@ -35,4 +35,5 @@
     MMAP_NOIRQ       = 12,
     VOIP_RX          = 13,
     INCALL_MUSIC     = 14,
+    GAPLESS_OFFLOAD  = 15,
 }
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
new file mode 100644
index 0000000..e63f391
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.AudioMode;
+import android.media.AudioPatch;
+import android.media.AudioPort;
+import android.media.AudioPortConfig;
+import android.media.AudioStreamType;
+import android.media.AudioUniqueIdUse;
+import android.media.AudioUuid;
+import android.media.CreateEffectRequest;
+import android.media.CreateEffectResponse;
+import android.media.CreateRecordRequest;
+import android.media.CreateRecordResponse;
+import android.media.CreateTrackRequest;
+import android.media.CreateTrackResponse;
+import android.media.OpenInputRequest;
+import android.media.OpenInputResponse;
+import android.media.OpenOutputRequest;
+import android.media.OpenOutputResponse;
+import android.media.EffectDescriptor;
+import android.media.IAudioFlingerClient;
+import android.media.IAudioRecord;
+import android.media.IAudioTrack;
+import android.media.MicrophoneInfoData;
+import android.media.RenderPosition;
+import android.media.audio.common.AudioFormat;
+
+/**
+ * {@hide}
+ */
+interface IAudioFlingerService {
+    /**
+     * Creates an audio track and registers it with AudioFlinger, returning null if the track
+     * cannot be created.
+     */
+    CreateTrackResponse createTrack(in CreateTrackRequest request);
+
+    CreateRecordResponse createRecord(in CreateRecordRequest request);
+
+    // FIXME Surprisingly, format/latency don't work for input handles
+
+    /**
+     * Queries the audio hardware state. This state never changes, and therefore can be cached.
+     */
+    int sampleRate(int /* audio_io_handle_t */ ioHandle);
+
+    AudioFormat format(int /* audio_io_handle_t */ output);
+
+    long frameCount(int /* audio_io_handle_t */ ioHandle);
+
+    /**
+     * Return the estimated latency in milliseconds.
+     */
+    int latency(int  /* audio_io_handle_t */ output);
+
+    /*
+     * Sets/gets the audio hardware state. This is expected to be used mainly by the
+     * preference panel.
+     */
+    void setMasterVolume(float value);
+    void setMasterMute(boolean muted);
+
+    float masterVolume();
+    boolean masterMute();
+
+    void setMasterBalance(float balance);
+    float getMasterBalance();
+
+    /*
+     * Sets/gets stream type state. This is expected to be used mainly by the
+     * preference panel.
+     */
+    void setStreamVolume(AudioStreamType stream, float value, int /* audio_io_handle_t */ output);
+    void setStreamMute(AudioStreamType stream, boolean muted);
+    float streamVolume(AudioStreamType stream, int /* audio_io_handle_t */ output);
+    boolean streamMute(AudioStreamType stream);
+
+    // set audio mode.
+    void setMode(AudioMode mode);
+
+    // mic mute/state
+    void setMicMute(boolean state);
+    boolean getMicMute();
+    void setRecordSilenced(int /* audio_port_handle_t */ portId,
+                           boolean silenced);
+
+    void setParameters(int /* audio_io_handle_t */ ioHandle,
+                       @utf8InCpp String keyValuePairs);
+    @utf8InCpp String getParameters(int /* audio_io_handle_t */ ioHandle,
+                                    @utf8InCpp String keys);
+
+    // Register an object to receive audio input/output change and track notifications.
+    // For a given calling pid, AudioFlinger disregards any registrations after the first.
+    // Thus the IAudioFlingerClient must be a singleton per process.
+    void registerClient(IAudioFlingerClient client);
+
+    // Retrieve the audio recording buffer size in bytes.
+    // FIXME This API assumes a route, and so should be deprecated.
+    long getInputBufferSize(int sampleRate,
+                            AudioFormat format,
+                            int /* audio_channel_mask_t */ channelMask);
+
+    OpenOutputResponse openOutput(in OpenOutputRequest request);
+    int /* audio_io_handle_t */ openDuplicateOutput(int /* audio_io_handle_t */ output1,
+                                                    int /* audio_io_handle_t */ output2);
+    void closeOutput(int /* audio_io_handle_t */ output);
+    void suspendOutput(int /* audio_io_handle_t */ output);
+    void restoreOutput(int /* audio_io_handle_t */ output);
+
+    OpenInputResponse openInput(in OpenInputRequest request);
+    void closeInput(int /* audio_io_handle_t */ input);
+
+    void invalidateStream(AudioStreamType stream);
+
+    void setVoiceVolume(float volume);
+
+    RenderPosition getRenderPosition(int /* audio_io_handle_t */ output);
+
+    int getInputFramesLost(int /* audio_io_handle_t */ ioHandle);
+
+    int /* audio_unique_id_t */ newAudioUniqueId(AudioUniqueIdUse use);
+
+    void acquireAudioSessionId(int /* audio_session_t */ audioSession,
+                               int /* pid_t */ pid,
+                               int /* uid_t */ uid);
+    void releaseAudioSessionId(int /* audio_session_t */ audioSession,
+                               int /* pid_t */ pid);
+
+    int queryNumberEffects();
+
+    EffectDescriptor queryEffect(int index);
+
+    /** preferredTypeFlag is interpreted as a uint32_t with the "effect flag" format. */
+    EffectDescriptor getEffectDescriptor(in AudioUuid effectUUID,
+                                         in AudioUuid typeUUID,
+                                         int preferredTypeFlag);
+
+    CreateEffectResponse createEffect(in CreateEffectRequest request);
+
+    void moveEffects(int /* audio_session_t */ session,
+                     int /* audio_io_handle_t */ srcOutput,
+                     int /* audio_io_handle_t */ dstOutput);
+
+    void setEffectSuspended(int effectId,
+                            int /* audio_session_t */ sessionId,
+                            boolean suspended);
+
+    int /* audio_module_handle_t */ loadHwModule(@utf8InCpp String name);
+
+    // Helpers for android.media.AudioManager.getProperty(); see the description there for meaning.
+    // FIXME move these APIs to AudioPolicy to permit a more accurate implementation
+    // that looks on primary device for a stream with fast flag, primary flag, or first one.
+    int getPrimaryOutputSamplingRate();
+    long getPrimaryOutputFrameCount();
+
+    // Intended for AudioService to inform AudioFlinger of device's low RAM attribute,
+    // and should be called at most once.  For a definition of what "low RAM" means, see
+    // android.app.ActivityManager.isLowRamDevice().  The totalMemory parameter
+    // is obtained from android.app.ActivityManager.MemoryInfo.totalMem.
+    void setLowRamDevice(boolean isLowRamDevice, long totalMemory);
+
+    /* Get attributes for a given audio port */
+    AudioPort getAudioPort(in AudioPort port);
+
+    /* Create an audio patch between several source and sink ports */
+    int /* audio_patch_handle_t */ createAudioPatch(in AudioPatch patch);
+
+    /* Release an audio patch */
+    void releaseAudioPatch(int /* audio_patch_handle_t */ handle);
+
+    /* List existing audio patches */
+    AudioPatch[] listAudioPatches(int maxCount);
+    /* Set audio port configuration */
+    void setAudioPortConfig(in AudioPortConfig config);
+
+    /* Get the HW synchronization source used for an audio session */
+    int /* audio_hw_sync_t */ getAudioHwSyncForSession(int /* audio_session_t */ sessionId);
+
+    /* Indicate JAVA services are ready (scheduling, power management ...) */
+    oneway void systemReady();
+
+    // Returns the number of frames per audio HAL buffer.
+    long frameCountHAL(int /* audio_io_handle_t */ ioHandle);
+
+    /* List available microphones and their characteristics */
+    MicrophoneInfoData[] getMicrophones();
+
+    void setAudioHalPids(in int[] /* pid_t[] */ pids);
+}
diff --git a/media/libaudioclient/aidl/android/media/RenderPosition.aidl b/media/libaudioclient/aidl/android/media/RenderPosition.aidl
new file mode 100644
index 0000000..98dc17a
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/RenderPosition.aidl
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * {@hide}
+ */
+parcelable RenderPosition {
+    int halFrames;
+    int dspFrames;
+}
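
A hedged sketch of reading this parcelable from the generated C++ proxy; the out-parameter signature mirrors AudioFlingerServerAdapter::getRenderPosition above, and the helper name below is illustrative (includes and namespace qualifiers elided):

    // Sketch only: fetching the render position over the new AIDL interface.
    status_t renderPositionFrames(const sp<media::IAudioFlingerService>& af, int32_t output,
                                  int32_t* halFrames, int32_t* dspFrames) {
        media::RenderPosition pos;
        binder::Status bs = af->getRenderPosition(output, &pos);
        if (!bs.isOk()) return aidl_utils::statusTFromBinderStatus(bs);
        *halFrames = pos.halFrames;   // field names as declared in RenderPosition.aidl
        *dspFrames = pos.dspFrames;
        return OK;
    }
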
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 2dc471b..e3858b9 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -34,6 +34,7 @@
 #include <android/media/AudioIoConfigEvent.h>
 #include <android/media/AudioIoDescriptor.h>
 #include <android/media/AudioMixLatencyClass.h>
+#include <android/media/AudioMode.h>
 #include <android/media/AudioOutputFlags.h>
 #include <android/media/AudioPort.h>
 #include <android/media/AudioPortConfigType.h>
@@ -43,6 +44,7 @@
 #include <android/media/AudioPortSessionExt.h>
 #include <android/media/AudioProfile.h>
 #include <android/media/AudioTimestampInternal.h>
+#include <android/media/AudioUniqueIdUse.h>
 #include <android/media/EffectDescriptor.h>
 
 #include <android/media/SharedFileRegion.h>
@@ -75,10 +77,15 @@
 ConversionResult<audio_unique_id_t> aidl2legacy_int32_t_audio_unique_id_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_unique_id_t_int32_t(audio_unique_id_t legacy);
 
-// The legacy enum is unnamed. Thus, we use int.
-ConversionResult<int> aidl2legacy_AudioPortConfigType(media::AudioPortConfigType aidl);
-// The legacy enum is unnamed. Thus, we use int.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_AudioPortConfigType(int legacy);
+ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
+
+// The legacy enum is unnamed. Thus, we use int32_t.
+ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
+        media::AudioPortConfigType aidl);
+// The legacy enum is unnamed. Thus, we use int32_t.
+ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
+        int32_t legacy);
 
 ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
@@ -124,7 +131,7 @@
 legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_mask_int32_t(audio_gain_mode_t legacy);
+ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
 
 ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy);
@@ -144,20 +151,26 @@
 ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
         audio_output_flags_t legacy);
 
-ConversionResult<audio_input_flags_t> aidl2legacy_audio_input_flags_mask(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_input_flags_mask(audio_input_flags_t legacy);
+ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
+        int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_input_flags_t_int32_t_mask(
+        audio_input_flags_t legacy);
 
-ConversionResult<audio_output_flags_t> aidl2legacy_audio_output_flags_mask(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_output_flags_mask(audio_output_flags_t legacy);
+ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
+        int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_output_flags_t_int32_t_mask(
+        audio_output_flags_t legacy);
 
 ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
         const media::AudioIoFlags& aidl, media::AudioPortRole role, media::AudioPortType type);
 ConversionResult<media::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
         const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type);
 
-ConversionResult<audio_port_config_device_ext> aidl2legacy_AudioPortConfigDeviceExt(
+ConversionResult<audio_port_config_device_ext>
+aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
         const media::AudioPortConfigDeviceExt& aidl);
-ConversionResult<media::AudioPortConfigDeviceExt> legacy2aidl_AudioPortConfigDeviceExt(
+ConversionResult<media::AudioPortConfigDeviceExt>
+legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
         const audio_port_config_device_ext& legacy);
 
 ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
@@ -178,9 +191,11 @@
 ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
         const audio_port_config_mix_ext& legacy, audio_port_role_t role);
 
-ConversionResult<audio_port_config_session_ext> aidl2legacy_AudioPortConfigSessionExt(
+ConversionResult<audio_port_config_session_ext>
+aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
         const media::AudioPortConfigSessionExt& aidl);
-ConversionResult<media::AudioPortConfigSessionExt> legacy2aidl_AudioPortConfigSessionExt(
+ConversionResult<media::AudioPortConfigSessionExt>
+legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
         const audio_port_config_session_ext& legacy);
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
@@ -199,8 +214,10 @@
 ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
         const sp<AudioIoDescriptor>& legacy);
 
-ConversionResult<AudioClient> aidl2legacy_AudioClient(const media::AudioClient& aidl);
-ConversionResult<media::AudioClient> legacy2aidl_AudioClient(const AudioClient& legacy);
+ConversionResult<AudioClient> aidl2legacy_AudioClient_AudioClient(
+        const media::AudioClient& aidl);
+ConversionResult<media::AudioClient> legacy2aidl_AudioClient_AudioClient(
+        const AudioClient& legacy);
 
 ConversionResult<audio_content_type_t>
 aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl);
@@ -258,9 +275,9 @@
 legacy2aidl_NullableIMemory_SharedFileRegion(const sp<IMemory>& legacy);
 
 ConversionResult<AudioTimestamp>
-aidl2legacy_AudioTimestamp(const media::AudioTimestampInternal& aidl);
+aidl2legacy_AudioTimestampInternal_AudioTimestamp(const media::AudioTimestampInternal& aidl);
 ConversionResult<media::AudioTimestampInternal>
-legacy2aidl_AudioTimestamp(const AudioTimestamp& legacy);
+legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy);
 
 ConversionResult<audio_uuid_t>
 aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl);
@@ -326,4 +343,14 @@
 ConversionResult<media::AudioPort>
 legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
 
+ConversionResult<audio_mode_t>
+aidl2legacy_AudioMode_audio_mode_t(media::AudioMode aidl);
+ConversionResult<media::AudioMode>
+legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy);
+
+ConversionResult<audio_unique_id_use_t>
+aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(media::AudioUniqueIdUse aidl);
+ConversionResult<media::AudioUniqueIdUse>
+legacy2aidl_audio_unique_id_use_t_AudioUniqueIdUse(audio_unique_id_use_t legacy);
+
 }  // namespace android
diff --git a/media/libaudioclient/include/media/AidlConversionUtil.h b/media/libaudioclient/include/media/AidlConversionUtil.h
index 6bfb743..9453673 100644
--- a/media/libaudioclient/include/media/AidlConversionUtil.h
+++ b/media/libaudioclient/include/media/AidlConversionUtil.h
@@ -21,6 +21,7 @@
 #include <utility>
 
 #include <android-base/expected.h>
+#include <binder/Status.h>
 
 namespace android {
 
@@ -132,4 +133,98 @@
 #define UNION_SET(u, field, value) \
     (u).set<std::decay_t<decltype(u)>::Tag::field>(value)
 
+namespace aidl_utils {
+
+/**
+ * Return the equivalent Android status_t from a binder exception code.
+ *
+ * Generally one should use statusTFromBinderStatus() instead.
+ *
+ * Exception codes can be generated from a remote Java service exception; translate
+ * them for use on the native side.
+ *
+ * Note: for EX_TRANSACTION_FAILED and EX_SERVICE_SPECIFIC a more detailed error code
+ * can be found from transactionError() or serviceSpecificErrorCode().
+ */
+static inline status_t statusTFromExceptionCode(int32_t exceptionCode) {
+    using namespace ::android::binder;
+    switch (exceptionCode) {
+        case Status::EX_NONE:
+            return OK;
+        case Status::EX_SECURITY: // Java SecurityException, rethrows locally in Java
+            return PERMISSION_DENIED;
+        case Status::EX_BAD_PARCELABLE: // Java BadParcelableException, rethrows in Java
+        case Status::EX_ILLEGAL_ARGUMENT: // Java IllegalArgumentException, rethrows in Java
+        case Status::EX_NULL_POINTER: // Java NullPointerException, rethrows in Java
+            return BAD_VALUE;
+        case Status::EX_ILLEGAL_STATE: // Java IllegalStateException, rethrows in Java
+        case Status::EX_UNSUPPORTED_OPERATION: // Java UnsupportedOperationException, rethrows
+            return INVALID_OPERATION;
+        case Status::EX_HAS_REPLY_HEADER: // Native strictmode violation
+        case Status::EX_PARCELABLE: // Java bootclass loader (not standard exception), rethrows
+        case Status::EX_NETWORK_MAIN_THREAD: // Java NetworkOnMainThreadException, rethrows
+        case Status::EX_TRANSACTION_FAILED: // Native - see error code
+        case Status::EX_SERVICE_SPECIFIC:  // Java ServiceSpecificException,
+                                           // rethrows in Java with integer error code
+            return UNKNOWN_ERROR;
+    }
+    return UNKNOWN_ERROR;
+}
+
+/**
+ * Return the equivalent Android status_t from a binder status.
+ *
+ * Used to handle errors from an AIDL method declaration
+ *
+ * [oneway] void method(type0 param0, ...)
+ *
+ * or the following (where return_type is not a status_t)
+ *
+ * return_type method(type0 param0, ...)
+ */
+static inline status_t statusTFromBinderStatus(const ::android::binder::Status &status) {
+    return status.isOk() ? OK // check OK,
+        : status.serviceSpecificErrorCode() // service-side error, not standard Java exception
+                                            // (fromServiceSpecificError)
+        ?: status.transactionError() // a native binder transaction error (fromStatusT)
+        ?: statusTFromExceptionCode(status.exceptionCode()); // a service-side error with a
+                                                    // standard Java exception (fromExceptionCode)
+}
+
+/**
+ * Return a binder::Status from native service status.
+ *
+ * This is used for methods not returning an explicit status_t,
+ * where Java callers expect an exception, not an integer return value.
+ */
+static inline ::android::binder::Status binderStatusFromStatusT(
+        status_t status, const char *optionalMessage = nullptr) {
+    const char * const emptyIfNull = optionalMessage == nullptr ? "" : optionalMessage;
+    // From binder::Status instructions:
+    //  Prefer a generic exception code when possible, then a service specific
+    //  code, and finally a status_t for low level failures or legacy support.
+    //  Exception codes and service specific errors map to nicer exceptions for
+    //  Java clients.
+
+    using namespace ::android::binder;
+    switch (status) {
+        case OK:
+            return Status::ok();
+        case PERMISSION_DENIED: // throw SecurityException on Java side
+            return Status::fromExceptionCode(Status::EX_SECURITY, emptyIfNull);
+        case BAD_VALUE: // throw IllegalArgumentException on Java side
+            return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT, emptyIfNull);
+        case INVALID_OPERATION: // throw IllegalStateException on Java side
+            return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE, emptyIfNull);
+    }
+
+    // A service specific error will not show on status.transactionError() so
+    // be sure to use statusTFromBinderStatus() for reliable error handling.
+
+    // throw a ServiceSpecificException.
+    return Status::fromServiceSpecificError(status, emptyIfNull);
+}
+
+} // namespace aidl_utils
+
 }  // namespace android
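
The conversion helpers added above are intended to be used in pairs: a server implementation wraps its legacy status_t in a binder::Status, and a client collapses the binder::Status returned by an AIDL call back into a status_t. A minimal sketch, assuming a hypothetical IFooService AIDL interface with a doSomething() method (only the two helpers come from this change):

#include <media/AidlConversionUtil.h>

using namespace android;
using aidl_utils::statusTFromBinderStatus;
using aidl_utils::binderStatusFromStatusT;

// Client side: collapse the binder::Status of an AIDL call into a legacy status_t.
status_t callDoSomething(const sp<media::IFooService>& service, int32_t arg) {
    binder::Status st = service->doSomething(arg);   // hypothetical AIDL method
    return statusTFromBinderStatus(st);              // OK, service-specific error,
                                                     // transaction error, or exception code
}

// Server side: surface a legacy status_t as an exception or service-specific error
// that Java callers can catch. FooService and doSomethingLegacy() are placeholders.
binder::Status FooService::doSomething(int32_t arg) {
    const status_t err = doSomethingLegacy(arg);     // hypothetical legacy implementation
    return binderStatusFromStatusT(err, "doSomething failed");
}
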
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index dfc1982..7a3692b 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -319,9 +319,10 @@
 
     static status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t flags);
 
-    // Check if hw offload is possible for given format, stream type, sample rate,
-    // bit rate, duration, video and streaming or offload property is enabled
-    static bool isOffloadSupported(const audio_offload_info_t& info);
+    // Indicate whether hw offload is possible for the given format, stream type, sample rate,
+    // bit rate, duration, video and streaming properties, or when the offload property is
+    // enabled; and, when offload is possible, whether gapless transitions are supported.
+    static audio_offload_mode_t getOffloadSupport(const audio_offload_info_t& info);
 
     // check presence of audio flinger service.
     // returns NO_ERROR if binding to service succeeds, DEAD_OBJECT otherwise
@@ -331,11 +332,11 @@
     static status_t listAudioPorts(audio_port_role_t role,
                                    audio_port_type_t type,
                                    unsigned int *num_ports,
-                                   struct audio_port *ports,
+                                   struct audio_port_v7 *ports,
                                    unsigned int *generation);
 
     /* Get attributes for a given audio port */
-    static status_t getAudioPort(struct audio_port *port);
+    static status_t getAudioPort(struct audio_port_v7 *port);
 
     /* Create an audio patch between several source and sink ports */
     static status_t createAudioPatch(const struct audio_patch *patch,
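
A possible call pattern for the getOffloadSupport() change above. This is a sketch only: the audio_offload_mode_t enumerator names are assumed to follow the usual system/audio.h naming (AUDIO_OFFLOAD_NOT_SUPPORTED, AUDIO_OFFLOAD_SUPPORTED, AUDIO_OFFLOAD_GAPLESS_SUPPORTED) and should be checked against that header:

void queryOffload() {
    audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
    info.format = AUDIO_FORMAT_MP3;
    info.sample_rate = 44100;
    info.channel_mask = AUDIO_CHANNEL_OUT_STEREO;

    const audio_offload_mode_t mode = AudioSystem::getOffloadSupport(info);
    const bool canOffload = (mode != AUDIO_OFFLOAD_NOT_SUPPORTED);   // any offload at all
    const bool gapless = (mode == AUDIO_OFFLOAD_GAPLESS_SUPPORTED);  // gapless transitions too
    (void) canOffload; (void) gapless;
}
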
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 911a34f..9a8014d 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -24,8 +24,6 @@
 #include <utils/RefBase.h>
 #include <utils/Errors.h>
 #include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
 #include <media/AidlConversion.h>
 #include <media/AudioClient.h>
 #include <media/DeviceDescriptorBase.h>
@@ -37,6 +35,8 @@
 #include <string>
 #include <vector>
 
+#include <android/media/BnAudioFlingerService.h>
+#include <android/media/BpAudioFlingerService.h>
 #include "android/media/CreateEffectRequest.h"
 #include "android/media/CreateEffectResponse.h"
 #include "android/media/CreateRecordRequest.h"
@@ -58,10 +58,11 @@
 
 // ----------------------------------------------------------------------------
 
-class IAudioFlinger : public IInterface
-{
+class IAudioFlinger : public RefBase {
 public:
-    DECLARE_META_INTERFACE(AudioFlinger);
+    static constexpr char DEFAULT_SERVICE_NAME[] = "media.audio_flinger";
+
+    virtual ~IAudioFlinger() = default;
 
     /* CreateTrackInput contains all input arguments sent by AudioTrack to AudioFlinger
      * when calling createTrack() including arguments that will be updated by AudioFlinger
@@ -162,7 +163,8 @@
         sp<media::IAudioRecord> audioRecord;
 
         ConversionResult<media::CreateRecordResponse> toAidl() const;
-        static ConversionResult<CreateRecordOutput> fromAidl(const media::CreateRecordResponse& aidl);
+        static ConversionResult<CreateRecordOutput>
+        fromAidl(const media::CreateRecordResponse& aidl);
     };
 
     /* create an audio track and registers it with AudioFlinger.
@@ -300,10 +302,6 @@
     // is obtained from android.app.ActivityManager.MemoryInfo.totalMem.
     virtual status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) = 0;
 
-    /* List available audio ports and their attributes */
-    virtual status_t listAudioPorts(unsigned int *num_ports,
-                                    struct audio_port *ports) = 0;
-
     /* Get attributes for a given audio port */
     virtual status_t getAudioPort(struct audio_port_v7 *port) = 0;
 
@@ -333,85 +331,284 @@
     virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
 
     virtual status_t setAudioHalPids(const std::vector<pid_t>& pids) = 0;
-
-protected:
-    enum {
-        CREATE_TRACK = IBinder::FIRST_CALL_TRANSACTION,
-        CREATE_RECORD,
-        SAMPLE_RATE,
-        RESERVED,   // obsolete, was CHANNEL_COUNT
-        FORMAT,
-        FRAME_COUNT,
-        LATENCY,
-        SET_MASTER_VOLUME,
-        SET_MASTER_MUTE,
-        MASTER_VOLUME,
-        MASTER_MUTE,
-        SET_STREAM_VOLUME,
-        SET_STREAM_MUTE,
-        STREAM_VOLUME,
-        STREAM_MUTE,
-        SET_MODE,
-        SET_MIC_MUTE,
-        GET_MIC_MUTE,
-        SET_RECORD_SILENCED,
-        SET_PARAMETERS,
-        GET_PARAMETERS,
-        REGISTER_CLIENT,
-        GET_INPUTBUFFERSIZE,
-        OPEN_OUTPUT,
-        OPEN_DUPLICATE_OUTPUT,
-        CLOSE_OUTPUT,
-        SUSPEND_OUTPUT,
-        RESTORE_OUTPUT,
-        OPEN_INPUT,
-        CLOSE_INPUT,
-        INVALIDATE_STREAM,
-        SET_VOICE_VOLUME,
-        GET_RENDER_POSITION,
-        GET_INPUT_FRAMES_LOST,
-        NEW_AUDIO_UNIQUE_ID,
-        ACQUIRE_AUDIO_SESSION_ID,
-        RELEASE_AUDIO_SESSION_ID,
-        QUERY_NUM_EFFECTS,
-        QUERY_EFFECT,
-        GET_EFFECT_DESCRIPTOR,
-        CREATE_EFFECT,
-        MOVE_EFFECTS,
-        LOAD_HW_MODULE,
-        GET_PRIMARY_OUTPUT_SAMPLING_RATE,
-        GET_PRIMARY_OUTPUT_FRAME_COUNT,
-        SET_LOW_RAM_DEVICE,
-        LIST_AUDIO_PORTS,
-        GET_AUDIO_PORT,
-        CREATE_AUDIO_PATCH,
-        RELEASE_AUDIO_PATCH,
-        LIST_AUDIO_PATCHES,
-        SET_AUDIO_PORT_CONFIG,
-        GET_AUDIO_HW_SYNC_FOR_SESSION,
-        SYSTEM_READY,
-        FRAME_COUNT_HAL,
-        GET_MICROPHONES,
-        SET_MASTER_BALANCE,
-        GET_MASTER_BALANCE,
-        SET_EFFECT_SUSPENDED,
-        SET_AUDIO_HAL_PIDS
-    };
 };
 
-
-// ----------------------------------------------------------------------------
-
-class BnAudioFlinger : public BnInterface<IAudioFlinger>
-{
+/**
+ * A client-side adapter, wrapping an IAudioFlingerService instance and presenting it as an
+ * IAudioFlinger. Intended to be used by legacy client code that was written against IAudioFlinger,
+ * before IAudioFlingerService was introduced as an AIDL service.
+ * New clients should not use this adapter, but rather IAudioFlingerService directly, via
+ * BpAudioFlingerService.
+ */
+class AudioFlingerClientAdapter : public IAudioFlinger {
 public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
+    explicit AudioFlingerClientAdapter(const sp<media::IAudioFlingerService> delegate);
+
+    status_t createTrack(const media::CreateTrackRequest& input,
+                         media::CreateTrackResponse& output) override;
+    status_t createRecord(const media::CreateRecordRequest& input,
+                          media::CreateRecordResponse& output) override;
+    uint32_t sampleRate(audio_io_handle_t ioHandle) const override;
+    audio_format_t format(audio_io_handle_t output) const override;
+    size_t frameCount(audio_io_handle_t ioHandle) const override;
+    uint32_t latency(audio_io_handle_t output) const override;
+    status_t setMasterVolume(float value) override;
+    status_t setMasterMute(bool muted) override;
+    float masterVolume() const override;
+    bool masterMute() const override;
+    status_t setMasterBalance(float balance) override;
+    status_t getMasterBalance(float* balance) const override;
+    status_t setStreamVolume(audio_stream_type_t stream, float value,
+                             audio_io_handle_t output) override;
+    status_t setStreamMute(audio_stream_type_t stream, bool muted) override;
+    float streamVolume(audio_stream_type_t stream,
+                       audio_io_handle_t output) const override;
+    bool streamMute(audio_stream_type_t stream) const override;
+    status_t setMode(audio_mode_t mode) override;
+    status_t setMicMute(bool state) override;
+    bool getMicMute() const override;
+    void setRecordSilenced(audio_port_handle_t portId, bool silenced) override;
+    status_t setParameters(audio_io_handle_t ioHandle,
+                           const String8& keyValuePairs) override;
+    String8 getParameters(audio_io_handle_t ioHandle, const String8& keys)
+    const override;
+    void registerClient(const sp<media::IAudioFlingerClient>& client) override;
+    size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+                              audio_channel_mask_t channelMask) const override;
+    status_t openOutput(const media::OpenOutputRequest& request,
+                        media::OpenOutputResponse* response) override;
+    audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
+                                          audio_io_handle_t output2) override;
+    status_t closeOutput(audio_io_handle_t output) override;
+    status_t suspendOutput(audio_io_handle_t output) override;
+    status_t restoreOutput(audio_io_handle_t output) override;
+    status_t openInput(const media::OpenInputRequest& request,
+                       media::OpenInputResponse* response) override;
+    status_t closeInput(audio_io_handle_t input) override;
+    status_t invalidateStream(audio_stream_type_t stream) override;
+    status_t setVoiceVolume(float volume) override;
+    status_t getRenderPosition(uint32_t* halFrames, uint32_t* dspFrames,
+                               audio_io_handle_t output) const override;
+    uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const override;
+    audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) override;
+    void acquireAudioSessionId(audio_session_t audioSession, pid_t pid, uid_t uid) override;
+    void releaseAudioSessionId(audio_session_t audioSession, pid_t pid) override;
+    status_t queryNumberEffects(uint32_t* numEffects) const override;
+    status_t queryEffect(uint32_t index, effect_descriptor_t* pDescriptor) const override;
+    status_t getEffectDescriptor(const effect_uuid_t* pEffectUUID,
+                                 const effect_uuid_t* pTypeUUID,
+                                 uint32_t preferredTypeFlag,
+                                 effect_descriptor_t* pDescriptor) const override;
+    status_t createEffect(const media::CreateEffectRequest& request,
+                          media::CreateEffectResponse* response) override;
+    status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
+                         audio_io_handle_t dstOutput) override;
+    void setEffectSuspended(int effectId,
+                            audio_session_t sessionId,
+                            bool suspended) override;
+    audio_module_handle_t loadHwModule(const char* name) override;
+    uint32_t getPrimaryOutputSamplingRate() override;
+    size_t getPrimaryOutputFrameCount() override;
+    status_t setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override;
+    status_t getAudioPort(struct audio_port_v7* port) override;
+    status_t createAudioPatch(const struct audio_patch* patch,
+                              audio_patch_handle_t* handle) override;
+    status_t releaseAudioPatch(audio_patch_handle_t handle) override;
+    status_t listAudioPatches(unsigned int* num_patches,
+                              struct audio_patch* patches) override;
+    status_t setAudioPortConfig(const struct audio_port_config* config) override;
+    audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) override;
+    status_t systemReady() override;
+    size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
+    status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
+    status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
+
+private:
+    const sp<media::IAudioFlingerService> mDelegate;
 };
 
-// ----------------------------------------------------------------------------
+/**
+ * A server-side adapter, wrapping an IAudioFlinger instance and presenting it as an
+ * IAudioFlingerService. Intended to be used by legacy server code that was written against
+ * IAudioFlinger, before IAudioFlingerService was introduced as an AIDL service.
+ * New servers should not use this adapter, but rather implement IAudioFlingerService directly, via
+ * BnAudioFlingerService.
+ */
+class AudioFlingerServerAdapter : public media::BnAudioFlingerService {
+public:
+    using Status = binder::Status;
+
+    /**
+     * A legacy server should implement this interface in order to be wrapped.
+     */
+    class Delegate : public IAudioFlinger {
+    protected:
+        friend class AudioFlingerServerAdapter;
+
+        enum class TransactionCode {
+            CREATE_TRACK = media::BnAudioFlingerService::TRANSACTION_createTrack,
+            CREATE_RECORD = media::BnAudioFlingerService::TRANSACTION_createRecord,
+            SAMPLE_RATE = media::BnAudioFlingerService::TRANSACTION_sampleRate,
+            FORMAT = media::BnAudioFlingerService::TRANSACTION_format,
+            FRAME_COUNT = media::BnAudioFlingerService::TRANSACTION_frameCount,
+            LATENCY = media::BnAudioFlingerService::TRANSACTION_latency,
+            SET_MASTER_VOLUME = media::BnAudioFlingerService::TRANSACTION_setMasterVolume,
+            SET_MASTER_MUTE = media::BnAudioFlingerService::TRANSACTION_setMasterMute,
+            MASTER_VOLUME = media::BnAudioFlingerService::TRANSACTION_masterVolume,
+            MASTER_MUTE = media::BnAudioFlingerService::TRANSACTION_masterMute,
+            SET_STREAM_VOLUME = media::BnAudioFlingerService::TRANSACTION_setStreamVolume,
+            SET_STREAM_MUTE = media::BnAudioFlingerService::TRANSACTION_setStreamMute,
+            STREAM_VOLUME = media::BnAudioFlingerService::TRANSACTION_streamVolume,
+            STREAM_MUTE = media::BnAudioFlingerService::TRANSACTION_streamMute,
+            SET_MODE = media::BnAudioFlingerService::TRANSACTION_setMode,
+            SET_MIC_MUTE = media::BnAudioFlingerService::TRANSACTION_setMicMute,
+            GET_MIC_MUTE = media::BnAudioFlingerService::TRANSACTION_getMicMute,
+            SET_RECORD_SILENCED = media::BnAudioFlingerService::TRANSACTION_setRecordSilenced,
+            SET_PARAMETERS = media::BnAudioFlingerService::TRANSACTION_setParameters,
+            GET_PARAMETERS = media::BnAudioFlingerService::TRANSACTION_getParameters,
+            REGISTER_CLIENT = media::BnAudioFlingerService::TRANSACTION_registerClient,
+            GET_INPUTBUFFERSIZE = media::BnAudioFlingerService::TRANSACTION_getInputBufferSize,
+            OPEN_OUTPUT = media::BnAudioFlingerService::TRANSACTION_openOutput,
+            OPEN_DUPLICATE_OUTPUT = media::BnAudioFlingerService::TRANSACTION_openDuplicateOutput,
+            CLOSE_OUTPUT = media::BnAudioFlingerService::TRANSACTION_closeOutput,
+            SUSPEND_OUTPUT = media::BnAudioFlingerService::TRANSACTION_suspendOutput,
+            RESTORE_OUTPUT = media::BnAudioFlingerService::TRANSACTION_restoreOutput,
+            OPEN_INPUT = media::BnAudioFlingerService::TRANSACTION_openInput,
+            CLOSE_INPUT = media::BnAudioFlingerService::TRANSACTION_closeInput,
+            INVALIDATE_STREAM = media::BnAudioFlingerService::TRANSACTION_invalidateStream,
+            SET_VOICE_VOLUME = media::BnAudioFlingerService::TRANSACTION_setVoiceVolume,
+            GET_RENDER_POSITION = media::BnAudioFlingerService::TRANSACTION_getRenderPosition,
+            GET_INPUT_FRAMES_LOST = media::BnAudioFlingerService::TRANSACTION_getInputFramesLost,
+            NEW_AUDIO_UNIQUE_ID = media::BnAudioFlingerService::TRANSACTION_newAudioUniqueId,
+            ACQUIRE_AUDIO_SESSION_ID = media::BnAudioFlingerService::TRANSACTION_acquireAudioSessionId,
+            RELEASE_AUDIO_SESSION_ID = media::BnAudioFlingerService::TRANSACTION_releaseAudioSessionId,
+            QUERY_NUM_EFFECTS = media::BnAudioFlingerService::TRANSACTION_queryNumberEffects,
+            QUERY_EFFECT = media::BnAudioFlingerService::TRANSACTION_queryEffect,
+            GET_EFFECT_DESCRIPTOR = media::BnAudioFlingerService::TRANSACTION_getEffectDescriptor,
+            CREATE_EFFECT = media::BnAudioFlingerService::TRANSACTION_createEffect,
+            MOVE_EFFECTS = media::BnAudioFlingerService::TRANSACTION_moveEffects,
+            LOAD_HW_MODULE = media::BnAudioFlingerService::TRANSACTION_loadHwModule,
+            GET_PRIMARY_OUTPUT_SAMPLING_RATE = media::BnAudioFlingerService::TRANSACTION_getPrimaryOutputSamplingRate,
+            GET_PRIMARY_OUTPUT_FRAME_COUNT = media::BnAudioFlingerService::TRANSACTION_getPrimaryOutputFrameCount,
+            SET_LOW_RAM_DEVICE = media::BnAudioFlingerService::TRANSACTION_setLowRamDevice,
+            GET_AUDIO_PORT = media::BnAudioFlingerService::TRANSACTION_getAudioPort,
+            CREATE_AUDIO_PATCH = media::BnAudioFlingerService::TRANSACTION_createAudioPatch,
+            RELEASE_AUDIO_PATCH = media::BnAudioFlingerService::TRANSACTION_releaseAudioPatch,
+            LIST_AUDIO_PATCHES = media::BnAudioFlingerService::TRANSACTION_listAudioPatches,
+            SET_AUDIO_PORT_CONFIG = media::BnAudioFlingerService::TRANSACTION_setAudioPortConfig,
+            GET_AUDIO_HW_SYNC_FOR_SESSION = media::BnAudioFlingerService::TRANSACTION_getAudioHwSyncForSession,
+            SYSTEM_READY = media::BnAudioFlingerService::TRANSACTION_systemReady,
+            FRAME_COUNT_HAL = media::BnAudioFlingerService::TRANSACTION_frameCountHAL,
+            GET_MICROPHONES = media::BnAudioFlingerService::TRANSACTION_getMicrophones,
+            SET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_setMasterBalance,
+            GET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_getMasterBalance,
+            SET_EFFECT_SUSPENDED = media::BnAudioFlingerService::TRANSACTION_setEffectSuspended,
+            SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
+        };
+
+        /**
+         * And optional hook, called on every transaction, before unparceling the data and
+         * dispatching to the respective method. Useful for bulk operations, such as logging or
+         * permission checks.
+         * If an error status is returned, the transaction will return immediately and will not be
+         * processed.
+         */
+        virtual status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) {
+            (void) code;
+            (void) data;
+            (void) flags;
+            return OK;
+        };
+
+        /**
+         * An optional hook for implementing diagnostics dumping.
+         */
+        virtual status_t dump(int fd, const Vector<String16>& args) {
+            (void) fd;
+            (void) args;
+            return OK;
+        }
+    };
+
+    explicit AudioFlingerServerAdapter(
+            const sp<AudioFlingerServerAdapter::Delegate>& delegate);
+
+    status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
+    status_t dump(int fd, const Vector<String16>& args) override;
+
+    Status createTrack(const media::CreateTrackRequest& request,
+                       media::CreateTrackResponse* _aidl_return) override;
+    Status createRecord(const media::CreateRecordRequest& request,
+                        media::CreateRecordResponse* _aidl_return) override;
+    Status sampleRate(int32_t ioHandle, int32_t* _aidl_return) override;
+    Status format(int32_t output, media::audio::common::AudioFormat* _aidl_return) override;
+    Status frameCount(int32_t ioHandle, int64_t* _aidl_return) override;
+    Status latency(int32_t output, int32_t* _aidl_return) override;
+    Status setMasterVolume(float value) override;
+    Status setMasterMute(bool muted) override;
+    Status masterVolume(float* _aidl_return) override;
+    Status masterMute(bool* _aidl_return) override;
+    Status setMasterBalance(float balance) override;
+    Status getMasterBalance(float* _aidl_return) override;
+    Status setStreamVolume(media::AudioStreamType stream, float value, int32_t output) override;
+    Status setStreamMute(media::AudioStreamType stream, bool muted) override;
+    Status
+    streamVolume(media::AudioStreamType stream, int32_t output, float* _aidl_return) override;
+    Status streamMute(media::AudioStreamType stream, bool* _aidl_return) override;
+    Status setMode(media::AudioMode mode) override;
+    Status setMicMute(bool state) override;
+    Status getMicMute(bool* _aidl_return) override;
+    Status setRecordSilenced(int32_t portId, bool silenced) override;
+    Status setParameters(int32_t ioHandle, const std::string& keyValuePairs) override;
+    Status
+    getParameters(int32_t ioHandle, const std::string& keys, std::string* _aidl_return) override;
+    Status registerClient(const sp<media::IAudioFlingerClient>& client) override;
+    Status getInputBufferSize(int32_t sampleRate, media::audio::common::AudioFormat format,
+                              int32_t channelMask, int64_t* _aidl_return) override;
+    Status openOutput(const media::OpenOutputRequest& request,
+                      media::OpenOutputResponse* _aidl_return) override;
+    Status openDuplicateOutput(int32_t output1, int32_t output2, int32_t* _aidl_return) override;
+    Status closeOutput(int32_t output) override;
+    Status suspendOutput(int32_t output) override;
+    Status restoreOutput(int32_t output) override;
+    Status openInput(const media::OpenInputRequest& request,
+                     media::OpenInputResponse* _aidl_return) override;
+    Status closeInput(int32_t input) override;
+    Status invalidateStream(media::AudioStreamType stream) override;
+    Status setVoiceVolume(float volume) override;
+    Status getRenderPosition(int32_t output, media::RenderPosition* _aidl_return) override;
+    Status getInputFramesLost(int32_t ioHandle, int32_t* _aidl_return) override;
+    Status newAudioUniqueId(media::AudioUniqueIdUse use, int32_t* _aidl_return) override;
+    Status acquireAudioSessionId(int32_t audioSession, int32_t pid, int32_t uid) override;
+    Status releaseAudioSessionId(int32_t audioSession, int32_t pid) override;
+    Status queryNumberEffects(int32_t* _aidl_return) override;
+    Status queryEffect(int32_t index, media::EffectDescriptor* _aidl_return) override;
+    Status getEffectDescriptor(const media::AudioUuid& effectUUID, const media::AudioUuid& typeUUID,
+                               int32_t preferredTypeFlag,
+                               media::EffectDescriptor* _aidl_return) override;
+    Status createEffect(const media::CreateEffectRequest& request,
+                        media::CreateEffectResponse* _aidl_return) override;
+    Status moveEffects(int32_t session, int32_t srcOutput, int32_t dstOutput) override;
+    Status setEffectSuspended(int32_t effectId, int32_t sessionId, bool suspended) override;
+    Status loadHwModule(const std::string& name, int32_t* _aidl_return) override;
+    Status getPrimaryOutputSamplingRate(int32_t* _aidl_return) override;
+    Status getPrimaryOutputFrameCount(int64_t* _aidl_return) override;
+    Status setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) override;
+    Status getAudioPort(const media::AudioPort& port, media::AudioPort* _aidl_return) override;
+    Status createAudioPatch(const media::AudioPatch& patch, int32_t* _aidl_return) override;
+    Status releaseAudioPatch(int32_t handle) override;
+    Status listAudioPatches(int32_t maxCount,
+                            std::vector<media::AudioPatch>* _aidl_return) override;
+    Status setAudioPortConfig(const media::AudioPortConfig& config) override;
+    Status getAudioHwSyncForSession(int32_t sessionId, int32_t* _aidl_return) override;
+    Status systemReady() override;
+    Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
+    Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
+    Status setAudioHalPids(const std::vector<int32_t>& pids) override;
+
+private:
+    const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
+};
 
 }; // namespace android
 
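Sketch of how the two adapters above are meant to be wired up. The service lookup and registration shown here are illustrative only (the real bootstrap lives in AudioSystem and the audioserver main), and AudioFlingerImpl is a placeholder; only the adapter classes and DEFAULT_SERVICE_NAME come from this change:

// Client side: fetch the AIDL service and present it through the legacy interface.
sp<media::IAudioFlingerService> aidlService = waitForService<media::IAudioFlingerService>(
        String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
sp<IAudioFlinger> af = new AudioFlingerClientAdapter(aidlService);
uint32_t rate = af->sampleRate(ioHandle);  // legacy calls are forwarded over AIDL

// Server side: a legacy implementation derives from AudioFlingerServerAdapter::Delegate
// (i.e. implements IAudioFlinger) and may override onPreTransact() for bulk checks.
class AudioFlingerImpl : public AudioFlingerServerAdapter::Delegate {
    // ... IAudioFlinger pure virtuals implemented here (omitted) ...
    status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) override {
        (void) code; (void) data; (void) flags;
        return OK;  // a non-OK return rejects the transaction before it is dispatched
    }
};

// Publish the wrapped implementation under the well-known name.
defaultServiceManager()->addService(
        String16(IAudioFlinger::DEFAULT_SERVICE_NAME),
        new AudioFlingerServerAdapter(sp<AudioFlingerImpl>(new AudioFlingerImpl())));
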
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index 837375d..df792e3 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -150,7 +150,7 @@
     virtual status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t flags) = 0;
    // Check if offload is possible for given format, stream type, sample rate,
     // bit rate, duration, video and streaming or offload property is enabled
-    virtual bool isOffloadSupported(const audio_offload_info_t& info) = 0;
+    virtual audio_offload_mode_t getOffloadSupport(const audio_offload_info_t& info) = 0;
 
     // Check if direct playback is possible for given format, sample rate, channel mask and flags.
     virtual bool isDirectOutputSupported(const audio_config_base_t& config,
@@ -160,11 +160,11 @@
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
                                     unsigned int *num_ports,
-                                    struct audio_port *ports,
+                                    struct audio_port_v7 *ports,
                                     unsigned int *generation) = 0;
 
     /* Get attributes for a given audio port */
-    virtual status_t getAudioPort(struct audio_port *port) = 0;
+    virtual status_t getAudioPort(struct audio_port_v7 *port) = 0;
 
     /* Create an audio patch between several source and sink ports */
     virtual status_t createAudioPatch(const struct audio_patch *patch,
diff --git a/media/libaudioclient/include/media/ToneGenerator.h b/media/libaudioclient/include/media/ToneGenerator.h
index 04357a8..a575616 100644
--- a/media/libaudioclient/include/media/ToneGenerator.h
+++ b/media/libaudioclient/include/media/ToneGenerator.h
@@ -17,6 +17,8 @@
 #ifndef ANDROID_TONEGENERATOR_H_
 #define ANDROID_TONEGENERATOR_H_
 
+#include <string>
+
 #include <media/AudioSystem.h>
 #include <media/AudioTrack.h>
 #include <utils/Compat.h>
@@ -152,7 +154,8 @@
         NUM_SUP_TONES = LAST_SUP_TONE-FIRST_SUP_TONE+1
     };
 
-    ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava = false);
+    ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava = false,
+            std::string opPackageName = {});
     ~ToneGenerator();
 
     bool startTone(tone_type toneType, int durationMs = -1);
@@ -193,6 +196,7 @@
         TONE_JAPAN_DIAL,            // Dial tone: 400Hz, continuous
         TONE_JAPAN_BUSY,            // Busy tone: 400Hz, 500ms ON, 500ms OFF...
         TONE_JAPAN_RADIO_ACK,       // Radio path acknowlegment: 400Hz, 1s ON, 2s OFF...
+        TONE_JAPAN_RINGTONE,        // Ring Tone: 400Hz, 1s ON, 2s OFF...
         // GB Supervisory tones
         TONE_GB_BUSY,               // Busy tone: 400 Hz, 375ms ON, 375ms OFF...
         TONE_GB_CONGESTION,         // Congestion Tone: 400 Hz, 400ms ON, 350ms OFF, 225ms ON, 525ms OFF...
@@ -343,6 +347,8 @@
     };
 
     KeyedVector<uint16_t, WaveGenerator *> mWaveGens;  // list of active wave generators.
+
+    std::string mOpPackageName;
 };
 
 }
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 350a780..21d18d3 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -7,6 +7,18 @@
 }
 
 cc_test {
+    name: "audio_aidl_status_tests",
+    defaults: ["libaudioclient_tests_defaults"],
+    srcs: ["audio_aidl_status_tests.cpp"],
+    shared_libs: [
+        "libaudioclient_aidl_conversion",
+        "libbinder",
+        "libcutils",
+        "libutils",
+    ],
+}
+
+cc_test {
     name: "test_create_audiotrack",
     defaults: ["libaudioclient_tests_defaults"],
     srcs: ["test_create_audiotrack.cpp",
diff --git a/media/libaudioclient/tests/audio_aidl_status_tests.cpp b/media/libaudioclient/tests/audio_aidl_status_tests.cpp
new file mode 100644
index 0000000..5517091
--- /dev/null
+++ b/media/libaudioclient/tests/audio_aidl_status_tests.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <media/AidlConversionUtil.h>
+#include <utils/Errors.h>
+
+using namespace android;
+using namespace android::aidl_utils;
+using android::binder::Status;
+
+// Tests for statusTFromBinderStatus() and binderStatusFromStatusT().
+
+// STATUS_T_SMALL_VALUE_LIMIT is an arbitrary limit where we exhaustively check status_t errors.
+// It is known that this limit doesn't cover UNKNOWN_ERROR ~ INT32_MIN.
+constexpr status_t STATUS_T_SMALL_VALUE_LIMIT = -1000;
+
+// Small status values are preserved on round trip
+TEST(audio_aidl_status_tests, statusRoundTripSmallValues) {
+    for (status_t status = 0; status > STATUS_T_SMALL_VALUE_LIMIT; --status) {
+        ASSERT_EQ(status, statusTFromBinderStatus(binderStatusFromStatusT(status)));
+    }
+}
+
+// Special status values are preserved on round trip.
+TEST(audio_aidl_status_tests, statusRoundTripSpecialValues) {
+    for (status_t status : {
+            OK,
+            UNKNOWN_ERROR,
+            NO_MEMORY,
+            INVALID_OPERATION,
+            BAD_VALUE,
+            BAD_TYPE,
+            NAME_NOT_FOUND,
+            PERMISSION_DENIED,
+            NO_INIT,
+            ALREADY_EXISTS,
+            DEAD_OBJECT,
+            FAILED_TRANSACTION,
+            BAD_INDEX,
+            NOT_ENOUGH_DATA,
+            WOULD_BLOCK,
+            TIMED_OUT,
+            UNKNOWN_TRANSACTION,
+            FDS_NOT_ALLOWED}) {
+        ASSERT_EQ(status, statusTFromBinderStatus(binderStatusFromStatusT(status)));
+    }
+}
+
+// Binder exceptions show as an error (not fixed at this time); these come fromExceptionCode().
+TEST(audio_aidl_status_tests, binderStatusExceptions) {
+    for (int exceptionCode : {
+            //Status::EX_NONE,
+            Status::EX_SECURITY,
+            Status::EX_BAD_PARCELABLE,
+            Status::EX_ILLEGAL_ARGUMENT,
+            Status::EX_NULL_POINTER,
+            Status::EX_ILLEGAL_STATE,
+            Status::EX_NETWORK_MAIN_THREAD,
+            Status::EX_UNSUPPORTED_OPERATION,
+            //Status::EX_SERVICE_SPECIFIC, -- tested fromServiceSpecificError()
+            Status::EX_PARCELABLE,
+            // This is special and Java specific; see Parcel.java.
+            Status::EX_HAS_REPLY_HEADER,
+            // This is special, and indicates to C++ binder proxies that the
+            // transaction has failed at a low level.
+            //Status::EX_TRANSACTION_FAILED, -- tested fromStatusT().
+            }) {
+        ASSERT_NE(OK, statusTFromBinderStatus(Status::fromExceptionCode(exceptionCode)));
+    }
+}
+
+// Binder transaction errors show exactly in status_t; these come fromStatusT().
+TEST(audio_aidl_status_tests, binderStatusTransactionError) {
+    for (status_t status : {
+            OK, // Note: fromStatusT() checks for 0, so OK does not produce an error.
+            UNKNOWN_ERROR,
+            NO_MEMORY,
+            INVALID_OPERATION,
+            BAD_VALUE,
+            BAD_TYPE,
+            NAME_NOT_FOUND,
+            PERMISSION_DENIED,
+            NO_INIT,
+            ALREADY_EXISTS,
+            DEAD_OBJECT,
+            FAILED_TRANSACTION,
+            BAD_INDEX,
+            NOT_ENOUGH_DATA,
+            WOULD_BLOCK,
+            TIMED_OUT,
+            UNKNOWN_TRANSACTION,
+            FDS_NOT_ALLOWED}) {
+        ASSERT_EQ(status, statusTFromBinderStatus(Status::fromStatusT(status)));
+    }
+}
+
+// Binder service specific errors show in status_t; these come fromServiceSpecificError().
+TEST(audio_aidl_status_tests, binderStatusServiceSpecificError) {
+    // fromServiceSpecificError() still stores exception code if status is 0.
+    for (status_t status = -1; status > STATUS_T_SMALL_VALUE_LIMIT; --status) {
+        ASSERT_EQ(status, statusTFromBinderStatus(Status::fromServiceSpecificError(status)));
+    }
+}
+
+// Binder status with message.
+TEST(audio_aidl_status_tests, binderStatusMessage) {
+    const String8 message("abcd");
+    for (status_t status = -1; status > STATUS_T_SMALL_VALUE_LIMIT; --status) {
+        const Status binderStatus = binderStatusFromStatusT(status, message.c_str());
+        ASSERT_EQ(status, statusTFromBinderStatus(binderStatus));
+        ASSERT_EQ(message, binderStatus.exceptionMessage());
+    }
+}
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index 56343d8..1dee938 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -140,7 +140,7 @@
     parcelable->useInChannelMask = mUseInChannelMask;
     parcelable->useForVolume = mUseForVolume;
     parcelable->mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_mask_int32_t(mGain.mode));
+            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
     parcelable->channelMask = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
     parcelable->minValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_value));
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index 6b63675..20d8632 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -291,7 +291,7 @@
     parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
     parcelable->gain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.index));
     parcelable->gain.mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_mask_int32_t(mGain.mode));
+            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
     parcelable->gain.channelMask = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
     parcelable->gain.rampDurationMs = VALUE_OR_RETURN_STATUS(
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 77c2550..0108816 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -215,7 +215,7 @@
         const struct audio_config *config, size_t *size) {
     if (mDevice == 0) return NO_INIT;
     AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    HidlUtils::audioConfigFromHal(*config, true /*isInput*/, &hidlConfig);
     Result retval;
     Return<void> ret = mDevice->getInputBufferSize(
             hidlConfig,
@@ -240,7 +240,7 @@
     status_t status = deviceAddressFromHal(deviceType, address, &hidlDevice);
     if (status != OK) return status;
     AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    HidlUtils::audioConfigFromHal(*config, false /*isInput*/, &hidlConfig);
     Result retval = Result::NOT_INITIALIZED;
     Return<void> ret = mDevice->openOutputStream(
             handle,
@@ -275,7 +275,7 @@
     status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
     if (status != OK) return status;
     AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    HidlUtils::audioConfigFromHal(*config, true /*isInput*/, &hidlConfig);
     Result retval = Result::NOT_INITIALIZED;
 #if MAJOR_VERSION == 2
     auto sinkMetadata = AudioSource(source);
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 8f2f016..dbe0d62 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -131,12 +131,15 @@
     shared_libs: [
         "liblog",
     ],
+    static_libs: [
+        "libaudioutils",
+    ],
     header_libs: [
         "libhardware_headers",
     ],
     cppflags: [
+        "-DBIQUAD_OPT",
         "-fvisibility=hidden",
-
         "-Wall",
         "-Werror",
     ],
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
index 5b47aa6..1f0b459 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
@@ -21,6 +21,9 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 #include "LVDBE.h"
 #include "LVDBE_Private.h"
 #include "VectorArithmetic.h"
@@ -107,12 +110,20 @@
     /*
      * Setup the high pass filter
      */
+#ifdef BIQUAD_OPT
+    std::array<LVM_FLOAT, android::audio_utils::kBiquadNumCoefs> coefs = {
+            LVDBE_HPF_Table[Offset].A0, LVDBE_HPF_Table[Offset].A1, LVDBE_HPF_Table[Offset].A2,
+            -(LVDBE_HPF_Table[Offset].B1), -(LVDBE_HPF_Table[Offset].B2)};
+    pInstance->pBqInstance
+            ->setCoefficients<std::array<LVM_FLOAT, android::audio_utils::kBiquadNumCoefs>>(coefs);
+#else
     LoadConst_Float(0,                                      /* Clear the history, value 0 */
                     (LVM_FLOAT*)&pInstance->pData->HPFTaps, /* Destination */
                     sizeof(pInstance->pData->HPFTaps) / sizeof(LVM_FLOAT)); /* Number of words */
     BQ_2I_D32F32Cll_TRC_WRA_01_Init(&pInstance->pCoef->HPFInstance, /* Initialise the filter */
                                     &pInstance->pData->HPFTaps,
                                     (BQ_FLOAT_Coefs_t*)&LVDBE_HPF_Table[Offset]);
+#endif
 
     /*
      * Setup the band pass filter
@@ -275,6 +286,15 @@
     LVDBE_Instance_t* pInstance = (LVDBE_Instance_t*)hInstance;
     LVMixer3_2St_FLOAT_st* pBypassMixer_Instance = &pInstance->pData->BypassMixer;
 
+#ifdef BIQUAD_OPT
+    /*
+     * Create biquad instance
+     */
+    pInstance->pBqInstance.reset(
+            new android::audio_utils::BiquadFilter<LVM_FLOAT>(pParams->NrChannels));
+    pInstance->pBqInstance->clear();
+#endif
+
     /*
      * Update the filters
      */
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
index 12af162..611b762 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
@@ -94,6 +94,14 @@
         return LVDBE_NULLADDRESS;
     }
 
+#ifdef BIQUAD_OPT
+    /*
+     * Create biquad instance
+     */
+    pInstance->pBqInstance.reset(
+            new android::audio_utils::BiquadFilter<LVM_FLOAT>(LVM_MAX_CHANNELS));
+#endif
+
     /*
      * Initialise the filters
      */
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
index 4fef1ef..fa85638 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
@@ -33,6 +33,9 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 #include "LVDBE.h" /* Calling or Application layer definitions */
 #include "BIQUAD.h"
 #include "LVC_Mixer.h"
@@ -63,7 +66,9 @@
     AGC_MIX_VOL_2St1Mon_FLOAT_t AGCInstance; /* AGC instance parameters */
 
     /* Process variables */
+#ifndef BIQUAD_OPT
     Biquad_2I_Order2_FLOAT_Taps_t HPFTaps; /* High pass filter taps */
+#endif
     Biquad_1I_Order2_FLOAT_Taps_t BPFTaps; /* Band pass filter taps */
     LVMixer3_1St_FLOAT_st BypassVolume;    /* Bypass volume scaler */
     LVMixer3_2St_FLOAT_st BypassMixer;     /* Bypass Mixer for Click Removal */
@@ -73,7 +78,9 @@
 /* Coefs structure */
 typedef struct {
     /* Process variables */
+#ifndef BIQUAD_OPT
     Biquad_FLOAT_Instance_t HPFInstance; /* High pass filter instance */
+#endif
     Biquad_FLOAT_Instance_t BPFInstance; /* Band pass filter instance */
 } LVDBE_Coef_FLOAT_t;
 /* Instance structure */
@@ -86,6 +93,10 @@
     LVDBE_Data_FLOAT_t* pData; /* Instance data */
     LVDBE_Coef_FLOAT_t* pCoef; /* Instance coefficients */
     void* pScratch;            /* scratch pointer */
+#ifdef BIQUAD_OPT
+    std::unique_ptr<android::audio_utils::BiquadFilter<LVM_FLOAT>>
+            pBqInstance; /* Biquad filter instance */
+#endif
 } LVDBE_Instance_t;
 
 /****************************************************************************************/
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
index f4a4d6f..bd04a02 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
@@ -20,6 +20,9 @@
 /*    Includes                                                                          */
 /*                                                                                      */
 /****************************************************************************************/
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 
 #include <string.h>  // memset
 #include "LVDBE.h"
@@ -125,10 +128,14 @@
          * Apply the high pass filter if selected
          */
         if (pInstance->Params.HPFSelect == LVDBE_HPF_ON) {
+#ifdef BIQUAD_OPT
+            pInstance->pBqInstance->process(pScratch, pScratch, NrFrames);
+#else
             BQ_MC_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance, /* Filter instance      */
                                        pScratch,                       /* Source               */
                                        pScratch,                       /* Destination          */
                                        (LVM_INT16)NrFrames, (LVM_INT16)NrChannels);
+#endif
         }
 
         /*
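
For reference, a standalone sketch of the audio_utils BiquadFilter pattern that the BIQUAD_OPT path above adopts. The function name and coefficient values are placeholders (not the LVDBE high-pass table entries); the coefficient ordering and sign convention mirror the conversion done in LVDBE_Control.cpp:

#include <array>
#include <vector>
#include <audio_utils/BiquadFilter.h>

void biquadSketch() {
    constexpr size_t kChannels = 2;
    constexpr size_t kFrames = 256;

    // One filter instance handles all interleaved channels.
    android::audio_utils::BiquadFilter<float> hpf(kChannels);

    // Coefficients are {b0, b1, b2, a1, a2} with a0 normalized to 1; note the sign flip
    // applied to the legacy B1/B2 terms in LVDBE_Control.cpp. Placeholder values only.
    const std::array<float, android::audio_utils::kBiquadNumCoefs> coefs = {
            0.9f, -1.8f, 0.9f, -1.8f, 0.81f};
    hpf.setCoefficients(coefs);
    hpf.clear();  // reset the filter history, replacing the old LoadConst_Float() call

    // In-place, interleaved multichannel processing, as used in LVDBE_Process().
    std::vector<float> buffer(kChannels * kFrames);
    hpf.process(buffer.data(), buffer.data(), kFrames);
}
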
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 670b415..865baad 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -1021,6 +1021,16 @@
         ActiveParams.NrChannels = NrChannels;
         ActiveParams.ChMask = pConfig->inputCfg.channels;
 
+        if (NrChannels == 1) {
+            ActiveParams.SourceFormat = LVM_MONO;
+        } else if (NrChannels == 2) {
+            ActiveParams.SourceFormat = LVM_STEREO;
+        } else if (NrChannels > 2 && NrChannels <= LVM_MAX_CHANNELS) {
+            ActiveParams.SourceFormat = LVM_MULTICHANNEL;
+        } else {
+            return -EINVAL;
+        }
+
         LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
 
         LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_setConfig")
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index 5217cf9..681e247 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -1,35 +1,5 @@
 // audio preprocessing wrapper
 cc_library_shared {
-    name: "libaudiopreprocessing_legacy",
-
-    vendor: true,
-
-    relative_install_path: "soundfx",
-
-    srcs: ["PreProcessing.cpp"],
-
-    shared_libs: [
-        "libwebrtc_audio_preprocessing",
-        "libspeexresampler",
-        "libutils",
-        "liblog",
-    ],
-
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=hidden",
-        "-Wall",
-        "-Werror",
-    ],
-
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-    ],
-}
-
-cc_library_shared {
     name: "libaudiopreprocessing",
     vendor: true,
     relative_install_path: "soundfx",
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 1a5547b..03ccc34 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -23,15 +23,10 @@
 #include <hardware/audio_effect.h>
 #include <utils/Log.h>
 #include <utils/Timers.h>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <audio_processing.h>
 #include <module_common_types.h>
-#ifdef WEBRTC_LEGACY
-#include "speex/speex_resampler.h"
-#endif
 
 // undefine to perform multi channels API functional tests
 //#define DUAL_MIC_TEST
@@ -46,9 +41,7 @@
 // types of pre processing modules
 enum preproc_id {
     PREPROC_AGC,  // Automatic Gain Control
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
     PREPROC_NUM_EFFECTS
@@ -110,10 +103,8 @@
     int id;                        // audio session ID
     int io;                        // handle of input stream this session is on
     webrtc::AudioProcessing* apm;  // handle on webRTC audio processing module (APM)
-#ifndef WEBRTC_LEGACY
     // Audio Processing module builder
     webrtc::AudioProcessingBuilder ap_builder;
-#endif
     size_t apmFrameCount;      // buffer size for webRTC process (10 ms)
     uint32_t apmSamplingRate;  // webRTC APM sampling rate (8/16 or 32 kHz)
     size_t frameCount;         // buffer size before input resampler ( <=> apmFrameCount)
@@ -124,42 +115,25 @@
     uint32_t enabledMsk;       // bit field containing IDs of enabled pre processors
     uint32_t processedMsk;     // bit field containing IDs of pre processors already
                                // processed in current round
-#ifdef WEBRTC_LEGACY
-    webrtc::AudioFrame* procFrame;  // audio frame passed to webRTC AMP ProcessStream()
-#else
     // audio config strucutre
     webrtc::AudioProcessing::Config config;
     webrtc::StreamConfig inputConfig;   // input stream configuration
     webrtc::StreamConfig outputConfig;  // output stream configuration
-#endif
     int16_t* inBuf;    // input buffer used when resampling
     size_t inBufSize;  // input buffer size in frames
     size_t framesIn;   // number of frames in input buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* inResampler;  // handle on input speex resampler
-#endif
     int16_t* outBuf;    // output buffer used when resampling
     size_t outBufSize;  // output buffer size in frames
     size_t framesOut;   // number of frames in output buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* outResampler;  // handle on output speex resampler
-#endif
     uint32_t revChannelCount;  // number of channels on reverse stream
     uint32_t revEnabledMsk;    // bit field containing IDs of enabled pre processors
                                // with reverse channel
     uint32_t revProcessedMsk;  // bit field containing IDs of pre processors with reverse
                                // channel already processed in current round
-#ifdef WEBRTC_LEGACY
-    webrtc::AudioFrame* revFrame;  // audio frame passed to webRTC AMP AnalyzeReverseStream()
-#else
     webrtc::StreamConfig revConfig;     // reverse stream configuration.
-#endif
     int16_t* revBuf;    // reverse channel input buffer
     size_t revBufSize;  // reverse channel input buffer size
     size_t framesRev;   // number of frames in reverse channel input buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* revResampler;  // handle on reverse channel input speex resampler
-#endif
 };
 
 #ifdef DUAL_MIC_TEST
@@ -213,7 +187,6 @@
         "Automatic Gain Control",
         "The Android Open Source Project"};
 
-#ifndef WEBRTC_LEGACY
 // Automatic Gain Control 2
 static const effect_descriptor_t sAgc2Descriptor = {
         {0xae3c653b, 0xbe18, 0x4ab8, 0x8938, {0x41, 0x8f, 0x0a, 0x7f, 0x06, 0xac}},  // type
@@ -224,7 +197,6 @@
         0,  // FIXME indicate memory usage
         "Automatic Gain Control 2",
         "The Android Open Source Project"};
-#endif
 
 // Acoustic Echo Cancellation
 static const effect_descriptor_t sAecDescriptor = {
@@ -249,9 +221,7 @@
         "The Android Open Source Project"};
 
 static const effect_descriptor_t* sDescriptors[PREPROC_NUM_EFFECTS] = {&sAgcDescriptor,
-#ifndef WEBRTC_LEGACY
                                                                        &sAgc2Descriptor,
-#endif
                                                                        &sAecDescriptor,
                                                                        &sNsDescriptor};
 
@@ -260,9 +230,7 @@
 //------------------------------------------------------------------------------
 
 const effect_uuid_t* const sUuidToPreProcTable[PREPROC_NUM_EFFECTS] = {FX_IID_AGC,
-#ifndef WEBRTC_LEGACY
                                                                        FX_IID_AGC2,
-#endif
                                                                        FX_IID_AEC, FX_IID_NS};
 
 const effect_uuid_t* ProcIdToUuid(int procId) {
@@ -297,7 +265,6 @@
 static const int kAgcDefaultCompGain = 9;
 static const bool kAgcDefaultLimiter = true;
 
-#ifndef WEBRTC_LEGACY
 int Agc2Init(preproc_effect_t* effect) {
     ALOGV("Agc2Init");
     effect->session->config = effect->session->apm->GetConfig();
@@ -308,48 +275,27 @@
     effect->session->apm->ApplyConfig(effect->session->config);
     return 0;
 }
-#endif
 
 int AgcInit(preproc_effect_t* effect) {
     ALOGV("AgcInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    agc->set_mode(webrtc::GainControl::kFixedDigital);
-    agc->set_target_level_dbfs(kAgcDefaultTargetLevel);
-    agc->set_compression_gain_db(kAgcDefaultCompGain);
-    agc->enable_limiter(kAgcDefaultLimiter);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.target_level_dbfs = kAgcDefaultTargetLevel;
     effect->session->config.gain_controller1.compression_gain_db = kAgcDefaultCompGain;
     effect->session->config.gain_controller1.enable_limiter = kAgcDefaultLimiter;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     return 0;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2Create(preproc_effect_t* effect) {
     Agc2Init(effect);
     return 0;
 }
-#endif
 
 int AgcCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = effect->session->apm->gain_control();
-    ALOGV("AgcCreate got agc %p", agc);
-    if (agc == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(agc);
-#endif
     AgcInit(effect);
     return 0;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2GetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
@@ -422,15 +368,11 @@
 
     return status;
 }
-#endif
 
 int AgcGetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
     t_agc_settings* pProperties = (t_agc_settings*)pValue;
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-#endif
 
     switch (param) {
         case AGC_PARAM_TARGET_LEVEL:
@@ -459,32 +401,6 @@
             break;
     }
 
-#ifdef WEBRTC_LEGACY
-    switch (param) {
-        case AGC_PARAM_TARGET_LEVEL:
-            *(int16_t*)pValue = (int16_t)(agc->target_level_dbfs() * -100);
-            ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t*)pValue);
-            break;
-        case AGC_PARAM_COMP_GAIN:
-            *(int16_t*)pValue = (int16_t)(agc->compression_gain_db() * 100);
-            ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t*)pValue);
-            break;
-        case AGC_PARAM_LIMITER_ENA:
-            *(bool*)pValue = (bool)agc->is_limiter_enabled();
-            ALOGV("AgcGetParameter() limiter enabled %s",
-                  (*(int16_t*)pValue != 0) ? "true" : "false");
-            break;
-        case AGC_PARAM_PROPERTIES:
-            pProperties->targetLevel = (int16_t)(agc->target_level_dbfs() * -100);
-            pProperties->compGain = (int16_t)(agc->compression_gain_db() * 100);
-            pProperties->limiterEnabled = (bool)agc->is_limiter_enabled();
-            break;
-        default:
-            ALOGW("AgcGetParameter() unknown param %d", param);
-            status = -EINVAL;
-            break;
-    }
-#else
     effect->session->config = effect->session->apm->GetConfig();
     switch (param) {
         case AGC_PARAM_TARGET_LEVEL:
@@ -515,11 +431,9 @@
             status = -EINVAL;
             break;
     }
-#endif
     return status;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2SetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
@@ -567,43 +481,9 @@
 
     return status;
 }
-#endif
 
 int AgcSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
-#ifdef WEBRTC_LEGACY
-    uint32_t param = *(uint32_t*)pParam;
-    t_agc_settings* pProperties = (t_agc_settings*)pValue;
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-
-    switch (param) {
-        case AGC_PARAM_TARGET_LEVEL:
-            ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t*)pValue);
-            status = agc->set_target_level_dbfs(-(*(int16_t*)pValue / 100));
-            break;
-        case AGC_PARAM_COMP_GAIN:
-            ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t*)pValue);
-            status = agc->set_compression_gain_db(*(int16_t*)pValue / 100);
-            break;
-        case AGC_PARAM_LIMITER_ENA:
-            ALOGV("AgcSetParameter() limiter enabled %s", *(bool*)pValue ? "true" : "false");
-            status = agc->enable_limiter(*(bool*)pValue);
-            break;
-        case AGC_PARAM_PROPERTIES:
-            ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
-                  pProperties->targetLevel, pProperties->compGain, pProperties->limiterEnabled);
-            status = agc->set_target_level_dbfs(-(pProperties->targetLevel / 100));
-            if (status != 0) break;
-            status = agc->set_compression_gain_db(pProperties->compGain / 100);
-            if (status != 0) break;
-            status = agc->enable_limiter(pProperties->limiterEnabled);
-            break;
-        default:
-            ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
-            status = -EINVAL;
-            break;
-    }
-#else
     uint32_t param = *(uint32_t*)pParam;
     t_agc_settings* pProperties = (t_agc_settings*)pValue;
     effect->session->config = effect->session->apm->GetConfig();
@@ -637,96 +517,57 @@
             break;
     }
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 
     ALOGV("AgcSetParameter() done status %d", status);
 
     return status;
 }
 
-#ifndef WEBRTC_LEGACY
 void Agc2Enable(preproc_effect_t* effect) {
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller2.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
 }
-#endif
 
 void AgcEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    ALOGV("AgcEnable agc %p", agc);
-    agc->Enable(true);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
-#ifndef WEBRTC_LEGACY
 void Agc2Disable(preproc_effect_t* effect) {
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller2.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
 }
-#endif
 
 void AgcDisable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    ALOGV("AgcDisable");
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    agc->Enable(false);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 static const preproc_ops_t sAgcOps = {AgcCreate,       AgcInit,         NULL, AgcEnable, AgcDisable,
                                       AgcSetParameter, AgcGetParameter, NULL};
 
-#ifndef WEBRTC_LEGACY
 static const preproc_ops_t sAgc2Ops = {Agc2Create,       Agc2Init,    NULL,
                                        Agc2Enable,       Agc2Disable, Agc2SetParameter,
                                        Agc2GetParameter, NULL};
-#endif
 
 //------------------------------------------------------------------------------
 // Acoustic Echo Canceler (AEC)
 //------------------------------------------------------------------------------
 
-#ifdef WEBRTC_LEGACY
-static const webrtc::EchoControlMobile::RoutingMode kAecDefaultMode =
-        webrtc::EchoControlMobile::kEarpiece;
-static const bool kAecDefaultComfortNoise = true;
-#endif
 
 int AecInit(preproc_effect_t* effect) {
     ALOGV("AecInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    aec->set_routing_mode(kAecDefaultMode);
-    aec->enable_comfort_noise(kAecDefaultComfortNoise);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.mobile_mode = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     return 0;
 }
 
 int AecCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = effect->session->apm->echo_control_mobile();
-    ALOGV("AecCreate got aec %p", aec);
-    if (aec == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(aec);
-#endif
     AecInit(effect);
     return 0;
 }
@@ -744,13 +585,11 @@
             *(uint32_t*)pValue = 1000 * effect->session->apm->stream_delay_ms();
             ALOGV("AecGetParameter() echo delay %d us", *(uint32_t*)pValue);
             break;
-#ifndef WEBRTC_LEGACY
         case AEC_PARAM_MOBILE_MODE:
             effect->session->config = effect->session->apm->GetConfig();
             *(uint32_t*)pValue = effect->session->config.echo_canceller.mobile_mode;
             ALOGV("AecGetParameter() mobile mode %d us", *(uint32_t*)pValue);
             break;
-#endif
         default:
             ALOGW("AecGetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
             status = -EINVAL;
@@ -770,14 +609,12 @@
             status = effect->session->apm->set_stream_delay_ms(value / 1000);
             ALOGV("AecSetParameter() echo delay %d us, status %d", value, status);
             break;
-#ifndef WEBRTC_LEGACY
         case AEC_PARAM_MOBILE_MODE:
             effect->session->config = effect->session->apm->GetConfig();
             effect->session->config.echo_canceller.mobile_mode = value;
             ALOGV("AecSetParameter() mobile mode %d us", value);
             effect->session->apm->ApplyConfig(effect->session->config);
             break;
-#endif
         default:
             ALOGW("AecSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
             status = -EINVAL;
@@ -787,57 +624,24 @@
 }
 
 void AecEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    ALOGV("AecEnable aec %p", aec);
-    aec->Enable(true);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 void AecDisable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    ALOGV("AecDisable");
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    aec->Enable(false);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 int AecSetDevice(preproc_effect_t* effect, uint32_t device) {
     ALOGV("AecSetDevice %08x", device);
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    webrtc::EchoControlMobile::RoutingMode mode =
-            webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
-#endif
 
     if (audio_is_input_device(device)) {
         return 0;
     }
 
-#ifdef WEBRTC_LEGACY
-    switch (device) {
-        case AUDIO_DEVICE_OUT_EARPIECE:
-            mode = webrtc::EchoControlMobile::kEarpiece;
-            break;
-        case AUDIO_DEVICE_OUT_SPEAKER:
-            mode = webrtc::EchoControlMobile::kSpeakerphone;
-            break;
-        case AUDIO_DEVICE_OUT_WIRED_HEADSET:
-        case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
-        case AUDIO_DEVICE_OUT_USB_HEADSET:
-        default:
-            break;
-    }
-    aec->set_routing_mode(mode);
-#endif
     return 0;
 }
 
@@ -849,49 +653,19 @@
 // Noise Suppression (NS)
 //------------------------------------------------------------------------------
 
-#ifdef WEBRTC_LEGACY
-static const webrtc::NoiseSuppression::Level kNsDefaultLevel = webrtc::NoiseSuppression::kModerate;
-#else
 static const webrtc::AudioProcessing::Config::NoiseSuppression::Level kNsDefaultLevel =
         webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
-#endif
 
 int NsInit(preproc_effect_t* effect) {
     ALOGV("NsInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ns->set_level(kNsDefaultLevel);
-    webrtc::Config config;
-    std::vector<webrtc::Point> geometry;
-    // TODO(aluebs): Make the geometry settable.
-    geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(-0.01f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(0.01f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f));
-    // The geometry needs to be set with Beamforming enabled.
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
-    effect->session->apm->SetExtraOptions(config);
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
-    effect->session->apm->SetExtraOptions(config);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.level = kNsDefaultLevel;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     effect->type = NS_TYPE_SINGLE_CHANNEL;
     return 0;
 }
 
 int NsCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = effect->session->apm->noise_suppression();
-    ALOGV("NsCreate got ns %p", ns);
-    if (ns == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(ns);
-#endif
     NsInit(effect);
     return 0;
 }
@@ -904,31 +678,6 @@
 
 int NsSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    uint32_t param = *(uint32_t*)pParam;
-    uint32_t value = *(uint32_t*)pValue;
-    switch (param) {
-        case NS_PARAM_LEVEL:
-            ns->set_level((webrtc::NoiseSuppression::Level)value);
-            ALOGV("NsSetParameter() level %d", value);
-            break;
-        case NS_PARAM_TYPE: {
-            webrtc::Config config;
-            std::vector<webrtc::Point> geometry;
-            bool is_beamforming_enabled = value == NS_TYPE_MULTI_CHANNEL && ns->is_enabled();
-            config.Set<webrtc::Beamforming>(
-                    new webrtc::Beamforming(is_beamforming_enabled, geometry));
-            effect->session->apm->SetExtraOptions(config);
-            effect->type = value;
-            ALOGV("NsSetParameter() type %d", value);
-            break;
-        }
-        default:
-            ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
-            status = -EINVAL;
-    }
-#else
     uint32_t param = *(uint32_t*)pParam;
     uint32_t value = *(uint32_t*)pValue;
     effect->session->config = effect->session->apm->GetConfig();
@@ -943,52 +692,28 @@
             status = -EINVAL;
     }
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 
     return status;
 }
 
 void NsEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ALOGV("NsEnable ns %p", ns);
-    ns->Enable(true);
-    if (effect->type == NS_TYPE_MULTI_CHANNEL) {
-        webrtc::Config config;
-        std::vector<webrtc::Point> geometry;
-        config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
-        effect->session->apm->SetExtraOptions(config);
-    }
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 void NsDisable(preproc_effect_t* effect) {
     ALOGV("NsDisable");
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ns->Enable(false);
-    webrtc::Config config;
-    std::vector<webrtc::Point> geometry;
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
-    effect->session->apm->SetExtraOptions(config);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 static const preproc_ops_t sNsOps = {NsCreate,  NsInit,         NULL,           NsEnable,
                                      NsDisable, NsSetParameter, NsGetParameter, NULL};
 
 static const preproc_ops_t* sPreProcOps[PREPROC_NUM_EFFECTS] = {&sAgcOps,
-#ifndef WEBRTC_LEGACY
                                                                 &sAgc2Ops,
-#endif
                                                                 &sAecOps, &sNsOps};
 
 //------------------------------------------------------------------------------
@@ -1119,9 +844,6 @@
     session->id = 0;
     session->io = 0;
     session->createdMsk = 0;
-#ifdef WEBRTC_LEGACY
-    session->apm = NULL;
-#endif
     for (i = 0; i < PREPROC_NUM_EFFECTS && status == 0; i++) {
         status = Effect_Init(&session->effects[i], i);
     }
@@ -1135,75 +857,32 @@
     ALOGV("Session_CreateEffect procId %d, createdMsk %08x", procId, session->createdMsk);
 
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
-        session->apm = webrtc::AudioProcessing::Create();
-        if (session->apm == NULL) {
-            ALOGW("Session_CreateEffect could not get apm engine");
-            goto error;
-        }
-        const webrtc::ProcessingConfig processing_config = {
-                {{kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl}}};
-        session->apm->Initialize(processing_config);
-        session->procFrame = new webrtc::AudioFrame();
-        if (session->procFrame == NULL) {
-            ALOGW("Session_CreateEffect could not allocate audio frame");
-            goto error;
-        }
-        session->revFrame = new webrtc::AudioFrame();
-        if (session->revFrame == NULL) {
-            ALOGW("Session_CreateEffect could not allocate reverse audio frame");
-            goto error;
-        }
-#else
         session->apm = session->ap_builder.Create();
         if (session->apm == NULL) {
             ALOGW("Session_CreateEffect could not get apm engine");
             goto error;
         }
-#endif
         session->apmSamplingRate = kPreprocDefaultSr;
         session->apmFrameCount = (kPreprocDefaultSr) / 100;
         session->frameCount = session->apmFrameCount;
         session->samplingRate = kPreprocDefaultSr;
         session->inChannelCount = kPreProcDefaultCnl;
         session->outChannelCount = kPreProcDefaultCnl;
-#ifdef WEBRTC_LEGACY
-        session->procFrame->sample_rate_hz_ = kPreprocDefaultSr;
-        session->procFrame->num_channels_ = kPreProcDefaultCnl;
-#else
         session->inputConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->inputConfig.set_num_channels(kPreProcDefaultCnl);
         session->outputConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->outputConfig.set_num_channels(kPreProcDefaultCnl);
-#endif
         session->revChannelCount = kPreProcDefaultCnl;
-#ifdef WEBRTC_LEGACY
-        session->revFrame->sample_rate_hz_ = kPreprocDefaultSr;
-        session->revFrame->num_channels_ = kPreProcDefaultCnl;
-#else
         session->revConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->revConfig.set_num_channels(kPreProcDefaultCnl);
-#endif
         session->enabledMsk = 0;
         session->processedMsk = 0;
         session->revEnabledMsk = 0;
         session->revProcessedMsk = 0;
-#ifdef WEBRTC_LEGACY
-        session->inResampler = NULL;
-#endif
         session->inBuf = NULL;
         session->inBufSize = 0;
-#ifdef WEBRTC_LEGACY
-        session->outResampler = NULL;
-#endif
         session->outBuf = NULL;
         session->outBufSize = 0;
-#ifdef WEBRTC_LEGACY
-        session->revResampler = NULL;
-#endif
         session->revBuf = NULL;
         session->revBufSize = 0;
     }
@@ -1217,17 +896,8 @@
 
 error:
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
-        delete session->revFrame;
-        session->revFrame = NULL;
-        delete session->procFrame;
-        session->procFrame = NULL;
-        delete session->apm;
-        session->apm = NULL;  // NOLINT(clang-analyzer-cplusplus.NewDelete)
-#else
         delete session->apm;
         session->apm = NULL;
-#endif
     }
     return status;
 }
@@ -1236,29 +906,8 @@
     ALOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
     session->createdMsk &= ~(1 << fx->procId);
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
         delete session->apm;
         session->apm = NULL;
-        delete session->procFrame;
-        session->procFrame = NULL;
-        delete session->revFrame;
-        session->revFrame = NULL;
-        if (session->inResampler != NULL) {
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-        }
-        if (session->outResampler != NULL) {
-            speex_resampler_destroy(session->outResampler);
-            session->outResampler = NULL;
-        }
-        if (session->revResampler != NULL) {
-            speex_resampler_destroy(session->revResampler);
-            session->revResampler = NULL;
-        }
-#else
-        delete session->apm;
-        session->apm = NULL;
-#endif
         delete session->inBuf;
         session->inBuf = NULL;
         delete session->outBuf;
@@ -1284,9 +933,6 @@
 
     ALOGV("Session_SetConfig sr %d cnl %08x", config->inputCfg.samplingRate,
           config->inputCfg.channels);
-#ifdef WEBRTC_LEGACY
-    int status;
-#endif
 
     // AEC implementation is limited to 16kHz
     if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
@@ -1297,51 +943,25 @@
         session->apmSamplingRate = 8000;
     }
 
-#ifdef WEBRTC_LEGACY
-    const webrtc::ProcessingConfig processing_config = {
-            {{static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), outCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl}}};
-    status = session->apm->Initialize(processing_config);
-    if (status < 0) {
-        return -EINVAL;
-    }
-#endif
 
     session->samplingRate = config->inputCfg.samplingRate;
     session->apmFrameCount = session->apmSamplingRate / 100;
     if (session->samplingRate == session->apmSamplingRate) {
         session->frameCount = session->apmFrameCount;
     } else {
-#ifdef WEBRTC_LEGACY
-        session->frameCount =
-                (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate + 1;
-#else
         session->frameCount =
                 (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate;
-#endif
     }
     session->inChannelCount = inCnl;
     session->outChannelCount = outCnl;
-#ifdef WEBRTC_LEGACY
-    session->procFrame->num_channels_ = inCnl;
-    session->procFrame->sample_rate_hz_ = session->apmSamplingRate;
-#else
     session->inputConfig.set_sample_rate_hz(session->samplingRate);
     session->inputConfig.set_num_channels(inCnl);
     session->outputConfig.set_sample_rate_hz(session->samplingRate);
     session->outputConfig.set_num_channels(inCnl);
-#endif
 
     session->revChannelCount = inCnl;
-#ifdef WEBRTC_LEGACY
-    session->revFrame->num_channels_ = inCnl;
-    session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
-#else
     session->revConfig.set_sample_rate_hz(session->samplingRate);
     session->revConfig.set_num_channels(inCnl);
-#endif
 
     // force process buffer reallocation
     session->inBufSize = 0;
@@ -1349,53 +969,6 @@
     session->framesIn = 0;
     session->framesOut = 0;
 
-#ifdef WEBRTC_LEGACY
-    if (session->inResampler != NULL) {
-        speex_resampler_destroy(session->inResampler);
-        session->inResampler = NULL;
-    }
-    if (session->outResampler != NULL) {
-        speex_resampler_destroy(session->outResampler);
-        session->outResampler = NULL;
-    }
-    if (session->revResampler != NULL) {
-        speex_resampler_destroy(session->revResampler);
-        session->revResampler = NULL;
-    }
-    if (session->samplingRate != session->apmSamplingRate) {
-        int error;
-        session->inResampler =
-                speex_resampler_init(session->inChannelCount, session->samplingRate,
-                                     session->apmSamplingRate, RESAMPLER_QUALITY, &error);
-        if (session->inResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            return -EINVAL;
-        }
-        session->outResampler =
-                speex_resampler_init(session->outChannelCount, session->apmSamplingRate,
-                                     session->samplingRate, RESAMPLER_QUALITY, &error);
-        if (session->outResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-            return -EINVAL;
-        }
-        session->revResampler =
-                speex_resampler_init(session->inChannelCount, session->samplingRate,
-                                     session->apmSamplingRate, RESAMPLER_QUALITY, &error);
-        if (session->revResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-            speex_resampler_destroy(session->outResampler);
-            session->outResampler = NULL;
-            return -EINVAL;
-        }
-    }
-#endif
 
     session->state = PREPROC_SESSION_STATE_CONFIG;
     return 0;
@@ -1430,22 +1003,7 @@
         return -EINVAL;
     }
     uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
-#ifdef WEBRTC_LEGACY
-    const webrtc::ProcessingConfig processing_config = {
-            {{static_cast<int>(session->apmSamplingRate), session->inChannelCount},
-             {static_cast<int>(session->apmSamplingRate), session->outChannelCount},
-             {static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl}}};
-    int status = session->apm->Initialize(processing_config);
-    if (status < 0) {
-        return -EINVAL;
-    }
-#endif
     session->revChannelCount = inCnl;
-#ifdef WEBRTC_LEGACY
-    session->revFrame->num_channels_ = inCnl;
-    session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
-#endif
     // force process buffer reallocation
     session->revBufSize = 0;
     session->framesRev = 0;
@@ -1467,24 +1025,10 @@
     if (enabled) {
         if (session->enabledMsk == 0) {
             session->framesIn = 0;
-#ifdef WEBRTC_LEGACY
-            if (session->inResampler != NULL) {
-                speex_resampler_reset_mem(session->inResampler);
-            }
-            session->framesOut = 0;
-            if (session->outResampler != NULL) {
-                speex_resampler_reset_mem(session->outResampler);
-            }
-#endif
         }
         session->enabledMsk |= (1 << procId);
         if (HasReverseStream(procId)) {
             session->framesRev = 0;
-#ifdef WEBRTC_LEGACY
-            if (session->revResampler != NULL) {
-                speex_resampler_reset_mem(session->revResampler);
-            }
-#endif
             session->revEnabledMsk |= (1 << procId);
         }
     } else {
@@ -1600,82 +1144,6 @@
             return 0;
         }
 
-#ifdef WEBRTC_LEGACY
-        if (session->inResampler != NULL) {
-            size_t fr = session->frameCount - session->framesIn;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            if (session->inBufSize < session->framesIn + fr) {
-                int16_t* buf;
-                session->inBufSize = session->framesIn + fr;
-                buf = (int16_t*)realloc(
-                        session->inBuf,
-                        session->inBufSize * session->inChannelCount * sizeof(int16_t));
-                if (buf == NULL) {
-                    session->framesIn = 0;
-                    free(session->inBuf);
-                    session->inBuf = NULL;
-                    return -ENOMEM;
-                }
-                session->inBuf = buf;
-            }
-            memcpy(session->inBuf + session->framesIn * session->inChannelCount, inBuffer->s16,
-                   fr * session->inChannelCount * sizeof(int16_t));
-#ifdef DUAL_MIC_TEST
-            pthread_mutex_lock(&gPcmDumpLock);
-            if (gPcmDumpFh != NULL) {
-                fwrite(inBuffer->raw, fr * session->inChannelCount * sizeof(int16_t), 1,
-                       gPcmDumpFh);
-            }
-            pthread_mutex_unlock(&gPcmDumpLock);
-#endif
-
-            session->framesIn += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesIn < session->frameCount) {
-                return 0;
-            }
-            spx_uint32_t frIn = session->framesIn;
-            spx_uint32_t frOut = session->apmFrameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(session->inResampler, 0, session->inBuf, &frIn,
-                                            session->procFrame->data_, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(session->inResampler, session->inBuf, &frIn,
-                                                        session->procFrame->data_, &frOut);
-            }
-            memmove(session->inBuf, session->inBuf + frIn * session->inChannelCount,
-                    (session->framesIn - frIn) * session->inChannelCount * sizeof(int16_t));
-            session->framesIn -= frIn;
-        } else {
-            size_t fr = session->frameCount - session->framesIn;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            memcpy(session->procFrame->data_ + session->framesIn * session->inChannelCount,
-                   inBuffer->s16, fr * session->inChannelCount * sizeof(int16_t));
-
-#ifdef DUAL_MIC_TEST
-            pthread_mutex_lock(&gPcmDumpLock);
-            if (gPcmDumpFh != NULL) {
-                fwrite(inBuffer->raw, fr * session->inChannelCount * sizeof(int16_t), 1,
-                       gPcmDumpFh);
-            }
-            pthread_mutex_unlock(&gPcmDumpLock);
-#endif
-
-            session->framesIn += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesIn < session->frameCount) {
-                return 0;
-            }
-            session->framesIn = 0;
-        }
-        session->procFrame->samples_per_channel_ = session->apmFrameCount;
-
-        effect->session->apm->ProcessStream(session->procFrame);
-#else
         size_t fr = session->frameCount - session->framesIn;
         if (inBuffer->frameCount < fr) {
             fr = inBuffer->frameCount;
@@ -1696,7 +1164,6 @@
             return status;
         }
         outBuffer->frameCount = inBuffer->frameCount;
-#endif
 
         if (session->outBufSize < session->framesOut + session->frameCount) {
             int16_t* buf;
@@ -1713,30 +1180,7 @@
             session->outBuf = buf;
         }
 
-#ifdef WEBRTC_LEGACY
-        if (session->outResampler != NULL) {
-            spx_uint32_t frIn = session->apmFrameCount;
-            spx_uint32_t frOut = session->frameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(
-                        session->outResampler, 0, session->procFrame->data_, &frIn,
-                        session->outBuf + session->framesOut * session->outChannelCount, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(
-                        session->outResampler, session->procFrame->data_, &frIn,
-                        session->outBuf + session->framesOut * session->outChannelCount, &frOut);
-            }
-            session->framesOut += frOut;
-        } else {
-            memcpy(session->outBuf + session->framesOut * session->outChannelCount,
-                   session->procFrame->data_,
-                   session->frameCount * session->outChannelCount * sizeof(int16_t));
-            session->framesOut += session->frameCount;
-        }
-        size_t fr = session->framesOut;
-#else
         fr = session->framesOut;
-#endif
         if (framesRq - framesWr < fr) {
             fr = framesRq - framesWr;
         }
@@ -2129,63 +1573,6 @@
 
     if ((session->revProcessedMsk & session->revEnabledMsk) == session->revEnabledMsk) {
         effect->session->revProcessedMsk = 0;
-#ifdef WEBRTC_LEGACY
-        if (session->revResampler != NULL) {
-            size_t fr = session->frameCount - session->framesRev;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            if (session->revBufSize < session->framesRev + fr) {
-                int16_t* buf;
-                session->revBufSize = session->framesRev + fr;
-                buf = (int16_t*)realloc(
-                        session->revBuf,
-                        session->revBufSize * session->inChannelCount * sizeof(int16_t));
-                if (buf == NULL) {
-                    session->framesRev = 0;
-                    free(session->revBuf);
-                    session->revBuf = NULL;
-                    return -ENOMEM;
-                }
-                session->revBuf = buf;
-            }
-            memcpy(session->revBuf + session->framesRev * session->inChannelCount, inBuffer->s16,
-                   fr * session->inChannelCount * sizeof(int16_t));
-
-            session->framesRev += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesRev < session->frameCount) {
-                return 0;
-            }
-            spx_uint32_t frIn = session->framesRev;
-            spx_uint32_t frOut = session->apmFrameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(session->revResampler, 0, session->revBuf, &frIn,
-                                            session->revFrame->data_, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(session->revResampler, session->revBuf,
-                                                        &frIn, session->revFrame->data_, &frOut);
-            }
-            memmove(session->revBuf, session->revBuf + frIn * session->inChannelCount,
-                    (session->framesRev - frIn) * session->inChannelCount * sizeof(int16_t));
-            session->framesRev -= frIn;
-        } else {
-            size_t fr = session->frameCount - session->framesRev;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            memcpy(session->revFrame->data_ + session->framesRev * session->inChannelCount,
-                   inBuffer->s16, fr * session->inChannelCount * sizeof(int16_t));
-            session->framesRev += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesRev < session->frameCount) {
-                return 0;
-            }
-            session->framesRev = 0;
-        }
-        session->revFrame->samples_per_channel_ = session->apmFrameCount;
-        effect->session->apm->AnalyzeReverseStream(session->revFrame);
-#else
         size_t fr = session->frameCount - session->framesRev;
         if (inBuffer->frameCount < fr) {
             fr = inBuffer->frameCount;
@@ -2205,7 +1592,6 @@
             ALOGE("Process Reverse Stream failed with error %d\n", status);
             return status;
         }
-#endif
         return 0;
     } else {
         return -ENODATA;
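
With the WEBRTC_LEGACY paths removed above, every pre-processing effect drives the shared webrtc::AudioProcessing instance through the same three-step pattern: read the current configuration with GetConfig(), flip the relevant field, and write it back with ApplyConfig(). A minimal sketch of that pattern follows; it is illustrative only, not code from this patch, and the include prefix depends on how the webrtc headers are exported by the build.

    #include <modules/audio_processing/include/audio_processing.h>

    // Toggle noise suppression on an existing AudioProcessing instance.
    // Mirrors the GetConfig()/ApplyConfig() round trip used by
    // NsEnable()/NsDisable() above.
    static void setNoiseSuppression(webrtc::AudioProcessing* apm, bool enabled) {
        webrtc::AudioProcessing::Config config = apm->GetConfig();
        config.noise_suppression.enabled = enabled;
        config.noise_suppression.level =
                webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
        apm->ApplyConfig(config);
    }
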
diff --git a/media/libeffects/preprocessing/benchmarks/Android.bp b/media/libeffects/preprocessing/benchmarks/Android.bp
index 2808293..262fd19 100644
--- a/media/libeffects/preprocessing/benchmarks/Android.bp
+++ b/media/libeffects/preprocessing/benchmarks/Android.bp
@@ -1,31 +1,4 @@
 cc_benchmark {
-    name: "preprocessing_legacy_benchmark",
-    vendor: true,
-    relative_install_path: "soundfx",
-    srcs: ["preprocessing_benchmark.cpp"],
-    shared_libs: [
-        "libaudiopreprocessing_legacy",
-        "libaudioutils",
-        "liblog",
-        "libutils",
-        "libwebrtc_audio_preprocessing",
-    ],
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-        "-Wextra",
-    ],
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-        "libwebrtc_absl_headers",
-    ],
-}
-
-cc_benchmark {
     name: "preprocessing_benchmark",
     vendor: true,
     relative_install_path: "soundfx",
diff --git a/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp b/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
index 3a0ad6d..694a6c4 100644
--- a/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
+++ b/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
@@ -54,9 +54,7 @@
 #include <cstdlib>
 #include <random>
 #include <vector>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <benchmark/benchmark.h>
 #include <hardware/audio_effect.h>
@@ -76,10 +74,8 @@
         {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
         // ns  uuid
         {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
-#ifndef WEBRTC_LEGACY
         // agc2 uuid
         {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},
-#endif
 };
 constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
 constexpr audio_channel_mask_t kChMasks[] = {
@@ -93,9 +89,7 @@
     PREPROC_AGC,  // Automatic Gain Control
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_NUM_EFFECTS
 };
 
diff --git a/media/libeffects/preprocessing/tests/Android.bp b/media/libeffects/preprocessing/tests/Android.bp
index 045b0d3..b439880 100644
--- a/media/libeffects/preprocessing/tests/Android.bp
+++ b/media/libeffects/preprocessing/tests/Android.bp
@@ -1,37 +1,5 @@
 // audio preprocessing unit test
 cc_test {
-    name: "AudioPreProcessingLegacyTest",
-
-    vendor: true,
-
-    relative_install_path: "soundfx",
-
-    srcs: ["PreProcessingTest.cpp"],
-
-    shared_libs: [
-        "libaudiopreprocessing_legacy",
-        "libaudioutils",
-        "liblog",
-        "libutils",
-        "libwebrtc_audio_preprocessing",
-    ],
-
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-        "-Wextra",
-    ],
-
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-    ],
-}
-
-cc_test {
     name: "AudioPreProcessingTest",
 
     vendor: true,
diff --git a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
index 65b9469..5f223c9 100644
--- a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
@@ -22,9 +22,7 @@
 
 #include <audio_effects/effect_aec.h>
 #include <audio_effects/effect_agc.h>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <log/log.h>
 
@@ -38,9 +36,7 @@
 // types of pre processing modules
 enum PreProcId {
     PREPROC_AGC,  // Automatic Gain Control
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
     PREPROC_NUM_EFFECTS
@@ -57,11 +53,9 @@
     ARG_AGC_COMP_LVL,
     ARG_AEC_DELAY,
     ARG_NS_LVL,
-#ifndef WEBRTC_LEGACY
     ARG_AGC2_GAIN,
     ARG_AGC2_LVL,
     ARG_AGC2_SAT_MGN
-#endif
 };
 
 struct preProcConfigParams_t {
@@ -70,19 +64,15 @@
     int nsLevel = 0;         // a value between 0-3
     int agcTargetLevel = 3;  // in dB
     int agcCompLevel = 9;    // in dB
-#ifndef WEBRTC_LEGACY
     float agc2Gain = 0.f;              // in dB
     float agc2SaturationMargin = 2.f;  // in dB
     int agc2Level = 0;                 // either kRms(0) or kPeak(1)
-#endif
     int aecDelay = 0;  // in ms
 };
 
 const effect_uuid_t kPreProcUuids[PREPROC_NUM_EFFECTS] = {
         {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // agc uuid
-#ifndef WEBRTC_LEGACY
         {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},  // agc2 uuid
-#endif
         {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // aec uuid
         {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // ns  uuid
 };
@@ -138,24 +128,20 @@
     printf("\n           Enable Noise Suppression, default disabled");
     printf("\n     --agc");
     printf("\n           Enable Gain Control, default disabled");
-#ifndef WEBRTC_LEGACY
     printf("\n     --agc2");
     printf("\n           Enable Gain Controller 2, default disabled");
-#endif
     printf("\n     --ns_lvl <ns_level>");
     printf("\n           Noise Suppression level in dB, default value 0dB");
     printf("\n     --agc_tgt_lvl <target_level>");
     printf("\n           AGC Target Level in dB, default value 3dB");
     printf("\n     --agc_comp_lvl <comp_level>");
     printf("\n           AGC Comp Level in dB, default value 9dB");
-#ifndef WEBRTC_LEGACY
     printf("\n     --agc2_gain <fixed_digital_gain>");
     printf("\n           AGC Fixed Digital Gain in dB, default value 0dB");
     printf("\n     --agc2_lvl <level_estimator>");
     printf("\n           AGC Adaptive Digital Level Estimator, default value kRms");
     printf("\n     --agc2_sat_mgn <saturation_margin>");
     printf("\n           AGC Adaptive Digital Saturation Margin in dB, default value 2dB");
-#endif
     printf("\n     --aec_delay <delay>");
     printf("\n           AEC delay value in ms, default value 0ms");
     printf("\n");
@@ -217,18 +203,14 @@
             {"ch_mask", required_argument, nullptr, ARG_CH_MASK},
             {"agc_tgt_lvl", required_argument, nullptr, ARG_AGC_TGT_LVL},
             {"agc_comp_lvl", required_argument, nullptr, ARG_AGC_COMP_LVL},
-#ifndef WEBRTC_LEGACY
             {"agc2_gain", required_argument, nullptr, ARG_AGC2_GAIN},
             {"agc2_lvl", required_argument, nullptr, ARG_AGC2_LVL},
             {"agc2_sat_mgn", required_argument, nullptr, ARG_AGC2_SAT_MGN},
-#endif
             {"aec_delay", required_argument, nullptr, ARG_AEC_DELAY},
             {"ns_lvl", required_argument, nullptr, ARG_NS_LVL},
             {"aec", no_argument, &effectEn[PREPROC_AEC], 1},
             {"agc", no_argument, &effectEn[PREPROC_AGC], 1},
-#ifndef WEBRTC_LEGACY
             {"agc2", no_argument, &effectEn[PREPROC_AGC2], 1},
-#endif
             {"ns", no_argument, &effectEn[PREPROC_NS], 1},
             {nullptr, 0, nullptr, 0},
     };
@@ -277,7 +259,6 @@
                 preProcCfgParams.agcCompLevel = atoi(optarg);
                 break;
             }
-#ifndef WEBRTC_LEGACY
             case ARG_AGC2_GAIN: {
                 preProcCfgParams.agc2Gain = atof(optarg);
                 break;
@@ -290,7 +271,6 @@
                 preProcCfgParams.agc2SaturationMargin = atof(optarg);
                 break;
             }
-#endif
             case ARG_AEC_DELAY: {
                 preProcCfgParams.aecDelay = atoi(optarg);
                 break;
@@ -387,7 +367,6 @@
             return EXIT_FAILURE;
         }
     }
-#ifndef WEBRTC_LEGACY
     if (effectEn[PREPROC_AGC2]) {
         if (int status = preProcSetConfigParam(AGC2_PARAM_FIXED_DIGITAL_GAIN,
                                                (float)preProcCfgParams.agc2Gain,
@@ -411,7 +390,6 @@
             return EXIT_FAILURE;
         }
     }
-#endif
     if (effectEn[PREPROC_NS]) {
         if (int status = preProcSetConfigParam(NS_PARAM_LEVEL, (uint32_t)preProcCfgParams.nsLevel,
                                                effectHandle[PREPROC_NS]);
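
The test drives each enabled effect through preProcSetConfigParam(), which packs a parameter/value pair into an effect_param_t and issues EFFECT_CMD_SET_PARAM on the effect's control interface. Below is a hedged sketch of such a helper using the standard audio_effect.h command convention; the helper name and exact shape are assumptions, and the real helper in PreProcessingTest.cpp may differ in details. AGC2's float parameters travel the same way, as a 4-byte value reinterpreted by the effect implementation.

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>
    #include <hardware/audio_effect.h>

    // Send one 32-bit parameter/value pair to an effect (sketch).
    static int setEffectParam(effect_handle_t handle, uint32_t param, uint32_t value) {
        int reply = 0;
        uint32_t replySize = sizeof(reply);
        const uint32_t paramData[2] = {param, value};
        effect_param_t* p =
                (effect_param_t*)malloc(sizeof(effect_param_t) + sizeof(paramData));
        if (p == NULL) return -ENOMEM;
        memcpy(p->data, paramData, sizeof(paramData));
        p->psize = sizeof(paramData[0]);
        p->vsize = sizeof(paramData[1]);
        int status = (*handle)->command(handle, EFFECT_CMD_SET_PARAM,
                                        sizeof(effect_param_t) + sizeof(paramData), p,
                                        &replySize, &reply);
        free(p);
        return status == 0 ? reply : status;
    }
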
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index 39caf53..7ed76d8 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -38,7 +38,8 @@
     FLAGS,
     SETMEDIACAS,
     NAME,
-    GETMETRICS
+    GETMETRICS,
+    SETENTRYPOINT
 };
 
 class BpMediaExtractor : public BpInterface<IMediaExtractor> {
@@ -142,6 +143,13 @@
         }
         return nm;
     }
+
+    virtual status_t setEntryPoint(EntryPoint entryPoint) {
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+        data.writeInt32(static_cast<int32_t>(entryPoint));
+        return remote()->transact(SETENTRYPOINT, data, &reply);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaExtractor, "android.media.IMediaExtractor");
@@ -232,6 +240,16 @@
             reply->writeString8(nm);
             return NO_ERROR;
         }
+        case SETENTRYPOINT: {
+            ALOGV("setEntryPoint");
+            CHECK_INTERFACE(IMediaExtractor, data, reply);
+            int32_t entryPoint;
+            status_t err = data.readInt32(&entryPoint);
+            if (err == OK) {
+                setEntryPoint(EntryPoint(entryPoint));
+            }
+            return err;
+        }
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/include/android/IMediaExtractor.h b/media/libmedia/include/android/IMediaExtractor.h
index 3e035ad..f9cafde 100644
--- a/media/libmedia/include/android/IMediaExtractor.h
+++ b/media/libmedia/include/android/IMediaExtractor.h
@@ -63,6 +63,15 @@
     virtual status_t setMediaCas(const HInterfaceToken &casToken) = 0;
 
     virtual String8 name() = 0;
+
+    enum class EntryPoint {
+        SDK = 1,
+        NDK_WITH_JVM = 2,
+        NDK_NO_JVM = 3,
+        OTHER = 4,
+    };
+
+    virtual status_t setEntryPoint(EntryPoint entryPoint) = 0;
 };
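
The new EntryPoint enum gives the framework a way to record which API surface created an extractor. A caller-side sketch follows; it is not part of this patch, the helper name is illustrative, and it assumes a caller already holding an IMediaExtractor binder. A remote built before the SETENTRYPOINT transaction existed will answer UNKNOWN_TRANSACTION, which a caller can treat as non-fatal.

    #include <android/IMediaExtractor.h>
    #include <utils/Errors.h>
    #include <utils/StrongPointer.h>

    namespace android {

    // Tag an extractor with the API surface it was created from (sketch).
    status_t tagExtractorEntryPoint(const sp<IMediaExtractor>& extractor,
                                    IMediaExtractor::EntryPoint entryPoint) {
        if (extractor == nullptr) {
            return NO_INIT;
        }
        status_t err = extractor->setEntryPoint(entryPoint);
        // Older remotes do not know this transaction; ignore that case.
        return (err == UNKNOWN_TRANSACTION) ? OK : err;
    }

    }  // namespace android
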
 
 
diff --git a/media/libmediatranscoding/Android.bp b/media/libmediatranscoding/Android.bp
index ec806d1..1934820 100644
--- a/media/libmediatranscoding/Android.bp
+++ b/media/libmediatranscoding/Android.bp
@@ -48,7 +48,7 @@
     },
 }
 
-cc_library_shared {
+cc_library {
     name: "libmediatranscoding",
 
     srcs: [
@@ -66,7 +66,6 @@
         "liblog",
         "libutils",
         "libmediatranscoder",
-        "libbinder",
         "libmediandk",
     ],
     export_shared_lib_headers: [
diff --git a/media/libmediatranscoding/TranscoderWrapper.cpp b/media/libmediatranscoding/TranscoderWrapper.cpp
index 8cc8dd2..da86187 100644
--- a/media/libmediatranscoding/TranscoderWrapper.cpp
+++ b/media/libmediatranscoding/TranscoderWrapper.cpp
@@ -192,7 +192,7 @@
                                         new ndk::ScopedAParcel());
             }
 
-            callback->onResourceLost();
+            callback->onResourceLost(clientId, sessionId);
         } else {
             callback->onError(clientId, sessionId, toTranscodingError(err));
         }
diff --git a/media/libmediatranscoding/TranscodingResourcePolicy.cpp b/media/libmediatranscoding/TranscodingResourcePolicy.cpp
index f2e973a..af53f64 100644
--- a/media/libmediatranscoding/TranscodingResourcePolicy.cpp
+++ b/media/libmediatranscoding/TranscodingResourcePolicy.cpp
@@ -21,7 +21,6 @@
 #include <aidl/android/media/IResourceObserverService.h>
 #include <android/binder_manager.h>
 #include <android/binder_process.h>
-#include <binder/IServiceManager.h>
 #include <media/TranscodingResourcePolicy.h>
 #include <utils/Log.h>
 
diff --git a/media/libmediatranscoding/TranscodingSessionController.cpp b/media/libmediatranscoding/TranscodingSessionController.cpp
index bab25c6..b77a3a4 100644
--- a/media/libmediatranscoding/TranscodingSessionController.cpp
+++ b/media/libmediatranscoding/TranscodingSessionController.cpp
@@ -31,6 +31,7 @@
 static_assert((SessionIdType)-1 < 0, "SessionIdType should be signed");
 
 constexpr static uid_t OFFLINE_UID = -1;
+constexpr static size_t kSessionHistoryMax = 100;
 
 //static
 String8 TranscodingSessionController::sessionToString(const SessionKeyType& sessionKey) {
@@ -47,6 +48,12 @@
         return "RUNNING";
     case Session::State::PAUSED:
         return "PAUSED";
+    case Session::State::FINISHED:
+        return "FINISHED";
+    case Session::State::CANCELED:
+        return "CANCELED";
+    case Session::State::ERROR:
+        return "ERROR";
     default:
         break;
     }
@@ -71,6 +78,30 @@
 
 TranscodingSessionController::~TranscodingSessionController() {}
 
+void TranscodingSessionController::dumpSession_l(const Session& session, String8& result,
+                                                 bool closedSession) {
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    const TranscodingRequestParcel& request = session.request;
+    snprintf(buffer, SIZE, "      Session: %s, %s, %d%%\n", sessionToString(session.key).c_str(),
+             sessionStateToString(session.getState()), session.lastProgress);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "        pkg: %s\n", request.clientPackageName.c_str());
+    result.append(buffer);
+    snprintf(buffer, SIZE, "        src: %s\n", request.sourceFilePath.c_str());
+    result.append(buffer);
+    snprintf(buffer, SIZE, "        dst: %s\n", request.destinationFilePath.c_str());
+    result.append(buffer);
+
+    if (closedSession) {
+        snprintf(buffer, SIZE,
+                 "        waiting: %.1fs, running: %.1fs, paused: %.1fs, paused count: %d\n",
+                 session.waitingTime.count() / 1000000.0f, session.runningTime.count() / 1000000.0f,
+                 session.pausedTime.count() / 1000000.0f, session.pauseCount);
+        result.append(buffer);
+    }
+}
+
 void TranscodingSessionController::dumpAllSessions(int fd, const Vector<String16>& args __unused) {
     String8 result;
 
@@ -78,7 +109,7 @@
     char buffer[SIZE];
     std::scoped_lock lock{mLock};
 
-    snprintf(buffer, SIZE, "\n========== Dumping all sessions queues =========\n");
+    snprintf(buffer, SIZE, "\n========== Dumping live sessions queues =========\n");
     result.append(buffer);
     snprintf(buffer, SIZE, "  Total num of Sessions: %zu\n", mSessionMap.size());
     result.append(buffer);
@@ -91,7 +122,7 @@
         if (mSessionQueues[uid].empty()) {
             continue;
         }
-        snprintf(buffer, SIZE, "    Uid: %d, pkg: %s\n", uid,
+        snprintf(buffer, SIZE, "    uid: %d, pkg: %s\n", uid,
                  mUidPackageNames.count(uid) > 0 ? mUidPackageNames[uid].c_str() : "(unknown)");
         result.append(buffer);
         snprintf(buffer, SIZE, "      Num of sessions: %zu\n", mSessionQueues[uid].size());
@@ -104,25 +135,16 @@
                 result.append(buffer);
                 continue;
             }
-            Session& session = sessionIt->second;
-            TranscodingRequestParcel& request = session.request;
-            snprintf(buffer, SIZE, "      Session: %s, %s, %d%%\n",
-                     sessionToString(sessionKey).c_str(), sessionStateToString(session.state),
-                     session.lastProgress);
-            result.append(buffer);
-            snprintf(buffer, SIZE, "        Src: %s\n", request.sourceFilePath.c_str());
-            result.append(buffer);
-            snprintf(buffer, SIZE, "        Dst: %s\n", request.destinationFilePath.c_str());
-            result.append(buffer);
-            // For the offline queue, print out the original client.
-            if (uid == OFFLINE_UID) {
-                snprintf(buffer, SIZE, "        Original Client: %s\n",
-                         request.clientPackageName.c_str());
-                result.append(buffer);
-            }
+            dumpSession_l(sessionIt->second, result);
         }
     }
 
+    snprintf(buffer, SIZE, "\n========== Dumping past sessions =========\n");
+    result.append(buffer);
+    for (auto &session : mSessionHistory) {
+        dumpSession_l(session, result, true /*closedSession*/);
+    }
+
     write(fd, result.string(), result.size());
 }
 
@@ -135,6 +157,34 @@
     return &mSessionMap[topSessionKey];
 }
 
+void TranscodingSessionController::Session::setState(Session::State newState) {
+    if (state == newState) {
+        return;
+    }
+    auto nowTime = std::chrono::system_clock::now();
+    if (state != INVALID) {
+        std::chrono::microseconds elapsedTime = (nowTime - stateEnterTime);
+        switch (state) {
+        case PAUSED:
+            pausedTime = pausedTime + elapsedTime;
+            break;
+        case RUNNING:
+            runningTime = runningTime + elapsedTime;
+            break;
+        case NOT_STARTED:
+            waitingTime = waitingTime + elapsedTime;
+            break;
+        default:
+            break;
+        }
+    }
+    if (newState == PAUSED) {
+        pauseCount++;
+    }
+    stateEnterTime = nowTime;
+    state = newState;
+}
+
 void TranscodingSessionController::updateCurrentSession_l() {
     Session* topSession = getTopSession_l();
     Session* curSession = mCurrentSession;
@@ -145,29 +195,30 @@
     // If we found a topSession that should be run, and it's not already running,
     // take some actions to ensure it's running.
     if (topSession != nullptr &&
-        (topSession != curSession || topSession->state != Session::RUNNING)) {
+        (topSession != curSession || topSession->getState() != Session::RUNNING)) {
         // If another session is currently running, pause it first.
-        if (curSession != nullptr && curSession->state == Session::RUNNING) {
+        if (curSession != nullptr && curSession->getState() == Session::RUNNING) {
             mTranscoder->pause(curSession->key.first, curSession->key.second);
-            curSession->state = Session::PAUSED;
+            curSession->setState(Session::PAUSED);
         }
         // If we are not experiencing resource loss, we can start or resume
         // the topSession now.
         if (!mResourceLost) {
-            if (topSession->state == Session::NOT_STARTED) {
+            if (topSession->getState() == Session::NOT_STARTED) {
                 mTranscoder->start(topSession->key.first, topSession->key.second,
                                    topSession->request, topSession->callback.lock());
-            } else if (topSession->state == Session::PAUSED) {
+            } else if (topSession->getState() == Session::PAUSED) {
                 mTranscoder->resume(topSession->key.first, topSession->key.second,
                                     topSession->request, topSession->callback.lock());
             }
-            topSession->state = Session::RUNNING;
+            topSession->setState(Session::RUNNING);
         }
     }
     mCurrentSession = topSession;
 }
 
-void TranscodingSessionController::removeSession_l(const SessionKeyType& sessionKey) {
+void TranscodingSessionController::removeSession_l(const SessionKeyType& sessionKey,
+                                                   Session::State finalState) {
     ALOGV("%s: session %s", __FUNCTION__, sessionToString(sessionKey).c_str());
 
     if (mSessionMap.count(sessionKey) == 0) {
@@ -201,6 +252,12 @@
         mCurrentSession = nullptr;
     }
 
+    mSessionMap[sessionKey].setState(finalState);
+    mSessionHistory.push_back(mSessionMap[sessionKey]);
+    if (mSessionHistory.size() > kSessionHistoryMax) {
+        mSessionHistory.erase(mSessionHistory.begin());
+    }
+
     // Remove session from session map.
     mSessionMap.erase(sessionKey);
 }
@@ -288,10 +345,11 @@
     // Add session to session map.
     mSessionMap[sessionKey].key = sessionKey;
     mSessionMap[sessionKey].uid = uid;
-    mSessionMap[sessionKey].state = Session::NOT_STARTED;
     mSessionMap[sessionKey].lastProgress = 0;
+    mSessionMap[sessionKey].pauseCount = 0;
     mSessionMap[sessionKey].request = request;
     mSessionMap[sessionKey].callback = callback;
+    mSessionMap[sessionKey].setState(Session::NOT_STARTED);
 
     // If it's an offline session, the queue was already added in constructor.
     // If it's a real-time sessions, check if a queue is already present for the uid,
@@ -350,12 +408,12 @@
         // Note that stop() is needed even if the session is currently paused. This instructs
         // the transcoder to discard any states for the session, otherwise the states may
         // never be discarded.
-        if (mSessionMap[*it].state != Session::NOT_STARTED) {
+        if (mSessionMap[*it].getState() != Session::NOT_STARTED) {
             mTranscoder->stop(it->first, it->second);
         }
 
         // Remove the session.
-        removeSession_l(*it);
+        removeSession_l(*it, Session::CANCELED);
     }
 
     // Start next session.
@@ -396,7 +454,7 @@
     // Only ignore if session was never started. In particular, propagate the status
     // to client if the session is paused. Transcoder could have posted finish when
     // we're pausing it, and the finish arrived after we changed current session.
-    if (mSessionMap[sessionKey].state == Session::NOT_STARTED) {
+    if (mSessionMap[sessionKey].getState() == Session::NOT_STARTED) {
         ALOGW("%s: ignoring %s for session %s that was never started", __FUNCTION__, reason,
               sessionToString(sessionKey).c_str());
         return;
@@ -445,7 +503,7 @@
         }
 
         // Remove the session.
-        removeSession_l(sessionKey);
+        removeSession_l(sessionKey, Session::FINISHED);
 
         // Start next session.
         updateCurrentSession_l();
@@ -465,7 +523,7 @@
         }
 
         // Remove the session.
-        removeSession_l(sessionKey);
+        removeSession_l(sessionKey, Session::ERROR);
 
         // Start next session.
         updateCurrentSession_l();
@@ -485,30 +543,34 @@
     });
 }
 
-void TranscodingSessionController::onResourceLost() {
+void TranscodingSessionController::onResourceLost(ClientIdType clientId, SessionIdType sessionId) {
     ALOGI("%s", __FUNCTION__);
 
-    std::scoped_lock lock{mLock};
-
-    if (mResourceLost) {
-        return;
-    }
-
-    // If we receive a resource loss event, the TranscoderLibrary already paused
-    // the transcoding, so we don't need to call onPaused to notify it to pause.
-    // Only need to update the session state here.
-    if (mCurrentSession != nullptr && mCurrentSession->state == Session::RUNNING) {
-        mCurrentSession->state = Session::PAUSED;
-        // Notify the client as a paused event.
-        auto clientCallback = mCurrentSession->callback.lock();
-        if (clientCallback != nullptr) {
-            clientCallback->onTranscodingPaused(mCurrentSession->key.second);
+    notifyClient(clientId, sessionId, "resource_lost", [=](const SessionKeyType& sessionKey) {
+        if (mResourceLost) {
+            return;
         }
-        mResourcePolicy->setPidResourceLost(mCurrentSession->request.clientPid);
-    }
-    mResourceLost = true;
 
-    validateState_l();
+        Session* resourceLostSession = &mSessionMap[sessionKey];
+        if (resourceLostSession->getState() != Session::RUNNING) {
+            ALOGW("session %s lost resource but is no longer running",
+                  sessionToString(sessionKey).c_str());
+            return;
+        }
+        // If we receive a resource loss event, the transcoder already paused the transcoding,
+        // so we don't need to call onPaused() to pause it. However, we still need to notify
+        // the client and update the session state here.
+        resourceLostSession->setState(Session::PAUSED);
+        // Notify the client as a paused event.
+        auto clientCallback = resourceLostSession->callback.lock();
+        if (clientCallback != nullptr) {
+            clientCallback->onTranscodingPaused(sessionKey.second);
+        }
+        mResourcePolicy->setPidResourceLost(resourceLostSession->request.clientPid);
+        mResourceLost = true;
+
+        validateState_l();
+    });
 }
 
 void TranscodingSessionController::onTopUidsChanged(const std::unordered_set<uid_t>& uids) {
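
Session::setState() above turns the raw state field into a small per-state stopwatch: on every transition it adds the time spent in the previous state to waitingTime, runningTime, or pausedTime, and counts pause transitions, so dumpSession_l() can report them for closed sessions. The standalone sketch below shows the same accounting in isolation; field names are assumed to match the patch, and steady_clock is used here for monotonicity where the patch itself uses system_clock.

    #include <chrono>

    // Minimal, self-contained version of the per-state time accounting.
    struct SessionTimer {
        enum State { INVALID = -1, NOT_STARTED, RUNNING, PAUSED };

        State state = INVALID;
        std::chrono::steady_clock::time_point stateEnterTime;
        std::chrono::microseconds waitingTime{0};
        std::chrono::microseconds runningTime{0};
        std::chrono::microseconds pausedTime{0};
        int pauseCount = 0;

        void setState(State newState) {
            if (state == newState) {
                return;
            }
            auto now = std::chrono::steady_clock::now();
            if (state != INVALID) {
                auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
                        now - stateEnterTime);
                switch (state) {
                    case NOT_STARTED: waitingTime += elapsed; break;
                    case RUNNING:     runningTime += elapsed; break;
                    case PAUSED:      pausedTime  += elapsed; break;
                    default: break;
                }
            }
            if (newState == PAUSED) {
                pauseCount++;
            }
            stateEnterTime = now;
            state = newState;
        }
    };
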
diff --git a/media/libmediatranscoding/TranscodingUidPolicy.cpp b/media/libmediatranscoding/TranscodingUidPolicy.cpp
index 8a74d5d..a725387 100644
--- a/media/libmediatranscoding/TranscodingUidPolicy.cpp
+++ b/media/libmediatranscoding/TranscodingUidPolicy.cpp
@@ -17,11 +17,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "TranscodingUidPolicy"
 
+#include <android/activity_manager.h>
 #include <android/binder_manager.h>
 #include <android/binder_process.h>
-#include <binder/ActivityManager.h>
-#include <cutils/misc.h>  // FIRST_APPLICATION_UID
-#include <cutils/multiuser.h>
 #include <inttypes.h>
 #include <media/TranscodingUidPolicy.h>
 #include <utils/Log.h>
@@ -31,51 +29,12 @@
 namespace android {
 
 constexpr static uid_t OFFLINE_UID = -1;
-constexpr static const char* kTranscodingTag = "transcoding";
-
-struct TranscodingUidPolicy::UidObserver : public BnUidObserver,
-                                           public virtual IBinder::DeathRecipient {
-    explicit UidObserver(TranscodingUidPolicy* owner) : mOwner(owner) {}
-
-    // IUidObserver
-    void onUidGone(uid_t uid, bool disabled) override;
-    void onUidActive(uid_t uid) override;
-    void onUidIdle(uid_t uid, bool disabled) override;
-    void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq,
-                           int32_t capability) override;
-
-    // IBinder::DeathRecipient implementation
-    void binderDied(const wp<IBinder>& who) override;
-
-    TranscodingUidPolicy* mOwner;
-};
-
-void TranscodingUidPolicy::UidObserver::onUidGone(uid_t uid __unused, bool disabled __unused) {}
-
-void TranscodingUidPolicy::UidObserver::onUidActive(uid_t uid __unused) {}
-
-void TranscodingUidPolicy::UidObserver::onUidIdle(uid_t uid __unused, bool disabled __unused) {}
-
-void TranscodingUidPolicy::UidObserver::onUidStateChanged(uid_t uid, int32_t procState,
-                                                          int64_t procStateSeq __unused,
-                                                          int32_t capability __unused) {
-    mOwner->onUidStateChanged(uid, procState);
-}
-
-void TranscodingUidPolicy::UidObserver::binderDied(const wp<IBinder>& /*who*/) {
-    ALOGW("TranscodingUidPolicy: ActivityManager has died");
-    // TODO(chz): this is a rare event (since if the AMS is dead, the system is
-    // probably dead as well). But we should try to reconnect.
-    mOwner->setUidObserverRegistered(false);
-}
-
-////////////////////////////////////////////////////////////////////////////
+constexpr static int32_t IMPORTANCE_UNKNOWN = INT32_MAX;
 
 TranscodingUidPolicy::TranscodingUidPolicy()
-      : mAm(std::make_shared<ActivityManager>()),
-        mUidObserver(new UidObserver(this)),
+      : mUidObserver(nullptr),
         mRegistered(false),
-        mTopUidState(ActivityManager::PROCESS_STATE_UNKNOWN) {
+        mTopUidState(IMPORTANCE_UNKNOWN) {
     registerSelf();
 }
 
@@ -83,39 +42,32 @@
     unregisterSelf();
 }
 
+void TranscodingUidPolicy::OnUidImportance(uid_t uid, int32_t uidImportance, void* cookie) {
+    TranscodingUidPolicy* owner = reinterpret_cast<TranscodingUidPolicy*>(cookie);
+    owner->onUidStateChanged(uid, uidImportance);
+}
+
 void TranscodingUidPolicy::registerSelf() {
-    status_t res = mAm->linkToDeath(mUidObserver.get());
-    mAm->registerUidObserver(
-            mUidObserver.get(),
-            ActivityManager::UID_OBSERVER_GONE | ActivityManager::UID_OBSERVER_IDLE |
-                    ActivityManager::UID_OBSERVER_ACTIVE | ActivityManager::UID_OBSERVER_PROCSTATE,
-            ActivityManager::PROCESS_STATE_UNKNOWN, String16(kTranscodingTag));
+    mUidObserver = AActivityManager_addUidImportanceListener(
+            &OnUidImportance, -1, (void*)this);
 
-    if (res == OK) {
-        Mutex::Autolock _l(mUidLock);
-
-        mRegistered = true;
-        ALOGI("TranscodingUidPolicy: Registered with ActivityManager");
-    } else {
-        mAm->unregisterUidObserver(mUidObserver.get());
+    if (mUidObserver == nullptr) {
+        ALOGE("Failed to register uid observer");
+        return;
     }
+
+    Mutex::Autolock _l(mUidLock);
+    mRegistered = true;
+    ALOGI("Registered uid observer");
 }
 
 void TranscodingUidPolicy::unregisterSelf() {
-    mAm->unregisterUidObserver(mUidObserver.get());
-    mAm->unlinkToDeath(mUidObserver.get());
+    AActivityManager_removeUidImportanceListener(mUidObserver);
+    mUidObserver = nullptr;
 
     Mutex::Autolock _l(mUidLock);
-
     mRegistered = false;
-
-    ALOGI("TranscodingUidPolicy: Unregistered with ActivityManager");
-}
-
-void TranscodingUidPolicy::setUidObserverRegistered(bool registered) {
-    Mutex::Autolock _l(mUidLock);
-
-    mRegistered = registered;
+    ALOGI("Unregistered uid observer");
 }
 
 void TranscodingUidPolicy::setCallback(const std::shared_ptr<UidPolicyCallbackInterface>& cb) {
@@ -133,9 +85,9 @@
         return;
     }
 
-    int32_t state = ActivityManager::PROCESS_STATE_UNKNOWN;
-    if (mRegistered && mAm->isUidActive(uid, String16(kTranscodingTag))) {
-        state = mAm->getUidProcessState(uid, String16(kTranscodingTag));
+    int32_t state = IMPORTANCE_UNKNOWN;
+    if (mRegistered && AActivityManager_isUidActive(uid)) {
+        state = AActivityManager_getUidImportance(uid);
     }
 
     ALOGV("%s: inserting new uid: %u, procState %d", __FUNCTION__, uid, state);
@@ -170,14 +122,14 @@
 bool TranscodingUidPolicy::isUidOnTop(uid_t uid) {
     Mutex::Autolock _l(mUidLock);
 
-    return mTopUidState != ActivityManager::PROCESS_STATE_UNKNOWN &&
+    return mTopUidState != IMPORTANCE_UNKNOWN &&
            mTopUidState == getProcState_l(uid);
 }
 
 std::unordered_set<uid_t> TranscodingUidPolicy::getTopUids() const {
     Mutex::Autolock _l(mUidLock);
 
-    if (mTopUidState == ActivityManager::PROCESS_STATE_UNKNOWN) {
+    if (mTopUidState == IMPORTANCE_UNKNOWN) {
         return std::unordered_set<uid_t>();
     }
 
@@ -195,11 +147,13 @@
         if (it != mUidStateMap.end() && it->second != procState) {
             // Top set changed if 1) the uid is in the current top uid set, or 2) the
             // new procState is at least the same priority as the current top uid state.
-            bool isUidCurrentTop = mTopUidState != ActivityManager::PROCESS_STATE_UNKNOWN &&
-                                   mStateUidMap[mTopUidState].count(uid) > 0;
-            bool isNewStateHigherThanTop = procState != ActivityManager::PROCESS_STATE_UNKNOWN &&
-                                           (procState <= mTopUidState ||
-                                            mTopUidState == ActivityManager::PROCESS_STATE_UNKNOWN);
+            bool isUidCurrentTop =
+                    mTopUidState != IMPORTANCE_UNKNOWN &&
+                    mStateUidMap[mTopUidState].count(uid) > 0;
+            bool isNewStateHigherThanTop =
+                    procState != IMPORTANCE_UNKNOWN &&
+                    (procState <= mTopUidState ||
+                     mTopUidState == IMPORTANCE_UNKNOWN);
             topUidSetChanged = (isUidCurrentTop || isNewStateHigherThanTop);
 
             // Move uid to the new procState.
@@ -227,11 +181,12 @@
 }
 
 void TranscodingUidPolicy::updateTopUid_l() {
-    mTopUidState = ActivityManager::PROCESS_STATE_UNKNOWN;
+    mTopUidState = IMPORTANCE_UNKNOWN;
 
     // Find the lowest uid state (ignoring PROCESS_STATE_UNKNOWN) with some monitored uids.
     for (auto stateIt = mStateUidMap.begin(); stateIt != mStateUidMap.end(); stateIt++) {
-        if (stateIt->first != ActivityManager::PROCESS_STATE_UNKNOWN && !stateIt->second.empty()) {
+        if (stateIt->first != IMPORTANCE_UNKNOWN &&
+            !stateIt->second.empty()) {
             mTopUidState = stateIt->first;
             break;
         }
@@ -245,7 +200,7 @@
     if (it != mUidStateMap.end()) {
         return it->second;
     }
-    return ActivityManager::PROCESS_STATE_UNKNOWN;
+    return IMPORTANCE_UNKNOWN;
 }
 
 }  // namespace android
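The comparisons above (for example procState <= mTopUidState) assume the usual Android convention that numerically lower importance values denote more important (closer to foreground) uids, mirroring how process states were ordered before this change. A small illustrative predicate capturing that assumption:

    // Illustrative only: a lower importance value means a more important uid.
    // IMPORTANCE_UNKNOWN is the sentinel used above for "no information" / "no top uid".
    static bool outranksOrMatchesTop(int32_t importance, int32_t topImportance) {
        return importance != IMPORTANCE_UNKNOWN &&
               (topImportance == IMPORTANCE_UNKNOWN || importance <= topImportance);
    }
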
diff --git a/media/libmediatranscoding/include/media/TranscoderInterface.h b/media/libmediatranscoding/include/media/TranscoderInterface.h
index e17cd5a..6268aa5 100644
--- a/media/libmediatranscoding/include/media/TranscoderInterface.h
+++ b/media/libmediatranscoding/include/media/TranscoderInterface.h
@@ -64,7 +64,7 @@
     // If there is any session currently running, it will be paused. When resource contention
     // is solved, the controller should call TranscoderInterface's to either start a new session,
     // or resume a paused session.
-    virtual void onResourceLost() = 0;
+    virtual void onResourceLost(ClientIdType clientId, SessionIdType sessionId) = 0;
 
 protected:
     virtual ~TranscoderCallbackInterface() = default;
diff --git a/media/libmediatranscoding/include/media/TranscodingSessionController.h b/media/libmediatranscoding/include/media/TranscodingSessionController.h
index c082074..a443265 100644
--- a/media/libmediatranscoding/include/media/TranscodingSessionController.h
+++ b/media/libmediatranscoding/include/media/TranscodingSessionController.h
@@ -26,6 +26,7 @@
 #include <utils/String8.h>
 #include <utils/Vector.h>
 
+#include <chrono>
 #include <list>
 #include <map>
 #include <mutex>
@@ -58,7 +59,7 @@
     void onError(ClientIdType clientId, SessionIdType sessionId, TranscodingErrorCode err) override;
     void onProgressUpdate(ClientIdType clientId, SessionIdType sessionId,
                           int32_t progress) override;
-    void onResourceLost() override;
+    void onResourceLost(ClientIdType clientId, SessionIdType sessionId) override;
     // ~TranscoderCallbackInterface
 
     // UidPolicyCallbackInterface
@@ -82,16 +83,33 @@
     using SessionQueueType = std::list<SessionKeyType>;
 
     struct Session {
-        SessionKeyType key;
-        uid_t uid;
         enum State {
-            NOT_STARTED,
+            INVALID = -1,
+            NOT_STARTED = 0,
             RUNNING,
             PAUSED,
-        } state;
+            FINISHED,
+            CANCELED,
+            ERROR,
+        };
+        SessionKeyType key;
+        uid_t uid;
         int32_t lastProgress;
+        int32_t pauseCount;
+        std::chrono::time_point<std::chrono::system_clock> stateEnterTime;
+        std::chrono::microseconds waitingTime;
+        std::chrono::microseconds runningTime;
+        std::chrono::microseconds pausedTime;
+
         TranscodingRequest request;
         std::weak_ptr<ITranscodingClientCallback> callback;
+
+        // Must use setState to change state.
+        void setState(Session::State state);
+        State getState() const { return state; }
+
+    private:
+        State state = INVALID;
     };
 
     // TODO(chz): call transcoder without global lock.
@@ -115,15 +133,17 @@
 
     Session* mCurrentSession;
     bool mResourceLost;
+    std::list<Session> mSessionHistory;
 
     // Only allow MediaTranscodingService and unit tests to instantiate.
     TranscodingSessionController(const std::shared_ptr<TranscoderInterface>& transcoder,
                                  const std::shared_ptr<UidPolicyInterface>& uidPolicy,
                                  const std::shared_ptr<ResourcePolicyInterface>& resourcePolicy);
 
+    void dumpSession_l(const Session& session, String8& result, bool closedSession = false);
     Session* getTopSession_l();
     void updateCurrentSession_l();
-    void removeSession_l(const SessionKeyType& sessionKey);
+    void removeSession_l(const SessionKeyType& sessionKey, Session::State finalState);
     void moveUidsToTop_l(const std::unordered_set<uid_t>& uids, bool preserveTopUid);
     void notifyClient(ClientIdType clientId, SessionIdType sessionId, const char* reason,
                       std::function<void(const SessionKeyType&)> func);
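The new Session bookkeeping fields (stateEnterTime, waitingTime, runningTime, pausedTime, pauseCount) together with the private state and the "must use setState" comment suggest that setState() accumulates the time spent in the state being left. The implementation is not shown in this section; one plausible sketch, under that assumption:

    // Illustrative only: accumulate wall-clock time spent in the previous state, then switch.
    void TranscodingSessionController::Session::setState(Session::State newState) {
        if (state == newState) {
            return;
        }
        auto now = std::chrono::system_clock::now();
        auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(now - stateEnterTime);
        switch (state) {
            case NOT_STARTED: waitingTime += elapsed; break;
            case RUNNING:     runningTime += elapsed; break;
            case PAUSED:      pausedTime  += elapsed; break;
            default:          break;  // INVALID or terminal states: nothing to accumulate.
        }
        if (newState == PAUSED) {
            pauseCount++;
        }
        stateEnterTime = now;
        state = newState;
    }
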
diff --git a/media/libmediatranscoding/include/media/TranscodingUidPolicy.h b/media/libmediatranscoding/include/media/TranscodingUidPolicy.h
index acd3cff..4dde5a6 100644
--- a/media/libmediatranscoding/include/media/TranscodingUidPolicy.h
+++ b/media/libmediatranscoding/include/media/TranscodingUidPolicy.h
@@ -22,18 +22,16 @@
 #include <media/UidPolicyInterface.h>
 #include <sys/types.h>
 #include <utils/Condition.h>
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-#include <utils/Vector.h>
 
 #include <map>
 #include <mutex>
 #include <unordered_map>
 #include <unordered_set>
 
+struct AActivityManager_UidImportanceListener;
+
 namespace android {
 
-class ActivityManager;
 // Observer for UID lifecycle and provide information about the uid's app
 // priority used by the session controller.
 class TranscodingUidPolicy : public UidPolicyInterface {
@@ -51,17 +49,17 @@
 
 private:
     void onUidStateChanged(uid_t uid, int32_t procState);
-    void setUidObserverRegistered(bool registerd);
     void registerSelf();
     void unregisterSelf();
     int32_t getProcState_l(uid_t uid) NO_THREAD_SAFETY_ANALYSIS;
     void updateTopUid_l() NO_THREAD_SAFETY_ANALYSIS;
 
-    struct UidObserver;
+    static void OnUidImportance(uid_t uid, int32_t uidImportance, void* cookie);
+
     struct ResourceManagerClient;
     mutable Mutex mUidLock;
-    std::shared_ptr<ActivityManager> mAm;
-    sp<UidObserver> mUidObserver;
+    AActivityManager_UidImportanceListener* mUidObserver;
+
     bool mRegistered GUARDED_BY(mUidLock);
     int32_t mTopUidState GUARDED_BY(mUidLock);
     std::unordered_map<uid_t, int32_t> mUidStateMap GUARDED_BY(mUidLock);
diff --git a/media/libmediatranscoding/tests/Android.bp b/media/libmediatranscoding/tests/Android.bp
index e49df35..8bff10a 100644
--- a/media/libmediatranscoding/tests/Android.bp
+++ b/media/libmediatranscoding/tests/Android.bp
@@ -14,15 +14,16 @@
     ],
 
     shared_libs: [
+        "libandroid",
         "libbinder_ndk",
         "libcutils",
         "liblog",
         "libutils",
-        "libmediatranscoding"
     ],
 
     static_libs: [
         "mediatranscoding_aidl_interface-ndk_platform",
+        "libmediatranscoding",
     ],
 
     cflags: [
diff --git a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
index c8b38fb..fa52f63 100644
--- a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
@@ -44,11 +44,14 @@
 constexpr ClientIdType kClientId = 1000;
 constexpr SessionIdType kClientSessionId = 0;
 constexpr uid_t kClientUid = 5000;
+constexpr pid_t kClientPid = 10000;
 constexpr uid_t kInvalidUid = (uid_t)-1;
+constexpr pid_t kInvalidPid = (pid_t)-1;
 
 #define CLIENT(n) (kClientId + (n))
 #define SESSION(n) (kClientSessionId + (n))
 #define UID(n) (kClientUid + (n))
+#define PID(n) (kClientPid + (n))
 
 class TestUidPolicy : public UidPolicyInterface {
 public:
@@ -81,11 +84,27 @@
 
 class TestResourcePolicy : public ResourcePolicyInterface {
 public:
-    TestResourcePolicy() = default;
+    TestResourcePolicy() { reset(); }
     virtual ~TestResourcePolicy() = default;
 
+    // ResourcePolicyInterface
     void setCallback(const std::shared_ptr<ResourcePolicyCallbackInterface>& /*cb*/) override {}
-    void setPidResourceLost(pid_t /*pid*/) override {}
+    void setPidResourceLost(pid_t pid) override {
+        mResourceLostPid = pid;
+    }
+    // ~ResourcePolicyInterface
+
+    pid_t getPid() {
+        pid_t result = mResourceLostPid;
+        reset();
+        return result;
+    }
+
+private:
+    void reset() {
+        mResourceLostPid = kInvalidPid;
+    }
+    pid_t mResourceLostPid;
 };
 
 class TestTranscoder : public TranscoderInterface {
@@ -563,10 +582,12 @@
 
     // Start with unspecified top UID.
     // Submit real-time session to CLIENT(0), session should start immediately.
+    mRealtimeRequest.clientPid = PID(0);
     mController->submit(CLIENT(0), SESSION(0), UID(0), mRealtimeRequest, mClientCallback0);
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
 
     // Submit offline session to CLIENT(0), should not start.
+    mOfflineRequest.clientPid = PID(0);
     mController->submit(CLIENT(1), SESSION(0), UID(0), mOfflineRequest, mClientCallback1);
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
 
@@ -576,13 +597,22 @@
 
     // Submit real-time session to CLIENT(2) in different uid UID(1).
     // Should pause previous session and start new session.
+    mRealtimeRequest.clientPid = PID(1);
     mController->submit(CLIENT(2), SESSION(0), UID(1), mRealtimeRequest, mClientCallback2);
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(2), SESSION(0)));
 
+    // Test 0: No call into ResourcePolicy if resource lost is from a non-running
+    // or non-existent session.
+    mController->onResourceLost(CLIENT(0), SESSION(0));
+    EXPECT_EQ(mResourcePolicy->getPid(), kInvalidPid);
+    mController->onResourceLost(CLIENT(3), SESSION(0));
+    EXPECT_EQ(mResourcePolicy->getPid(), kInvalidPid);
+
     // Test 1: No queue change during resource loss.
     // Signal resource lost.
-    mController->onResourceLost();
+    mController->onResourceLost(CLIENT(2), SESSION(0));
+    EXPECT_EQ(mResourcePolicy->getPid(), PID(1));
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
 
     // Signal resource available, CLIENT(2) should resume.
@@ -591,7 +621,8 @@
 
     // Test 2: Change of queue order during resource loss.
     // Signal resource lost.
-    mController->onResourceLost();
+    mController->onResourceLost(CLIENT(2), SESSION(0));
+    EXPECT_EQ(mResourcePolicy->getPid(), PID(1));
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
 
     // Move UID(0) back to top, should have no resume due to no resource.
@@ -604,13 +635,15 @@
 
     // Test 3: Adding new queue during resource loss.
     // Signal resource lost.
-    mController->onResourceLost();
+    mController->onResourceLost(CLIENT(0), SESSION(0));
+    EXPECT_EQ(mResourcePolicy->getPid(), PID(0));
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
 
     // Move UID(2) to top.
     mUidPolicy->setTop(UID(2));
 
     // Submit real-time session to CLIENT(3) in UID(2), session shouldn't start due to no resource.
+    mRealtimeRequest.clientPid = PID(2);
     mController->submit(CLIENT(3), SESSION(0), UID(2), mRealtimeRequest, mClientCallback3);
     EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
 
diff --git a/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp b/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp
index 92ba818..1a6e7ed 100644
--- a/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp
+++ b/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp
@@ -235,6 +235,33 @@
     return AMEDIA_OK;
 }
 
+media_status_t MediaSampleReaderNDK::unselectTrack(int trackIndex) {
+    std::scoped_lock lock(mExtractorMutex);
+
+    if (trackIndex < 0 || trackIndex >= mTrackCount) {
+        LOG(ERROR) << "Invalid trackIndex " << trackIndex << " for trackCount " << mTrackCount;
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    } else if (mExtractorTrackIndex >= 0) {
+        LOG(ERROR) << "unselectTrack must be called before sample reading begins.";
+        return AMEDIA_ERROR_UNSUPPORTED;
+    }
+
+    auto it = mTrackSignals.find(trackIndex);
+    if (it == mTrackSignals.end()) {
+        LOG(ERROR) << "TrackIndex " << trackIndex << " is not selected";
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+    mTrackSignals.erase(it);
+
+    media_status_t status = AMediaExtractor_unselectTrack(mExtractor, trackIndex);
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "AMediaExtractor_selectTrack returned error: " << status;
+        return status;
+    }
+
+    return AMEDIA_OK;
+}
+
 media_status_t MediaSampleReaderNDK::setEnforceSequentialAccess(bool enforce) {
     LOG(DEBUG) << "setEnforceSequentialAccess( " << enforce << " )";
 
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
index 94a9a33..3d4ff15 100644
--- a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -275,12 +275,6 @@
         return AMEDIA_ERROR_INVALID_PARAMETER;
     }
 
-    media_status_t status = mSampleReader->selectTrack(trackIndex);
-    if (status != AMEDIA_OK) {
-        LOG(ERROR) << "Unable to select track " << trackIndex;
-        return status;
-    }
-
     std::shared_ptr<MediaTrackTranscoder> transcoder;
     std::shared_ptr<AMediaFormat> format;
 
@@ -322,10 +316,17 @@
         format = std::shared_ptr<AMediaFormat>(mergedFormat, &AMediaFormat_delete);
     }
 
+    media_status_t status = mSampleReader->selectTrack(trackIndex);
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to select track " << trackIndex;
+        return status;
+    }
+
     status = transcoder->configure(mSampleReader, trackIndex, format);
     if (status != AMEDIA_OK) {
         LOG(ERROR) << "Configure track transcoder for track #" << trackIndex << " returned error "
                    << status;
+        mSampleReader->unselectTrack(trackIndex);
         return status;
     }
 
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
index 5ec5e08..0695bdb 100644
--- a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "VideoTrackTranscoder"
 
 #include <android-base/logging.h>
+#include <android-base/properties.h>
 #include <media/NdkCommon.h>
 #include <media/VideoTrackTranscoder.h>
 #include <utils/AndroidThreads.h>
@@ -39,11 +40,16 @@
 // Default key frame interval in seconds.
 static constexpr float kDefaultKeyFrameIntervalSeconds = 1.0f;
 // Default codec operating rate.
-static constexpr int32_t kDefaultCodecOperatingRate = 240;
+static int32_t kDefaultCodecOperatingRate720P = base::GetIntProperty(
+        "debug.media.transcoding.codec_max_operating_rate_720P", /*default*/ 480);
+static int32_t kDefaultCodecOperatingRate1080P = base::GetIntProperty(
+        "debug.media.transcoding.codec_max_operating_rate_1080P", /*default*/ 240);
 // Default codec priority.
 static constexpr int32_t kDefaultCodecPriority = 1;
 // Default bitrate, in case source estimation fails.
 static constexpr int32_t kDefaultBitrateMbps = 10 * 1000 * 1000;
+// Default frame rate.
+static constexpr int32_t kDefaultFrameRate = 30;
 
 template <typename T>
 void VideoTrackTranscoder::BlockingQueue<T>::push(T const& value, bool front) {
@@ -179,6 +185,25 @@
     }
 }
 
+// Look up the default operating rate based on the encoder's output resolution.
+static int32_t getDefaultOperatingRate(AMediaFormat* encoderFormat) {
+    int32_t width, height;
+    if (AMediaFormat_getInt32(encoderFormat, AMEDIAFORMAT_KEY_WIDTH, &width) && (width > 0) &&
+        AMediaFormat_getInt32(encoderFormat, AMEDIAFORMAT_KEY_HEIGHT, &height) && (height > 0)) {
+        if ((width == 1280 && height == 720) || (width == 720 && height == 1280)) {
+            return kDefaultCodecOperatingRate720P;
+        } else if ((width == 1920 && height == 1080) || (width == 1080 && height == 1920)) {
+            return kDefaultCodecOperatingRate1080P;
+        } else {
+            LOG(WARNING) << "No default operating rate defined for resolution " << width << "x" << height;
+            // Don't set operating rate if the correct dimensions are not found.
+        }
+    } else {
+        LOG(ERROR) << "Failed to get default operating rate due to missing resolution";
+    }
+    return -1;
+}
+
 // Creates and configures the codecs.
 media_status_t VideoTrackTranscoder::configureDestinationFormat(
         const std::shared_ptr<AMediaFormat>& destinationFormat) {
@@ -209,10 +234,15 @@
 
     SetDefaultFormatValueFloat(AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, encoderFormat,
                                kDefaultKeyFrameIntervalSeconds);
-    SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_OPERATING_RATE, encoderFormat,
-                               kDefaultCodecOperatingRate);
-    SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_PRIORITY, encoderFormat, kDefaultCodecPriority);
 
+    int32_t operatingRate = getDefaultOperatingRate(encoderFormat);
+
+    if (operatingRate != -1) {
+        SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_OPERATING_RATE, encoderFormat, operatingRate);
+    }
+
+    SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_PRIORITY, encoderFormat, kDefaultCodecPriority);
+    SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_FRAME_RATE, encoderFormat, kDefaultFrameRate);
     AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_COLOR_FORMAT, kColorFormatSurface);
 
     // Always encode without rotation. The rotation degree will be transferred directly to
@@ -237,6 +267,7 @@
     }
     mEncoder = std::make_shared<CodecWrapper>(encoder, shared_from_this());
 
+    LOG(DEBUG) << "Configuring encoder with: " << AMediaFormat_toString(mDestinationFormat.get());
     status = AMediaCodec_configure(mEncoder->getCodec(), mDestinationFormat.get(),
                                    NULL /* surface */, NULL /* crypto */,
                                    AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
@@ -284,6 +315,7 @@
     CopyFormatEntries(mDestinationFormat.get(), decoderFormat.get(), kEncoderEntriesToCopy,
                       entryCount);
 
+    LOG(DEBUG) << "Configuring decoder with: " << AMediaFormat_toString(decoderFormat.get());
     status = AMediaCodec_configure(mDecoder, decoderFormat.get(), mSurface, NULL /* crypto */,
                                    0 /* flags */);
     if (status != AMEDIA_OK) {
diff --git a/media/libmediatranscoding/transcoder/benchmark/Android.bp b/media/libmediatranscoding/transcoder/benchmark/Android.bp
index ce34702..6c87233 100644
--- a/media/libmediatranscoding/transcoder/benchmark/Android.bp
+++ b/media/libmediatranscoding/transcoder/benchmark/Android.bp
@@ -1,7 +1,9 @@
 cc_defaults {
     name: "benchmarkdefaults",
-    shared_libs: ["libmediatranscoder", "libmediandk", "libbase", "libbinder_ndk"],
-    static_libs: ["libgoogle-benchmark"],
+    shared_libs: ["libmediandk", "libbase", "libbinder_ndk", "libutils", "libnativewindow"],
+    static_libs: ["libmediatranscoder", "libgoogle-benchmark"],
+    test_config_template: "AndroidTestTemplate.xml",
+    test_suites: ["device-tests", "TranscoderBenchmarks"],
 }
 
 cc_test {
diff --git a/media/libmediatranscoding/transcoder/benchmark/AndroidTestTemplate.xml b/media/libmediatranscoding/transcoder/benchmark/AndroidTestTemplate.xml
new file mode 100644
index 0000000..64085d8
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/benchmark/AndroidTestTemplate.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration description="Unit test configuration for {MODULE}">
+    <option name="test-suite-tag" value="TranscoderBenchmarks" />
+    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+        <option name="cleanup" value="false" />
+        <option name="push-file" key="{MODULE}" value="/data/local/tmp/{MODULE}" />
+        <option name="push-file"
+            key="https://storage.googleapis.com/android_media/frameworks/av/media/libmediatranscoding/transcoder/benchmark/TranscodingBenchmark-1.1.zip?unzip=true"
+            value="/data/local/tmp/TranscodingBenchmark/" />
+    </target_preparer>
+
+    <test class="com.android.tradefed.testtype.GoogleBenchmarkTest" >
+        <option name="native-benchmark-device-path" value="/data/local/tmp" />
+        <option name="benchmark-module-name" value="{MODULE}" />
+    </test>
+</configuration>
+
diff --git a/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp b/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp
index 351d80b..d6ed2c6 100644
--- a/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp
+++ b/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp
@@ -167,6 +167,10 @@
         return AMEDIA_OK;
     }
 
+    media_status_t unselectTrack(int trackIndex __unused) override {
+        return AMEDIA_ERROR_UNSUPPORTED;
+    }
+
     media_status_t setEnforceSequentialAccess(bool enforce __unused) override { return AMEDIA_OK; }
 
     media_status_t getEstimatedBitrateForTrack(int trackIndex __unused,
diff --git a/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp b/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
index 93a08d1..9ee55e5 100644
--- a/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
+++ b/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
@@ -32,9 +32,12 @@
 #include <benchmark/benchmark.h>
 #include <fcntl.h>
 #include <media/MediaTranscoder.h>
+#include <iostream>
 
 using namespace android;
 
+const std::string PARAM_VIDEO_FRAME_RATE = "VideoFrameRate";
+
 class TranscoderCallbacks : public MediaTranscoder::CallbackInterface {
 public:
     virtual void onFinished(const MediaTranscoder* transcoder __unused) override {
@@ -151,7 +154,7 @@
             if (strncmp(mime, "video/", 6) == 0) {
                 int32_t frameCount;
                 if (AMediaFormat_getInt32(srcFormat, AMEDIAFORMAT_KEY_FRAME_COUNT, &frameCount)) {
-                    state.counters["VideoFrameRate"] =
+                    state.counters[PARAM_VIDEO_FRAME_RATE] =
                             benchmark::Counter(frameCount, benchmark::Counter::kIsRate);
                 }
             }
@@ -332,4 +335,69 @@
 TRANSCODER_BENCHMARK(BM_TranscodeAudioVideoPassthrough);
 TRANSCODER_BENCHMARK(BM_TranscodeVideoPassthrough);
 
-BENCHMARK_MAIN();
+class CustomCsvReporter : public benchmark::BenchmarkReporter {
+public:
+    CustomCsvReporter() : mPrintedHeader(false) {}
+    virtual bool ReportContext(const Context& context);
+    virtual void ReportRuns(const std::vector<Run>& reports);
+
+private:
+    void PrintRunData(const Run& report);
+
+    bool mPrintedHeader;
+    std::vector<std::string> mHeaders = {"name", "real_time", "cpu_time", PARAM_VIDEO_FRAME_RATE};
+};
+
+bool CustomCsvReporter::ReportContext(const Context& context __unused) {
+    return true;
+}
+
+void CustomCsvReporter::ReportRuns(const std::vector<Run>& reports) {
+    std::ostream& Out = GetOutputStream();
+
+    if (!mPrintedHeader) {
+        // print the header
+        for (auto header = mHeaders.begin(); header != mHeaders.end();) {
+            Out << *header++;
+            if (header != mHeaders.end()) Out << ",";
+        }
+        Out << "\n";
+        mPrintedHeader = true;
+    }
+
+    // print results for each run
+    for (const auto& run : reports) {
+        PrintRunData(run);
+    }
+}
+
+void CustomCsvReporter::PrintRunData(const Run& run) {
+    if (run.error_occurred) {
+        return;
+    }
+    std::ostream& Out = GetOutputStream();
+    Out << run.benchmark_name() << ",";
+    Out << run.GetAdjustedRealTime() << ",";
+    Out << run.GetAdjustedCPUTime() << ",";
+    auto frameRate = run.counters.find(PARAM_VIDEO_FRAME_RATE);
+    if (frameRate == run.counters.end()) {
+        Out << "NA"
+            << ",";
+    } else {
+        Out << frameRate->second << ",";
+    }
+    Out << '\n';
+}
+
+int main(int argc, char** argv) {
+    std::unique_ptr<benchmark::BenchmarkReporter> fileReporter;
+    for (int i = 1; i < argc; ++i) {
+        if (std::string(argv[i]).find("--benchmark_out") != std::string::npos) {
+            fileReporter.reset(new CustomCsvReporter);
+            break;
+        }
+    }
+    ::benchmark::Initialize(&argc, argv);
+    if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
+    ::benchmark::RunSpecifiedBenchmarks(nullptr, fileReporter.get());
+}
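With the custom reporter wired up in main() above, the benchmark writes a four-column CSV (name, real_time, cpu_time, VideoFrameRate) to the file reporter whenever any --benchmark_out argument is supplied, for example MediaTranscoderBenchmark --benchmark_out=/data/local/tmp/transcoder.csv (the output path here is only illustrative). Runs without that flag fall back to the default console reporter.
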
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaSampleReader.h b/media/libmediatranscoding/transcoder/include/media/MediaSampleReader.h
index 7b6fbef..5c7eeac 100644
--- a/media/libmediatranscoding/transcoder/include/media/MediaSampleReader.h
+++ b/media/libmediatranscoding/transcoder/include/media/MediaSampleReader.h
@@ -69,6 +69,13 @@
     virtual media_status_t selectTrack(int trackIndex) = 0;
 
     /**
+     * Undo a track selection.
+     * @param trackIndex The track to un-select.
+     * @return AMEDIA_OK on success.
+     */
+    virtual media_status_t unselectTrack(int trackIndex) = 0;
+
+    /**
      * Toggles sequential access enforcement on or off. When the reader enforces sequential access
      * calls to read sample information will block unless the underlying extractor points to the
      * specified track.
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaSampleReaderNDK.h b/media/libmediatranscoding/transcoder/include/media/MediaSampleReaderNDK.h
index 2032def..30cc37f 100644
--- a/media/libmediatranscoding/transcoder/include/media/MediaSampleReaderNDK.h
+++ b/media/libmediatranscoding/transcoder/include/media/MediaSampleReaderNDK.h
@@ -48,6 +48,7 @@
     size_t getTrackCount() const override;
     AMediaFormat* getTrackFormat(int trackIndex) override;
     media_status_t selectTrack(int trackIndex) override;
+    media_status_t unselectTrack(int trackIndex) override;
     media_status_t setEnforceSequentialAccess(bool enforce) override;
     media_status_t getEstimatedBitrateForTrack(int trackIndex, int32_t* bitrate) override;
     media_status_t getSampleInfoForTrack(int trackIndex, MediaSampleInfo* info) override;
diff --git a/media/libmediatranscoding/transcoder/setloglevel.sh b/media/libmediatranscoding/transcoder/setloglevel.sh
new file mode 100755
index 0000000..5eb7b67
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/setloglevel.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+if [ $# -ne 1 ]
+then
+    echo Usage: $0 loglevel
+    exit 1
+fi
+
+level=$1
+echo Setting transcoder log level to $level
+
+# List all log tags
+declare -a tags=(
+  MediaTranscoder MediaTrackTranscoder VideoTrackTranscoder PassthroughTrackTranscoder
+  MediaSampleWriter MediaSampleReader MediaSampleQueue MediaTranscoderTests
+  MediaTrackTranscoderTests VideoTrackTranscoderTests PassthroughTrackTranscoderTests
+  MediaSampleWriterTests MediaSampleReaderNDKTests MediaSampleQueueTests)
+
+# Set log level for all tags
+for tag in "${tags[@]}"
+do
+    adb shell setprop log.tag.${tag} $level
+done
+
+# Pick up new settings
+adb shell stop && adb shell start
diff --git a/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp
index 5c59992..bfc1f3b 100644
--- a/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp
+++ b/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp
@@ -283,20 +283,22 @@
         }
 
         EXPECT_NE(videoFormat, nullptr);
+        if (videoFormat != nullptr) {
+            LOG(INFO) << "source video format: " << AMediaFormat_toString(mSourceVideoFormat.get());
+            LOG(INFO) << "transcoded video format: " << AMediaFormat_toString(videoFormat.get());
 
-        LOG(INFO) << "source video format: " << AMediaFormat_toString(mSourceVideoFormat.get());
-        LOG(INFO) << "transcoded video format: " << AMediaFormat_toString(videoFormat.get());
+            for (int i = 0; i < (sizeof(kFieldsToPreserve) / sizeof(kFieldsToPreserve[0])); ++i) {
+                EXPECT_TRUE(kFieldsToPreserve[i].equal(kFieldsToPreserve[i].key,
+                                                       mSourceVideoFormat.get(), videoFormat.get()))
+                        << "Failed at key " << kFieldsToPreserve[i].key;
+            }
 
-        for (int i = 0; i < (sizeof(kFieldsToPreserve) / sizeof(kFieldsToPreserve[0])); ++i) {
-            EXPECT_TRUE(kFieldsToPreserve[i].equal(kFieldsToPreserve[i].key,
-                                                   mSourceVideoFormat.get(), videoFormat.get()))
-                    << "Failed at key " << kFieldsToPreserve[i].key;
-        }
-
-        if (extraVerifiers != nullptr) {
-            for (int i = 0; i < extraVerifiers->size(); ++i) {
-                const FormatVerifierEntry& entry = (*extraVerifiers)[i];
-                EXPECT_TRUE(entry.equal(entry.key, mSourceVideoFormat.get(), videoFormat.get()));
+            if (extraVerifiers != nullptr) {
+                for (int i = 0; i < extraVerifiers->size(); ++i) {
+                    const FormatVerifierEntry& entry = (*extraVerifiers)[i];
+                    EXPECT_TRUE(
+                            entry.equal(entry.key, mSourceVideoFormat.get(), videoFormat.get()));
+                }
             }
         }
 
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 71c1b0b..8f1da0d 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -7078,10 +7078,9 @@
         return err;
     }
 
-    using hardware::media::omx::V1_0::utils::TWOmxNode;
     err = statusFromBinderStatus(
             mCodec->mGraphicBufferSource->configure(
-                    new TWOmxNode(mCodec->mOMXNode),
+                    mCodec->mOMXNode->getHalInterface<IOmxNode>(),
                     static_cast<hardware::graphics::common::V1_0::Dataspace>(dataSpace)));
     if (err != OK) {
         ALOGE("[%s] Unable to configure for node (err %d)",
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index e783578..d11408d 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -44,7 +44,7 @@
 namespace android {
 
 static const int64_t kBufferTimeOutUs = 10000LL; // 10 msec
-static const size_t kRetryCount = 50; // must be >0
+static const size_t kRetryCount = 100; // must be >0
 static const int64_t kDefaultSampleDurationUs = 33333LL; // 33ms
 
 sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 0af97df..447d599 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -1137,11 +1137,24 @@
     if (!truncatePreAllocation()) {
         if (err == OK) { err = ERROR_IO; }
     }
+
+    // TODO(b/174770856) remove this measurement (and perhaps the fsync)
+    nsecs_t sync_started = systemTime(SYSTEM_TIME_REALTIME);
     if (fsync(mFd) != 0) {
         ALOGW("(ignored)fsync err:%s(%d)", std::strerror(errno), errno);
         // Don't bubble up fsync error, b/157291505.
         // if (err == OK) { err = ERROR_IO; }
     }
+    nsecs_t sync_finished = systemTime(SYSTEM_TIME_REALTIME);
+    nsecs_t sync_elapsed_ns = sync_finished - sync_started;
+    int64_t filesize = -1;
+    struct stat statbuf;
+    if (fstat(mFd, &statbuf) == 0) {
+        filesize = statbuf.st_size;
+    }
+    ALOGD("final fsync() takes %" PRId64 " ms, file size %" PRId64,
+          sync_elapsed_ns / 1000000, (int64_t) filesize);
+
     if (close(mFd) != 0) {
         ALOGE("close err:%s(%d)", std::strerror(errno), errno);
         if (err == OK) { err = ERROR_IO; }
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 6245014..f2c7dd6 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -50,8 +50,9 @@
       mSampleTimeUs(timeUs) {
 }
 
-NuMediaExtractor::NuMediaExtractor()
-    : mTotalBitrate(-1LL),
+NuMediaExtractor::NuMediaExtractor(EntryPoint entryPoint)
+    : mEntryPoint(entryPoint),
+      mTotalBitrate(-1LL),
       mDurationUs(-1LL) {
 }
 
@@ -93,6 +94,7 @@
     if (mImpl == NULL) {
         return ERROR_UNSUPPORTED;
     }
+    setEntryPointToRemoteMediaExtractor();
 
     status_t err = OK;
     if (!mCasToken.empty()) {
@@ -134,6 +136,7 @@
     if (mImpl == NULL) {
         return ERROR_UNSUPPORTED;
     }
+    setEntryPointToRemoteMediaExtractor();
 
     if (!mCasToken.empty()) {
         err = mImpl->setMediaCas(mCasToken);
@@ -168,6 +171,7 @@
     if (mImpl == NULL) {
         return ERROR_UNSUPPORTED;
     }
+    setEntryPointToRemoteMediaExtractor();
 
     if (!mCasToken.empty()) {
         err = mImpl->setMediaCas(mCasToken);
@@ -489,6 +493,16 @@
     }
 }
 
+void NuMediaExtractor::setEntryPointToRemoteMediaExtractor() {
+    if (mImpl == NULL) {
+        return;
+    }
+    status_t err = mImpl->setEntryPoint(mEntryPoint);
+    if (err != OK) {
+        ALOGW("Failed to set entry point with error %d.", err);
+    }
+}
+
 ssize_t NuMediaExtractor::fetchAllTrackSamples(
         int64_t seekTimeUs, MediaSource::ReadOptions::SeekMode mode) {
     TrackInfo *minInfo = NULL;
diff --git a/media/libstagefright/RemoteMediaExtractor.cpp b/media/libstagefright/RemoteMediaExtractor.cpp
index 25e43c2..381eb1a 100644
--- a/media/libstagefright/RemoteMediaExtractor.cpp
+++ b/media/libstagefright/RemoteMediaExtractor.cpp
@@ -39,6 +39,12 @@
 static const char *kExtractorFormat = "android.media.mediaextractor.fmt";
 static const char *kExtractorMime = "android.media.mediaextractor.mime";
 static const char *kExtractorTracks = "android.media.mediaextractor.ntrk";
+static const char *kExtractorEntryPoint = "android.media.mediaextractor.entry";
+
+static const char *kEntryPointSdk = "sdk";
+static const char *kEntryPointWithJvm = "ndk-with-jvm";
+static const char *kEntryPointNoJvm = "ndk-no-jvm";
+static const char *kEntryPointOther = "other";
 
 RemoteMediaExtractor::RemoteMediaExtractor(
         MediaExtractor *extractor,
@@ -74,6 +80,9 @@
             }
             // what else is interesting and not already available?
         }
+        // By default, we set the entry point to be "other". Clients of this
+        // class will override this value by calling setEntryPoint.
+        mMetricsItem->setCString(kExtractorEntryPoint, kEntryPointOther);
     }
 }
 
@@ -143,6 +152,28 @@
     return String8(mExtractor->name());
 }
 
+status_t RemoteMediaExtractor::setEntryPoint(EntryPoint entryPoint) {
+    const char* entryPointString;
+    switch (entryPoint) {
+        case EntryPoint::SDK:
+            entryPointString = kEntryPointSdk;
+            break;
+        case EntryPoint::NDK_WITH_JVM:
+            entryPointString = kEntryPointWithJvm;
+            break;
+        case EntryPoint::NDK_NO_JVM:
+            entryPointString = kEntryPointNoJvm;
+            break;
+        case EntryPoint::OTHER:
+            entryPointString = kEntryPointOther;
+            break;
+        default:
+            return BAD_VALUE;
+    }
+    mMetricsItem->setCString(kExtractorEntryPoint, entryPointString);
+    return OK;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 // static
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 76fc74f..dff7b22 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -11,7 +11,7 @@
 
   ],
 
-  "presubmit": [
+  "presubmit-large": [
     {
       "name": "CtsMediaTestCases",
       "options": [
@@ -29,7 +29,9 @@
           "exclude-filter": "android.media.cts.AudioRecordTest"
         }
       ]
-    },
+    }
+  ],
+  "presubmit": [
     {
       "name": "mediacodecTest"
     }
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 48b3255..fbaa38e 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -2192,7 +2192,7 @@
 #ifdef DISABLE_AUDIO_SYSTEM_OFFLOAD
     return false;
 #else
-    return AudioSystem::isOffloadSupported(info);
+    return AudioSystem::getOffloadSupport(info) != AUDIO_OFFLOAD_NOT_SUPPORTED;
 #endif
 }
 
diff --git a/media/libstagefright/id3/TEST_MAPPING b/media/libstagefright/id3/TEST_MAPPING
index d070d25..d82d26e 100644
--- a/media/libstagefright/id3/TEST_MAPPING
+++ b/media/libstagefright/id3/TEST_MAPPING
@@ -7,7 +7,7 @@
     { "name": "ID3Test" }
   ],
 
-  "presubmit": [
+  "presubmit-large": [
     // this doesn't seem to run any tests.
     // but: cts-tradefed run -m CtsMediaTestCases -t android.media.cts.MediaMetadataRetrieverTest
     // does run he 32 and 64 bit tests, but not the instant tests
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index 227cead..d8f2b00 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -47,12 +47,14 @@
         SAMPLE_FLAG_ENCRYPTED   = 2,
     };
 
+    typedef IMediaExtractor::EntryPoint EntryPoint;
+
     // identical to IMediaExtractor::GetTrackMetaDataFlags
     enum GetTrackFormatFlags {
         kIncludeExtensiveMetaData = 1, // reads sample table and possibly stream headers
     };
 
-    NuMediaExtractor();
+    explicit NuMediaExtractor(EntryPoint entryPoint);
 
     status_t setDataSource(
             const sp<MediaHTTPService> &httpService,
@@ -128,6 +130,8 @@
         uint32_t mTrackFlags;  // bitmask of "TrackFlags"
     };
 
+    const EntryPoint mEntryPoint;
+
     mutable Mutex mLock;
 
     sp<DataSource> mDataSource;
@@ -139,6 +143,8 @@
     int64_t mTotalBitrate;  // in bits/sec
     int64_t mDurationUs;
 
+    void setEntryPointToRemoteMediaExtractor();
+
     ssize_t fetchAllTrackSamples(
             int64_t seekTimeUs = -1ll,
             MediaSource::ReadOptions::SeekMode mode =
diff --git a/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h b/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
index 2ce7bc7..25125f2 100644
--- a/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
@@ -42,6 +42,7 @@
     virtual uint32_t flags() const;
     virtual status_t setMediaCas(const HInterfaceToken &casToken);
     virtual String8 name();
+    virtual status_t setEntryPoint(EntryPoint entryPoint);
 
 private:
     MediaExtractor *mExtractor;
diff --git a/media/libstagefright/renderfright/include/renderengine/RenderEngine.h b/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
index 09a0f65..40fdff4 100644
--- a/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
+++ b/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
@@ -33,7 +33,7 @@
 /**
  * Allows to set RenderEngine backend to GLES (default) or Vulkan (NOT yet supported).
  */
-#define PROPERTY_DEBUG_RENDERENGINE_BACKEND "debug.renderengine.backend"
+#define PROPERTY_DEBUG_RENDERENGINE_BACKEND "debug.stagefright.renderengine.backend"
 
 struct ANativeWindowBuffer;
 
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index f57077c..07f9dd3 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -131,7 +131,7 @@
     unsigned start = (unsigned)((rand()* 1000LL)/RAND_MAX) + 15550;
     start &= ~1;
 
-    for (unsigned port = start; port < 65536; port += 2) {
+    for (unsigned port = start; port < 65535; port += 2) {
         struct sockaddr_in addr;
         memset(addr.sin_zero, 0, sizeof(addr.sin_zero));
         addr.sin_family = AF_INET;
@@ -149,6 +149,13 @@
                  (const struct sockaddr *)&addr, sizeof(addr)) == 0) {
             *rtpPort = port;
             return;
+        } else {
+            // Recreate the RTP socket after a failed bind so we don't reuse it to bind another port.
+            close(*rtpSocket);
+
+            *rtpSocket = socket(AF_INET, SOCK_DGRAM, 0);
+            CHECK_GE(*rtpSocket, 0);
+            bumpSocketBufferSize(*rtpSocket);
         }
     }
 
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 0da0740..0c65e9e 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -22,6 +22,7 @@
 #include <media/NdkMediaExtractor.h>
 #include <media/NdkMediaErrorPriv.h>
 #include <media/NdkMediaFormatPriv.h>
+#include "NdkJavaVMHelperPriv.h"
 #include "NdkMediaDataSourcePriv.h"
 
 
@@ -63,7 +64,10 @@
 AMediaExtractor* AMediaExtractor_new() {
     ALOGV("ctor");
     AMediaExtractor *mData = new AMediaExtractor();
-    mData->mImpl = new NuMediaExtractor();
+    mData->mImpl = new NuMediaExtractor(
+        NdkJavaVMHelper::getJNIEnv() != nullptr
+                ? NuMediaExtractor::EntryPoint::NDK_WITH_JVM
+                : NuMediaExtractor::EntryPoint::NDK_NO_JVM);
     return mData;
 }
 
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 87ea084..e428e49 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -22,6 +22,7 @@
 #include <binder/IServiceManager.h>
 #include <binder/PermissionCache.h>
 #include "mediautils/ServiceUtilities.h"
+#include <system/audio-hal-enums.h>
 
 #include <iterator>
 #include <algorithm>
@@ -61,8 +62,20 @@
     return packages[0];
 }
 
+static int32_t getOpForSource(audio_source_t source) {
+  switch (source) {
+    case AUDIO_SOURCE_HOTWORD:
+      return AppOpsManager::OP_RECORD_AUDIO_HOTWORD;
+    case AUDIO_SOURCE_REMOTE_SUBMIX:
+      return AppOpsManager::OP_RECORD_AUDIO_OUTPUT;
+    case AUDIO_SOURCE_DEFAULT:
+    default:
+      return AppOpsManager::OP_RECORD_AUDIO;
+  }
+}
+
 static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
-        uid_t uid, bool start) {
+        uid_t uid, bool start, audio_source_t source) {
     // Okay to not track in app ops as audio server or media server is us and if
     // device is rooted security model is considered compromised.
     // system_server loses its RECORD_AUDIO permission when a secondary
@@ -87,16 +100,21 @@
     }
 
     AppOpsManager appOps;
-    const int32_t op = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
+    const int32_t op = getOpForSource(source);
     if (start) {
-        if (appOps.startOpNoThrow(op, uid, resolvedOpPackageName, /*startIfModeDefault*/ false)
-                != AppOpsManager::MODE_ALLOWED) {
-            ALOGE("Request denied by app op: %d", op);
+        if (int32_t mode = appOps.startOpNoThrow(
+                        op, uid, resolvedOpPackageName, /*startIfModeDefault*/ false);
+                mode != AppOpsManager::MODE_ALLOWED) {
+            ALOGE("Request start for \"%s\" (uid %d) denied by app op: %d, mode: %d",
+                    String8(resolvedOpPackageName).c_str(), uid, op, mode);
             return false;
         }
     } else {
-        if (appOps.checkOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
-            ALOGE("Request denied by app op: %d", op);
+        // Always use OP_RECORD_AUDIO for checks at creation time.
+        if (int32_t mode = appOps.checkOp(op, uid, resolvedOpPackageName);
+                mode != AppOpsManager::MODE_ALLOWED) {
+            ALOGE("Request check for \"%s\" (uid %d) denied by app op: %d, mode: %d",
+                    String8(resolvedOpPackageName).c_str(), uid, op, mode);
             return false;
         }
     }
@@ -105,14 +123,14 @@
 }
 
 bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid) {
-    return checkRecordingInternal(opPackageName, pid, uid, /*start*/ false);
+    return checkRecordingInternal(opPackageName, pid, uid, /*start*/ false, AUDIO_SOURCE_DEFAULT);
 }
 
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid) {
-     return checkRecordingInternal(opPackageName, pid, uid, /*start*/ true);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid, audio_source_t source) {
+     return checkRecordingInternal(opPackageName, pid, uid, /*start*/ true, source);
 }
 
-void finishRecording(const String16& opPackageName, uid_t uid) {
+void finishRecording(const String16& opPackageName, uid_t uid, audio_source_t source) {
     // Okay to not track in app ops as audio server is us and if
     // device is rooted security model is considered compromised.
     if (isAudioServerOrRootUid(uid)) return;
@@ -125,7 +143,8 @@
     }
 
     AppOpsManager appOps;
-    const int32_t op = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
+
+    const int32_t op = getOpForSource(source);
     appOps.finishOp(op, uid, resolvedOpPackageName);
 }
 
diff --git a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
index 3d141b5..f4c815c 100644
--- a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
+++ b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
@@ -17,6 +17,7 @@
 #include <fcntl.h>
 
 #include <functional>
+#include <type_traits>
 
 #include "fuzzer/FuzzedDataProvider.h"
 #include "mediautils/ServiceUtilities.h"
@@ -44,6 +45,8 @@
     FuzzedDataProvider data_provider(data, size);
     uid_t uid = data_provider.ConsumeIntegral<uid_t>();
     pid_t pid = data_provider.ConsumeIntegral<pid_t>();
+    audio_source_t source = static_cast<audio_source_t>(data_provider
+        .ConsumeIntegral<std::underlying_type_t<audio_source_t>>());
 
     // There is not state here, and order is not significant,
     // so we can simply call all of the target functions
@@ -54,8 +57,8 @@
     std::string packageNameStr = data_provider.ConsumeRandomLengthString(kMaxStringLen);
     android::String16 opPackageName(packageNameStr.c_str());
     android::recordingAllowed(opPackageName, pid, uid);
-    android::startRecording(opPackageName, pid, uid);
-    android::finishRecording(opPackageName, uid);
+    android::startRecording(opPackageName, pid, uid, source);
+    android::finishRecording(opPackageName, uid, source);
     android::captureAudioOutputAllowed(pid, uid);
     android::captureMediaOutputAllowed(pid, uid);
     android::captureHotwordAllowed(opPackageName, pid, uid);
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 212599a..0a82af6 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -24,6 +24,7 @@
 #include <binder/PermissionController.h>
 #include <cutils/multiuser.h>
 #include <private/android_filesystem_config.h>
+#include <system/audio-hal-enums.h>
 
 #include <map>
 #include <optional>
@@ -79,8 +80,8 @@
 }
 
 bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
-void finishRecording(const String16& opPackageName, uid_t uid);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid, audio_source_t source);
+void finishRecording(const String16& opPackageName, uid_t uid, audio_source_t source);
 bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
 bool captureMediaOutputAllowed(pid_t pid, uid_t uid);
 bool captureVoiceCommunicationOutputAllowed(pid_t pid, uid_t uid);
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index e7a12df..78ad467 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -185,9 +185,15 @@
 
 // ----------------------------------------------------------------------------
 
+void AudioFlinger::instantiate() {
+    sp<IServiceManager> sm(defaultServiceManager());
+    sm->addService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME),
+                   new AudioFlingerServerAdapter(new AudioFlinger()), false,
+                   IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT);
+}
+
 AudioFlinger::AudioFlinger()
-    : BnAudioFlinger(),
-      mMediaLogNotifier(new AudioFlinger::MediaLogNotifier()),
+    : mMediaLogNotifier(new AudioFlinger::MediaLogNotifier()),
       mPrimaryHardwareDev(NULL),
       mAudioHwDevs(NULL),
       mHardwareStatus(AUDIO_HW_IDLE),
@@ -2587,7 +2593,7 @@
     sp<DeviceDescriptorBase> device = VALUE_OR_RETURN_STATUS(
             aidl2legacy_DeviceDescriptorBase(request.device));
     audio_output_flags_t flags = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_audio_output_flags_mask(request.flags));
+            aidl2legacy_int32_t_audio_output_flags_t_mask(request.flags));
 
     audio_io_handle_t output;
     uint32_t latencyMs;
@@ -2637,7 +2643,8 @@
         response->output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
         response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
         response->latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(latencyMs));
-        response->flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_mask(flags));
+        response->flags = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
         return NO_ERROR;
     }
 
@@ -2813,7 +2820,7 @@
             device.mType,
             device.address().c_str(),
             VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSourceType_audio_source_t(request.source)),
-            VALUE_OR_RETURN_STATUS(aidl2legacy_audio_input_flags_mask(request.flags)),
+            VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_input_flags_t_mask(request.flags)),
             AUDIO_DEVICE_NONE,
             String8{});
 
@@ -4042,42 +4049,40 @@
 
 // ----------------------------------------------------------------------------
 
-status_t AudioFlinger::onTransact(
-        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+status_t AudioFlinger::onPreTransact(
+        TransactionCode code, const Parcel& /* data */, uint32_t /* flags */)
 {
     // make sure transactions reserved to AudioPolicyManager do not come from other processes
     switch (code) {
-        case SET_STREAM_VOLUME:
-        case SET_STREAM_MUTE:
-        case OPEN_OUTPUT:
-        case OPEN_DUPLICATE_OUTPUT:
-        case CLOSE_OUTPUT:
-        case SUSPEND_OUTPUT:
-        case RESTORE_OUTPUT:
-        case OPEN_INPUT:
-        case CLOSE_INPUT:
-        case INVALIDATE_STREAM:
-        case SET_VOICE_VOLUME:
-        case MOVE_EFFECTS:
-        case SET_EFFECT_SUSPENDED:
-        case LOAD_HW_MODULE:
-        case LIST_AUDIO_PORTS:
-        case GET_AUDIO_PORT:
-        case CREATE_AUDIO_PATCH:
-        case RELEASE_AUDIO_PATCH:
-        case LIST_AUDIO_PATCHES:
-        case SET_AUDIO_PORT_CONFIG:
-        case SET_RECORD_SILENCED:
+        case TransactionCode::SET_STREAM_VOLUME:
+        case TransactionCode::SET_STREAM_MUTE:
+        case TransactionCode::OPEN_OUTPUT:
+        case TransactionCode::OPEN_DUPLICATE_OUTPUT:
+        case TransactionCode::CLOSE_OUTPUT:
+        case TransactionCode::SUSPEND_OUTPUT:
+        case TransactionCode::RESTORE_OUTPUT:
+        case TransactionCode::OPEN_INPUT:
+        case TransactionCode::CLOSE_INPUT:
+        case TransactionCode::INVALIDATE_STREAM:
+        case TransactionCode::SET_VOICE_VOLUME:
+        case TransactionCode::MOVE_EFFECTS:
+        case TransactionCode::SET_EFFECT_SUSPENDED:
+        case TransactionCode::LOAD_HW_MODULE:
+        case TransactionCode::GET_AUDIO_PORT:
+        case TransactionCode::CREATE_AUDIO_PATCH:
+        case TransactionCode::RELEASE_AUDIO_PATCH:
+        case TransactionCode::LIST_AUDIO_PATCHES:
+        case TransactionCode::SET_AUDIO_PORT_CONFIG:
+        case TransactionCode::SET_RECORD_SILENCED:
             ALOGW("%s: transaction %d received from PID %d",
                   __func__, code, IPCThreadState::self()->getCallingPid());
             // return status only for non void methods
             switch (code) {
-                case SET_RECORD_SILENCED:
-                case SET_EFFECT_SUSPENDED:
+                case TransactionCode::SET_RECORD_SILENCED:
+                case TransactionCode::SET_EFFECT_SUSPENDED:
                     break;
                 default:
-                    reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
-                    break;
+                    return INVALID_OPERATION;
             }
             return OK;
         default:
@@ -4086,24 +4091,24 @@
 
     // make sure the following transactions come from system components
     switch (code) {
-        case SET_MASTER_VOLUME:
-        case SET_MASTER_MUTE:
-        case SET_MODE:
-        case SET_MIC_MUTE:
-        case SET_LOW_RAM_DEVICE:
-        case SYSTEM_READY:
-        case SET_AUDIO_HAL_PIDS: {
+        case TransactionCode::SET_MASTER_VOLUME:
+        case TransactionCode::SET_MASTER_MUTE:
+        case TransactionCode::MASTER_MUTE:
+        case TransactionCode::SET_MODE:
+        case TransactionCode::SET_MIC_MUTE:
+        case TransactionCode::SET_LOW_RAM_DEVICE:
+        case TransactionCode::SYSTEM_READY:
+        case TransactionCode::SET_AUDIO_HAL_PIDS: {
             if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
                 ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
                       __func__, code, IPCThreadState::self()->getCallingPid(),
                       IPCThreadState::self()->getCallingUid());
                 // return status only for non void methods
                 switch (code) {
-                    case SYSTEM_READY:
+                    case TransactionCode::SYSTEM_READY:
                         break;
                     default:
-                        reply->writeInt32(static_cast<int32_t> (INVALID_OPERATION));
-                        break;
+                        return INVALID_OPERATION;
                 }
                 return OK;
             }
@@ -4117,14 +4122,14 @@
     // most relevant events.
     // TODO should select more wisely the items from the list
     switch (code) {
-        case CREATE_TRACK:
-        case CREATE_RECORD:
-        case SET_MASTER_VOLUME:
-        case SET_MASTER_MUTE:
-        case SET_MIC_MUTE:
-        case SET_PARAMETERS:
-        case CREATE_EFFECT:
-        case SYSTEM_READY: {
+        case TransactionCode::CREATE_TRACK:
+        case TransactionCode::CREATE_RECORD:
+        case TransactionCode::SET_MASTER_VOLUME:
+        case TransactionCode::SET_MASTER_MUTE:
+        case TransactionCode::SET_MIC_MUTE:
+        case TransactionCode::SET_PARAMETERS:
+        case TransactionCode::CREATE_EFFECT:
+        case TransactionCode::SYSTEM_READY: {
             requestLogMerge();
             break;
         }
@@ -4132,7 +4137,8 @@
             break;
     }
 
-    std::string tag("IAudioFlinger command " + std::to_string(code));
+    std::string tag("IAudioFlinger command " +
+                    std::to_string(static_cast<std::underlying_type_t<TransactionCode>>(code)));
     TimeCheck check(tag.c_str());
 
     // Make sure we connect to Audio Policy Service before calling into AudioFlinger:
@@ -4145,7 +4151,7 @@
         AudioSystem::get_audio_policy_service();
     }
 
-    return BnAudioFlinger::onTransact(code, data, reply, flags);
+    return OK;
 }
 
 } // namespace android
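The onPreTransact hunk above builds the TimeCheck tag by casting the scoped TransactionCode enum to its underlying type before stringifying it. A small standalone sketch of that cast, with a stand-in enum rather than the AIDL-generated one:

#include <cstdint>
#include <iostream>
#include <string>
#include <type_traits>

// Stand-in enum; the real TransactionCode values come from the AIDL interface.
enum class TransactionCode : uint32_t { CREATE_TRACK = 1, SYSTEM_READY = 27 };

int main() {
    TransactionCode code = TransactionCode::SYSTEM_READY;
    // Scoped enums do not convert implicitly, so the value is cast to its
    // underlying integer type before being appended to the tag string.
    std::string tag("IAudioFlinger command " +
                    std::to_string(static_cast<std::underlying_type_t<TransactionCode>>(code)));
    std::cout << tag << '\n';  // prints "IAudioFlinger command 27"
    return 0;
}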
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index a2e50f8..1cf1e67 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -123,16 +123,12 @@
 
 #define INCLUDING_FROM_AUDIOFLINGER_H
 
-class AudioFlinger :
-    public BinderService<AudioFlinger>,
-    public BnAudioFlinger
+class AudioFlinger : public AudioFlingerServerAdapter::Delegate
 {
-    friend class BinderService<AudioFlinger>;   // for AudioFlinger()
-
 public:
-    static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
+    static void instantiate() ANDROID_API;
 
-    virtual     status_t    dump(int fd, const Vector<String16>& args);
+    status_t dump(int fd, const Vector<String16>& args) override;
 
     // IAudioFlinger interface, in binder opcode order
     status_t createTrack(const media::CreateTrackRequest& input,
@@ -270,11 +266,7 @@
 
     virtual status_t setAudioHalPids(const std::vector<pid_t>& pids);
 
-    virtual     status_t    onTransact(
-                                uint32_t code,
-                                const Parcel& data,
-                                Parcel* reply,
-                                uint32_t flags);
+    status_t onPreTransact(TransactionCode code, const Parcel& data, uint32_t flags) override;
 
     // end of IAudioFlinger interface
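AudioFlinger now derives from AudioFlingerServerAdapter::Delegate and exposes onPreTransact() instead of overriding onTransact() directly. One plausible shape of that delegation, sketched with hypothetical stand-in types (the real adapter lives in libaudioclient and its exact contract may differ):

#include <memory>

using status_t = int;
constexpr status_t OK = 0;

// Stand-in for the delegate interface: a pre-transaction hook plus the real work.
struct Delegate {
    virtual ~Delegate() = default;
    virtual status_t onPreTransact(int code) = 0;  // permission / policy gate
    virtual status_t doTransact(int code) = 0;     // the actual service method
};

// Stand-in for the server adapter that owns the binder entry point.
class ServerAdapter {
public:
    explicit ServerAdapter(std::unique_ptr<Delegate> delegate)
        : mDelegate(std::move(delegate)) {}

    status_t onTransact(int code) {
        // Assumed behavior: a non-OK status from the hook is returned to the
        // caller directly, rather than being written into a reply Parcel.
        status_t status = mDelegate->onPreTransact(code);
        if (status != OK) return status;
        return mDelegate->doTransact(code);
    }

private:
    std::unique_ptr<Delegate> mDelegate;
};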
 
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index eaad6ef..3ab7737 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -60,6 +60,7 @@
 
 namespace android {
 
+using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
 
 namespace {
@@ -3027,7 +3028,7 @@
                 bs = handle.second->disable(&status);
             }
             if (!bs.isOk()) {
-              status = bs.transactionError();
+              status = statusTFromBinderStatus(bs);
             }
         }
     }
@@ -3142,7 +3143,7 @@
             bs = (*handle)->disable(&status);
         }
         if (!bs.isOk()) {
-            status = bs.transactionError();
+            status = statusTFromBinderStatus(bs);
         }
     }
     return status;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index b13b7be..ab2bc32 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -8684,6 +8684,7 @@
 
 void AudioFlinger::RecordThread::updateOutDevices(const DeviceDescriptorBaseVector& outDevices)
 {
+    Mutex::Autolock _l(mLock);
     mOutDevices = outDevices;
     mOutDeviceTypeAddrs = deviceTypeAddrsFromDescriptors(mOutDevices);
     for (size_t i = 0; i < mEffectChains.size(); i++) {
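The one-line Threads.cpp fix above takes the thread lock before overwriting mOutDevices and the derived address list, so concurrent readers never observe a half-updated pair. A minimal sketch of the same idea with standard-library types, std::mutex standing in for the thread's mLock:

#include <mutex>
#include <vector>

struct RecordThreadModel {
    std::mutex lock;
    std::vector<int> outDevices;  // stand-in for the device descriptor vector

    void updateOutDevices(const std::vector<int>& devices) {
        std::lock_guard<std::mutex> guard(lock);  // analogous to Mutex::Autolock _l(mLock)
        outDevices = devices;  // mutation happens only while the lock is held
    }
};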
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 68b709f..6049f62 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -54,6 +54,7 @@
 
 namespace android {
 
+using aidl_utils::binderStatusFromStatusT;
 using binder::Status;
 using media::VolumeShaper;
 // ----------------------------------------------------------------------------
@@ -371,7 +372,7 @@
     if (*_aidl_return != OK) {
         return Status::ok();
     }
-    *timestamp = legacy2aidl_AudioTimestamp(legacy).value();
+    *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
     return Status::ok();
 }
 
@@ -2132,7 +2133,7 @@
 binder::Status AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
         int /*audio_session_t*/ triggerSession) {
     ALOGV("%s()", __func__);
-    return binder::Status::fromStatusT(
+    return binderStatusFromStatusT(
         mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
 }
 
@@ -2155,19 +2156,19 @@
     for (size_t i = 0; status == OK && i < mics.size(); ++i) {
        status = mics[i].writeToParcelable(&activeMicrophones->at(i));
     }
-    return binder::Status::fromStatusT(status);
+    return binderStatusFromStatusT(status);
 }
 
 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
         int /*audio_microphone_direction_t*/ direction) {
     ALOGV("%s()", __func__);
-    return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
+    return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
             static_cast<audio_microphone_direction_t>(direction)));
 }
 
 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
     ALOGV("%s()", __func__);
-    return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
+    return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
 }
 
 // ----------------------------------------------------------------------------
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index b00426f..f753836 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -220,16 +220,16 @@
     virtual status_t    dump(int fd) = 0;
 
     virtual status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t flags) = 0;
-    virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo) = 0;
+    virtual audio_offload_mode_t getOffloadSupport(const audio_offload_info_t& offloadInfo) = 0;
     virtual bool isDirectOutputSupported(const audio_config_base_t& config,
                                          const audio_attributes_t& attributes) = 0;
 
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
                                     unsigned int *num_ports,
-                                    struct audio_port *ports,
+                                    struct audio_port_v7 *ports,
                                     unsigned int *generation) = 0;
-    virtual status_t getAudioPort(struct audio_port *port) = 0;
+    virtual status_t getAudioPort(struct audio_port_v7 *port) = 0;
     virtual status_t createAudioPatch(const struct audio_patch *patch,
                                        audio_patch_handle_t *handle,
                                        uid_t uid) = 0;
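The interface change above replaces the boolean isOffloadSupported() with getOffloadSupport(), which reports not-supported, supported, or gapless-supported. A hedged sketch of how a caller might branch on that tri-state result; the enum below is a local stand-in mirroring the AUDIO_OFFLOAD_* constants used later in this change:

enum audio_offload_mode_t {
    AUDIO_OFFLOAD_NOT_SUPPORTED = 0,
    AUDIO_OFFLOAD_SUPPORTED,
    AUDIO_OFFLOAD_GAPLESS_SUPPORTED,
};

// Returns true if offload can be used; gapless playback additionally requires
// a profile advertising AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD.
bool canOffload(audio_offload_mode_t mode, bool needsGapless) {
    if (mode == AUDIO_OFFLOAD_NOT_SUPPORTED) return false;
    return !needsGapless || mode == AUDIO_OFFLOAD_GAPLESS_SUPPORTED;
}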
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 6f47abc..a40f6aa 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -72,7 +72,7 @@
             const struct audio_port_config *srcConfig = NULL) const;
     virtual sp<AudioPort> getAudioPort() const { return mProfile; }
 
-    void toAudioPort(struct audio_port *port) const;
+    void toAudioPort(struct audio_port_v7 *port) const;
     void setPreemptedSessions(const SortedVector<audio_session_t>& sessions);
     SortedVector<audio_session_t> getPreemptedSessions() const;
     bool hasPreemptedSession(audio_session_t session) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 1302486..5153dce 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -261,7 +261,7 @@
                            const struct audio_port_config *srcConfig = NULL) const;
     virtual sp<AudioPort> getAudioPort() const { return mPolicyAudioPort->asAudioPort(); }
 
-    virtual void toAudioPort(struct audio_port *port) const;
+    virtual void toAudioPort(struct audio_port_v7 *port) const;
 
     audio_module_handle_t getModuleHandle() const;
 
@@ -358,7 +358,7 @@
 
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                            const struct audio_port_config *srcConfig = NULL) const;
-    virtual void toAudioPort(struct audio_port *port) const;
+    virtual void toAudioPort(struct audio_port_v7 *port) const;
 
         status_t open(const audio_config_t *config,
                       const DeviceVector &devices,
@@ -432,7 +432,7 @@
 
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                            const struct audio_port_config *srcConfig = NULL) const;
-    virtual void toAudioPort(struct audio_port *port) const;
+    virtual void toAudioPort(struct audio_port_v7 *port) const;
 
     const sp<SourceClientDescriptor> mSource;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 80190b7..7c712e3 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -65,9 +65,6 @@
 
     bool supportsFormat(audio_format_t format);
 
-    void setDynamic() { mIsDynamic = true; }
-    bool isDynamic() const { return mIsDynamic; }
-
     // PolicyAudioPortConfig
     virtual sp<PolicyAudioPort> getPolicyAudioPort() const {
         return static_cast<PolicyAudioPort*>(const_cast<DeviceDescriptor*>(this));
@@ -108,8 +105,6 @@
     std::string mTagName; // Unique human readable identifier for a device port found in conf file.
     FormatVector        mEncodedFormats;
     audio_format_t      mCurrentEncodedFormat;
-    bool                mIsDynamic = false;
-    const std::string   mDeclaredAddress; // Original device address
 };
 
 class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index b5b10f3..23f0c9a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -131,17 +131,8 @@
 public:
     sp<HwModule> getModuleFromName(const char *name) const;
 
-    /**
-     * @brief getModuleForDeviceType try to get a device from type / format on all modules
-     * @param device type to consider
-     * @param encodedFormat to consider
-     * @param[out] tagName if not null, if a matching device is found, will return the tagName
-     * of original device from XML file so that audio routes matchin rules work.
-     * @return valid module if considered device found, nullptr otherwise.
-     */
     sp<HwModule> getModuleForDeviceType(audio_devices_t device,
-                                        audio_format_t encodedFormat,
-                                        std::string *tagName = nullptr) const;
+                                        audio_format_t encodedFormat) const;
 
     sp<HwModule> getModuleForDevice(const sp<DeviceDescriptor> &device,
                                     audio_format_t encodedFormat) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 11d3a99..621c630 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -112,19 +112,6 @@
     }
 
     /**
-     * @brief getTag
-     * @param deviceTypes to be considered
-     * @return tagName of first matching device for the considered types, empty string otherwise.
-     */
-    std::string getTag(const DeviceTypeSet& deviceTypes) const
-    {
-        if (supportsDeviceTypes(deviceTypes)) {
-            return mSupportedDevices.getDevicesFromTypes(deviceTypes).itemAt(0)->getTagName();
-        }
-        return {};
-    }
-
-    /**
      * @brief supportsDevice
      * @param device to be checked against
      *        forceCheckOnAddress if true, check on type and address whatever the type, otherwise
@@ -144,7 +131,7 @@
     bool devicesSupportEncodedFormats(DeviceTypeSet deviceTypes) const
     {
         if (deviceTypes.empty()) {
-            return true; // required for isOffloadSupported() check
+            return true; // required for getOffloadSupport() check
         }
         DeviceVector deviceList =
             mSupportedDevices.getDevicesFromTypes(deviceTypes);
@@ -163,12 +150,6 @@
     }
     void removeSupportedDevice(const sp<DeviceDescriptor> &device)
     {
-        ssize_t ret = mSupportedDevices.indexOf(device);
-        if (ret >= 0 && !mSupportedDevices.itemAt(ret)->isDynamic()) {
-            // devices equality checks only type, address, name and format
-            // Prevents from removing non dynamically added devices
-            return;
-        }
         mSupportedDevices.remove(device);
     }
     void setSupportedDevices(const DeviceVector &devices)
diff --git a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
index e6eef24..d2f6297 100644
--- a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
@@ -42,11 +42,6 @@
 
     virtual const std::string getTagName() const = 0;
 
-    bool equals(const sp<PolicyAudioPort> &right) const
-    {
-        return getTagName() == right->getTagName();
-    }
-
     virtual sp<AudioPort> asAudioPort() const = 0;
 
     virtual void setFlags(uint32_t flags)
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 4922ebe..7016a08 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -92,7 +92,7 @@
     dstConfig->ext.mix.usecase.source = source();
 }
 
-void AudioInputDescriptor::toAudioPort(struct audio_port *port) const
+void AudioInputDescriptor::toAudioPort(struct audio_port_v7 *port) const
 {
     ALOG_ASSERT(mProfile != 0, "toAudioPort() called on input with null profile %d", mIoHandle);
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 1756021..c4d7340 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -215,7 +215,7 @@
     dstConfig->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
 }
 
-void AudioOutputDescriptor::toAudioPort(struct audio_port *port) const
+void AudioOutputDescriptor::toAudioPort(struct audio_port_v7 *port) const
 {
     // Should not be called for duplicated ports, see SwAudioOutputDescriptor::toAudioPortConfig.
     mPolicyAudioPort->asAudioPort()->toAudioPort(port);
@@ -406,8 +406,7 @@
     dstConfig->ext.mix.handle = mIoHandle;
 }
 
-void SwAudioOutputDescriptor::toAudioPort(
-                                                    struct audio_port *port) const
+void SwAudioOutputDescriptor::toAudioPort(struct audio_port_v7 *port) const
 {
     ALOG_ASSERT(!isDuplicated(), "toAudioPort() called on duplicated output %d", mIoHandle);
 
@@ -654,8 +653,7 @@
     mSource->srcDevice()->toAudioPortConfig(dstConfig, srcConfig);
 }
 
-void HwAudioOutputDescriptor::toAudioPort(
-                                                    struct audio_port *port) const
+void HwAudioOutputDescriptor::toAudioPort(struct audio_port_v7 *port) const
 {
     mSource->srcDevice()->toAudioPort(port);
 }
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index c8e4e76..2a18f19 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -39,12 +39,12 @@
 bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
                                const sp<PolicyAudioPort> &dstPort) const
 {
-    if (mSink == 0 || dstPort == 0 || !dstPort->equals(mSink)) {
+    if (mSink == 0 || dstPort == 0 || dstPort != mSink) {
         return false;
     }
     ALOGV("%s: sinks %s matching", __FUNCTION__, mSink->getTagName().c_str());
     for (const auto &sourcePort : mSources) {
-        if (sourcePort->equals(srcPort)) {
+        if (sourcePort == srcPort) {
             ALOGV("%s: sources %s matching", __FUNCTION__, sourcePort->getTagName().c_str());
             return true;
         }
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index e43ca0f..30b739c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -52,8 +52,7 @@
 DeviceDescriptor::DeviceDescriptor(const AudioDeviceTypeAddr &deviceTypeAddr,
                                    const std::string &tagName,
                                    const FormatVector &encodedFormats) :
-        DeviceDescriptorBase(deviceTypeAddr), mTagName(tagName), mEncodedFormats(encodedFormats),
-        mDeclaredAddress(deviceTypeAddr.getAddress())
+        DeviceDescriptorBase(deviceTypeAddr), mTagName(tagName), mEncodedFormats(encodedFormats)
 {
     mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
     /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
@@ -76,10 +75,6 @@
 void DeviceDescriptor::detach() {
     mId = AUDIO_PORT_HANDLE_NONE;
     PolicyAudioPort::detach();
-    // The device address may have been overwritten on device connection
-    setAddress(mDeclaredAddress);
-    // Device Port does not have a name unless provided by setDeviceConnectionState
-    setName("");
 }
 
 template<typename T>
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 2967014..d31e443 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -271,9 +271,8 @@
     return nullptr;
 }
 
-sp<HwModule> HwModuleCollection::getModuleForDeviceType(audio_devices_t type,
-                                                        audio_format_t encodedFormat,
-                                                        std::string *tagName) const
+sp <HwModule> HwModuleCollection::getModuleForDeviceType(audio_devices_t type,
+                                                         audio_format_t encodedFormat) const
 {
     for (const auto& module : *this) {
         const auto& profiles = audio_is_output_device(type) ?
@@ -285,15 +284,9 @@
                     sp <DeviceDescriptor> deviceDesc =
                             declaredDevices.getDevice(type, String8(), encodedFormat);
                     if (deviceDesc) {
-                        if (tagName != nullptr) {
-                            *tagName = deviceDesc->getTagName();
-                        }
                         return module;
                     }
                 } else {
-                    if (tagName != nullptr) {
-                        *tagName = profile->getTag({type});
-                    }
                     return module;
                 }
             }
@@ -332,32 +325,15 @@
     }
 
     for (const auto& hwModule : *this) {
-        if (!allowToCreate) {
-            auto dynamicDevices = hwModule->getDynamicDevices();
-            auto dynamicDevice = dynamicDevices.getDevice(deviceType, devAddress, encodedFormat);
-            if (dynamicDevice) {
-                return dynamicDevice;
-            }
-        }
         DeviceVector moduleDevices = hwModule->getAllDevices();
         auto moduleDevice = moduleDevices.getDevice(deviceType, devAddress, encodedFormat);
-
-        // Prevent overwritting moduleDevice address if connected device does not have the same
-        // address (since getDevice with empty address ignores match on address), use dynamic device
-        if (moduleDevice && allowToCreate &&
-                (!moduleDevice->address().empty() &&
-                 (moduleDevice->address().compare(devAddress.c_str()) != 0))) {
-            break;
-        }
         if (moduleDevice) {
             if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
                 moduleDevice->setEncodedFormat(encodedFormat);
             }
             if (allowToCreate) {
                 moduleDevice->attach(hwModule);
-                // Name may be overwritten, restored on detach.
                 moduleDevice->setAddress(devAddress.string());
-                // Name may be overwritten, restored on detach.
                 moduleDevice->setName(name);
             }
             return moduleDevice;
@@ -376,19 +352,18 @@
                                                       const char *name,
                                                       const audio_format_t encodedFormat) const
 {
-    std::string tagName = {};
-    sp<HwModule> hwModule = getModuleForDeviceType(type, encodedFormat, &tagName);
+    sp<HwModule> hwModule = getModuleForDeviceType(type, encodedFormat);
     if (hwModule == 0) {
         ALOGE("%s: could not find HW module for device %04x address %s", __FUNCTION__, type,
               address);
         return nullptr;
     }
 
-    sp<DeviceDescriptor> device = new DeviceDescriptor(type, tagName, address);
+    sp<DeviceDescriptor> device = new DeviceDescriptor(type, name, address);
     device->setName(name);
     device->setEncodedFormat(encodedFormat);
-    device->setDynamic();
-    // Add the device to the list of dynamic devices
+
+    // Add the device to the list of dynamic devices
     hwModule->addDynamicDevice(device);
     // Reciprocally attach the device to the module
     device->attach(hwModule);
@@ -400,7 +375,7 @@
     for (const auto &profile : profiles) {
         // Add the device as supported to all profile supporting "weakly" or not the device
         // according to its type
-        if (profile->supportsDevice(device, false /*matchAddress*/)) {
+        if (profile->supportsDevice(device, false /*matchAddress*/)) {
 
             // @todo quid of audio profile? import the profile from device of the same type?
             const auto &isoTypeDeviceForProfile =
@@ -431,9 +406,10 @@
 
         device->detach();
         // Only remove from dynamic list, not from declared list!!!
-        if (!hwModule->removeDynamicDevice(device)) {
+        if (!hwModule->getDynamicDevices().contains(device)) {
             return;
         }
+        hwModule->removeDynamicDevice(device);
         ALOGV("%s: removed dynamic device %s from module %s", __FUNCTION__,
               device->toString().c_str(), hwModule->getName());
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index cc4ec36..86f4539 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -206,6 +206,9 @@
             // Reset active device codec
             device->setEncodedFormat(AUDIO_FORMAT_DEFAULT);
 
+            // remove device from mReportedFormatsMap cache
+            mReportedFormatsMap.erase(device);
+
             } break;
 
         default:
@@ -334,6 +337,9 @@
             mAvailableInputDevices.remove(device);
 
             checkInputsForDevice(device, state);
+
+            // remove device from mReportedFormatsMap cache
+            mReportedFormatsMap.erase(device);
         } break;
 
         default:
@@ -3393,38 +3399,38 @@
 // This function checks for the parameters which can be offloaded.
 // This can be enhanced depending on the capability of the DSP and policy
 // of the system.
-bool AudioPolicyManager::isOffloadSupported(const audio_offload_info_t& offloadInfo)
+audio_offload_mode_t AudioPolicyManager::getOffloadSupport(const audio_offload_info_t& offloadInfo)
 {
-    ALOGV("isOffloadSupported: SR=%u, CM=0x%x, Format=0x%x, StreamType=%d,"
+    ALOGV("%s: SR=%u, CM=0x%x, Format=0x%x, StreamType=%d,"
      " BitRate=%u, duration=%" PRId64 " us, has_video=%d",
-     offloadInfo.sample_rate, offloadInfo.channel_mask,
+     __func__, offloadInfo.sample_rate, offloadInfo.channel_mask,
      offloadInfo.format,
      offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us,
      offloadInfo.has_video);
 
     if (mMasterMono) {
-        return false; // no offloading if mono is set.
+        return AUDIO_OFFLOAD_NOT_SUPPORTED; // no offloading if mono is set.
     }
 
     // Check if offload has been disabled
     if (property_get_bool("audio.offload.disable", false /* default_value */)) {
-        ALOGV("offload disabled by audio.offload.disable");
-        return false;
+        ALOGV("%s: offload disabled by audio.offload.disable", __func__);
+        return AUDIO_OFFLOAD_NOT_SUPPORTED;
     }
 
     // Check if stream type is music, then only allow offload as of now.
     if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
     {
-        ALOGV("isOffloadSupported: stream_type != MUSIC, returning false");
-        return false;
+        ALOGV("%s: stream_type != MUSIC, returning false", __func__);
+        return AUDIO_OFFLOAD_NOT_SUPPORTED;
     }
 
     //TODO: enable audio offloading with video when ready
     const bool allowOffloadWithVideo =
             property_get_bool("audio.offload.video", false /* default_value */);
     if (offloadInfo.has_video && !allowOffloadWithVideo) {
-        ALOGV("isOffloadSupported: has_video == true, returning false");
-        return false;
+        ALOGV("%s: has_video == true, returning false", __func__);
+        return AUDIO_OFFLOAD_NOT_SUPPORTED;
     }
 
     //If duration is less than minimum value defined in property, return false
@@ -3432,13 +3438,14 @@
             "audio.offload.min.duration.secs", -1 /* default_value */);
     if (min_duration_secs >= 0) {
         if (offloadInfo.duration_us < min_duration_secs * 1000000LL) {
-            ALOGV("Offload denied by duration < audio.offload.min.duration.secs(=%d)",
-                    min_duration_secs);
-            return false;
+            ALOGV("%s: Offload denied by duration < audio.offload.min.duration.secs(=%d)",
+                    __func__, min_duration_secs);
+            return AUDIO_OFFLOAD_NOT_SUPPORTED;
         }
     } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
-        ALOGV("Offload denied by duration < default min(=%u)", OFFLOAD_DEFAULT_MIN_DURATION_SECS);
-        return false;
+        ALOGV("%s: Offload denied by duration < default min(=%u)",
+                __func__, OFFLOAD_DEFAULT_MIN_DURATION_SECS);
+        return AUDIO_OFFLOAD_NOT_SUPPORTED;
     }
 
     // Do not allow offloading if one non offloadable effect is enabled. This prevents from
@@ -3448,7 +3455,7 @@
     // This may prevent offloading in rare situations where effects are left active by apps
     // in the background.
     if (mEffects.isNonOffloadableEffectEnabled()) {
-        return false;
+        return AUDIO_OFFLOAD_NOT_SUPPORTED;
     }
 
     // See if there is a profile to support this.
@@ -3459,8 +3466,14 @@
                                             offloadInfo.channel_mask,
                                             AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD,
                                             true /* directOnly */);
-    ALOGV("isOffloadSupported() profile %sfound", profile != 0 ? "" : "NOT ");
-    return (profile != 0);
+    ALOGV("%s: profile %sfound", __func__, profile != 0 ? "" : "NOT ");
+    if (profile == nullptr) {
+        return AUDIO_OFFLOAD_NOT_SUPPORTED;
+    }
+    if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD) != 0) {
+        return AUDIO_OFFLOAD_GAPLESS_SUPPORTED;
+    }
+    return AUDIO_OFFLOAD_SUPPORTED;
 }
 
 bool AudioPolicyManager::isDirectOutputSupported(const audio_config_base_t& config,
@@ -3484,15 +3497,15 @@
 status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role,
                                             audio_port_type_t type,
                                             unsigned int *num_ports,
-                                            struct audio_port *ports,
+                                            struct audio_port_v7 *ports,
                                             unsigned int *generation)
 {
-    if (num_ports == NULL || (*num_ports != 0 && ports == NULL) ||
-            generation == NULL) {
+    if (num_ports == nullptr || (*num_ports != 0 && ports == nullptr) ||
+            generation == nullptr) {
         return BAD_VALUE;
     }
     ALOGV("listAudioPorts() role %d type %d num_ports %d ports %p", role, type, *num_ports, ports);
-    if (ports == NULL) {
+    if (ports == nullptr) {
         *num_ports = 0;
     }
 
@@ -3550,7 +3563,7 @@
     return NO_ERROR;
 }
 
-status_t AudioPolicyManager::getAudioPort(struct audio_port *port)
+status_t AudioPolicyManager::getAudioPort(struct audio_port_v7 *port)
 {
     if (port == nullptr || port->id == AUDIO_PORT_HANDLE_NONE) {
         return BAD_VALUE;
@@ -4332,14 +4345,28 @@
         // checkOutputsForDevice().
         for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
             sp<DeviceDescriptor> device = mAvailableOutputDevices[i];
-            FormatVector supportedFormats =
-                    device->getAudioPort()->getAudioProfiles().getSupportedFormats();
-            for (size_t j = 0; j < supportedFormats.size(); j++) {
-                if (mConfig.getSurroundFormats().count(supportedFormats[j]) != 0) {
-                    formats.insert(supportedFormats[j]);
+            audio_devices_t deviceType = device->type();
+            // Enabling/disabling formats applies only to HDMI devices, so this
+            // function returns formats reported by HDMI devices.
+            if (deviceType != AUDIO_DEVICE_OUT_HDMI) {
+                continue;
+            }
+            // Formats reported by sink devices
+            std::unordered_set<audio_format_t> formatset;
+            if (auto it = mReportedFormatsMap.find(device); it != mReportedFormatsMap.end()) {
+                formatset.insert(it->second.begin(), it->second.end());
+            }
+
+            // Formats hard-coded in the in policy configuration file (if any).
+            FormatVector encodedFormats = device->encodedFormats();
+            formatset.insert(encodedFormats.begin(), encodedFormats.end());
+            // Filter the formats which are supported by the vendor hardware.
+            for (auto it = formatset.begin(); it != formatset.end(); ++it) {
+                if (mConfig.getSurroundFormats().count(*it) != 0) {
+                    formats.insert(*it);
                 } else {
                     for (const auto& pair : mConfig.getSurroundFormats()) {
-                        if (pair.second.count(supportedFormats[j]) != 0) {
+                        if (pair.second.count(*it) != 0) {
                             formats.insert(pair.first);
                             break;
                         }
@@ -6538,6 +6565,7 @@
             return;
         }
         FormatVector formats = formatsFromString(reply.string());
+        mReportedFormatsMap[devDesc] = formats;
         if (device == AUDIO_DEVICE_OUT_HDMI
                 || isDeviceOfModule(devDesc, AUDIO_HARDWARE_MODULE_ID_MSD)) {
             modifySurroundFormats(devDesc, &formats);
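The AudioPolicyManager changes above introduce mReportedFormatsMap: formats last reported by a device via getParameters are cached, merged with the formats declared in the policy configuration when building the surround-format list, and erased when the device disconnects. A toy model of that bookkeeping with simplified stand-in types:

#include <map>
#include <set>
#include <string>
#include <vector>

using Format = std::string;   // stand-in for audio_format_t
using DeviceId = int;         // stand-in for the DeviceDescriptor key

std::map<DeviceId, std::vector<Format>> gReportedFormats;

void onFormatsReported(DeviceId dev, std::vector<Format> formats) {
    gReportedFormats[dev] = std::move(formats);   // mReportedFormatsMap[devDesc] = formats;
}

void onDeviceDisconnected(DeviceId dev) {
    gReportedFormats.erase(dev);                  // mReportedFormatsMap.erase(device);
}

// Union of HAL-reported formats and formats declared in the configuration.
std::set<Format> surroundFormatsFor(DeviceId dev, const std::vector<Format>& declared) {
    std::set<Format> merged(declared.begin(), declared.end());
    if (auto it = gReportedFormats.find(dev); it != gReportedFormats.end()) {
        merged.insert(it->second.begin(), it->second.end());
    }
    return merged;
}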
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 217013f..00c4eab 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -225,7 +225,7 @@
         status_t dump(int fd) override;
 
         status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t capturePolicy) override;
-        virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo);
+        virtual audio_offload_mode_t getOffloadSupport(const audio_offload_info_t& offloadInfo);
 
         virtual bool isDirectOutputSupported(const audio_config_base_t& config,
                                              const audio_attributes_t& attributes);
@@ -233,9 +233,9 @@
         virtual status_t listAudioPorts(audio_port_role_t role,
                                         audio_port_type_t type,
                                         unsigned int *num_ports,
-                                        struct audio_port *ports,
+                                        struct audio_port_v7 *ports,
                                         unsigned int *generation);
-        virtual status_t getAudioPort(struct audio_port *port);
+        virtual status_t getAudioPort(struct audio_port_v7 *port);
         virtual status_t createAudioPatch(const struct audio_patch *patch,
                                            audio_patch_handle_t *handle,
                                            uid_t uid) {
@@ -813,6 +813,9 @@
         std::unordered_set<audio_format_t> mManualSurroundFormats;
 
         std::unordered_map<uid_t, audio_flags_mask_t> mAllowedCapturePolicies;
+
+        // The map of device descriptor and formats reported by the device.
+        std::map<wp<DeviceDescriptor>, FormatVector> mReportedFormatsMap;
 private:
         void onNewAudioModulesAvailableInt(DeviceVector *newDevices);
 
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index dd128a2..90b93e2 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -57,7 +57,7 @@
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
     request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
-    request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_mask(flags));
+    request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
 
     status_t status = af->openOutput(request, &response);
     if (status == OK) {
@@ -134,7 +134,7 @@
     request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
     request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
-    request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_mask(flags));
+    request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
 
     media::OpenInputResponse response;
     status_t status = af->openInput(request, &response);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index df8e4c5..517d6bf 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -547,7 +547,7 @@
 }
 
 std::string AudioPolicyService::getDeviceTypeStrForPortId(audio_port_handle_t portId) {
-    struct audio_port port = {};
+    struct audio_port_v7 port = {};
     port.id = portId;
     status_t status = mAudioPolicyManager->getAudioPort(&port);
     if (status == NO_ERROR && port.type == AUDIO_PORT_TYPE_DEVICE) {
@@ -573,7 +573,8 @@
     }
 
     // check calling permissions
-    if (!(startRecording(client->opPackageName, client->pid, client->uid)
+    if (!(startRecording(client->opPackageName, client->pid, client->uid,
+            client->attributes.source)
             || client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
         ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
                 __func__, client->uid, client->pid);
@@ -661,7 +662,8 @@
         client->active = false;
         client->startTimeNs = 0;
         updateUidStates_l();
-        finishRecording(client->opPackageName, client->uid);
+        finishRecording(client->opPackageName, client->uid,
+                        client->attributes.source);
     }
 
     return status;
@@ -687,7 +689,8 @@
     updateUidStates_l();
 
     // finish the recording app op
-    finishRecording(client->opPackageName, client->uid);
+    finishRecording(client->opPackageName, client->uid,
+                    client->attributes.source);
     AutoCallerClear acc;
     return mAudioPolicyManager->stopInput(portId);
 }
@@ -1086,15 +1089,15 @@
     return mAudioPolicyManager->setAllowedCapturePolicy(uid, capturePolicy);
 }
 
-bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
+audio_offload_mode_t AudioPolicyService::getOffloadSupport(const audio_offload_info_t& info)
 {
     if (mAudioPolicyManager == NULL) {
         ALOGV("mAudioPolicyManager == NULL");
-        return false;
+        return AUDIO_OFFLOAD_NOT_SUPPORTED;
     }
     Mutex::Autolock _l(mLock);
     AutoCallerClear acc;
-    return mAudioPolicyManager->isOffloadSupported(info);
+    return mAudioPolicyManager->getOffloadSupport(info);
 }
 
 bool AudioPolicyService::isDirectOutputSupported(const audio_config_base_t& config,
@@ -1117,7 +1120,7 @@
 status_t AudioPolicyService::listAudioPorts(audio_port_role_t role,
                                             audio_port_type_t type,
                                             unsigned int *num_ports,
-                                            struct audio_port *ports,
+                                            struct audio_port_v7 *ports,
                                             unsigned int *generation)
 {
     Mutex::Autolock _l(mLock);
@@ -1128,7 +1131,7 @@
     return mAudioPolicyManager->listAudioPorts(role, type, num_ports, ports, generation);
 }
 
-status_t AudioPolicyService::getAudioPort(struct audio_port *port)
+status_t AudioPolicyService::getAudioPort(struct audio_port_v7 *port)
 {
     Mutex::Autolock _l(mLock);
     if (mAudioPolicyManager == NULL) {
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 70a0e3a..c032f24 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -192,16 +192,16 @@
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
     status_t setSupportedSystemUsages(const std::vector<audio_usage_t>& systemUsages);
     status_t setAllowedCapturePolicy(uid_t uid, audio_flags_mask_t capturePolicy) override;
-    virtual bool isOffloadSupported(const audio_offload_info_t &config);
+    virtual audio_offload_mode_t getOffloadSupport(const audio_offload_info_t &config);
     virtual bool isDirectOutputSupported(const audio_config_base_t& config,
                                          const audio_attributes_t& attributes);
 
     virtual status_t listAudioPorts(audio_port_role_t role,
                                     audio_port_type_t type,
                                     unsigned int *num_ports,
-                                    struct audio_port *ports,
+                                    struct audio_port_v7 *ports,
                                     unsigned int *generation);
-    virtual status_t getAudioPort(struct audio_port *port);
+    virtual status_t getAudioPort(struct audio_port_v7 *port);
     virtual status_t createAudioPatch(const struct audio_patch *patch,
                                        audio_patch_handle_t *handle);
     virtual status_t releaseAudioPatch(audio_patch_handle_t handle);
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
index bdddf06..433a6ff 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
@@ -121,6 +121,8 @@
 
     size_t getAudioPortListUpdateCount() const { return mAudioPortListUpdateCount; }
 
+    virtual void addSupportedFormat(audio_format_t /* format */) {}
+
 private:
     audio_module_handle_t mNextModuleHandle = AUDIO_MODULE_HANDLE_NONE + 1;
     audio_io_handle_t mNextIoHandle = AUDIO_IO_HANDLE_NONE + 1;
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
new file mode 100644
index 0000000..a5ad9b1
--- /dev/null
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClientForHdmi.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <map>
+#include <set>
+
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include "AudioPolicyTestClient.h"
+
+namespace android {
+
+class AudioPolicyManagerTestClientForHdmi : public AudioPolicyManagerTestClient {
+public:
+    String8 getParameters(audio_io_handle_t /* ioHandle */, const String8& /* keys */) override {
+        return mAudioParameters.toString();
+    }
+
+    void addSupportedFormat(audio_format_t format) override {
+        mAudioParameters.add(
+                String8(AudioParameter::keyStreamSupportedFormats),
+                String8(audio_format_to_string(format)));
+        mAudioParameters.addInt(String8(AudioParameter::keyStreamSupportedSamplingRates), 48000);
+        mAudioParameters.add(String8(AudioParameter::keyStreamSupportedChannels), String8(""));
+    }
+
+private:
+    AudioParameter mAudioParameters;
+};
+
+} // namespace android
\ No newline at end of file
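The new test client above fakes the HAL side of the HDMI tests: addSupportedFormat() records the format, and getParameters() hands those entries back so the policy manager believes the sink reported them. A toy model of that fake, with illustrative key names rather than AudioParameter's real keys and serialization:

#include <map>
#include <sstream>
#include <string>

class FakeHdmiParametersClient {
public:
    void addSupportedFormat(const std::string& format) {
        mParams["sup_formats_stub"] = format;   // stand-in key names
        mParams["sup_rates_stub"] = "48000";
    }
    // Returns a key=value list, loosely modelling an AudioParameter reply.
    std::string getParameters() const {
        std::ostringstream out;
        for (const auto& [key, value] : mParams) {
            out << key << '=' << value << ';';
        }
        return out.str();
    }
private:
    std::map<std::string, std::string> mParams;
};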
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index 8bab020..be860e5 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -29,6 +29,7 @@
     using AudioPolicyManager::getOutputs;
     using AudioPolicyManager::getAvailableOutputDevices;
     using AudioPolicyManager::getAvailableInputDevices;
+    using AudioPolicyManager::setSurroundFormatEnabled;
     uint32_t getAudioPortGeneration() const { return mAudioPortGeneration; }
 };
 
diff --git a/services/audiopolicy/tests/audio_health_tests.cpp b/services/audiopolicy/tests/audio_health_tests.cpp
index 9a62e72..ca2f0c6 100644
--- a/services/audiopolicy/tests/audio_health_tests.cpp
+++ b/services/audiopolicy/tests/audio_health_tests.cpp
@@ -34,7 +34,7 @@
     unsigned int numPorts;
     unsigned int generation1;
     unsigned int generation;
-    struct audio_port *audioPorts = NULL;
+    struct audio_port_v7 *audioPorts = nullptr;
     int attempts = 10;
     do {
         if (attempts-- < 0) {
@@ -43,13 +43,14 @@
         }
         numPorts = 0;
         ASSERT_EQ(NO_ERROR, AudioSystem::listAudioPorts(
-                AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_DEVICE, &numPorts, NULL, &generation1));
+                AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_DEVICE, &numPorts, nullptr, &generation1));
         if (numPorts == 0) {
             free(audioPorts);
             GTEST_FAIL() << "Number of audio ports should not be zero";
         }
 
-        audioPorts = (struct audio_port *)realloc(audioPorts, numPorts * sizeof(struct audio_port));
+        audioPorts = (struct audio_port_v7 *)realloc(
+                audioPorts, numPorts * sizeof(struct audio_port_v7));
         status_t status = AudioSystem::listAudioPorts(
                 AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_DEVICE, &numPorts, audioPorts, &generation);
         if (status != NO_ERROR) {
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 7972dbf..889efac 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -33,6 +33,7 @@
 
 #include "AudioPolicyInterface.h"
 #include "AudioPolicyManagerTestClient.h"
+#include "AudioPolicyManagerTestClientForHdmi.h"
 #include "AudioPolicyTestClient.h"
 #include "AudioPolicyTestManager.h"
 
@@ -137,15 +138,16 @@
     // Tries to find a device port. If 'foundPort' isn't nullptr,
     // will generate a failure if the port hasn't been found.
     bool findDevicePort(audio_port_role_t role, audio_devices_t deviceType,
-            const std::string &address, audio_port *foundPort);
+            const std::string &address, audio_port_v7 *foundPort);
     static audio_port_handle_t getDeviceIdFromPatch(const struct audio_patch* patch);
+    virtual AudioPolicyManagerTestClient* getClient() { return new AudioPolicyManagerTestClient; }
 
     std::unique_ptr<AudioPolicyManagerTestClient> mClient;
     std::unique_ptr<AudioPolicyTestManager> mManager;
 };
 
 void AudioPolicyManagerTest::SetUp() {
-    mClient.reset(new AudioPolicyManagerTestClient);
+    mClient.reset(getClient());
     mManager.reset(new AudioPolicyTestManager(mClient.get()));
     SetUpManagerConfig();  // Subclasses may want to customize the config.
     ASSERT_EQ(NO_ERROR, mManager->initialize());
@@ -244,7 +246,7 @@
 }
 
 bool AudioPolicyManagerTest::findDevicePort(audio_port_role_t role,
-        audio_devices_t deviceType, const std::string &address, audio_port *foundPort) {
+        audio_devices_t deviceType, const std::string &address, audio_port_v7 *foundPort) {
     uint32_t numPorts = 0;
     uint32_t generation1;
     status_t ret;
@@ -254,7 +256,7 @@
     if (HasFailure()) return false;
 
     uint32_t generation2;
-    struct audio_port ports[numPorts];
+    struct audio_port_v7 ports[numPorts];
     ret = mManager->listAudioPorts(role, AUDIO_PORT_TYPE_DEVICE, &numPorts, ports, &generation2);
     EXPECT_EQ(NO_ERROR, ret) << "mManager->listAudioPorts returned error";
     EXPECT_EQ(generation1, generation2) << "Generations changed during ports retrieval";
@@ -668,6 +670,165 @@
     ASSERT_EQ(INVALID_OPERATION, ret);
 }
 
+class AudioPolicyManagerTestForHdmi
+        : public AudioPolicyManagerTestWithConfigurationFile {
+protected:
+    void SetUp() override;
+    std::string getConfigFile() override { return sTvConfig; }
+    std::map<audio_format_t, bool> getSurroundFormatsHelper(bool reported);
+    std::unordered_set<audio_format_t> getFormatsFromPorts();
+    AudioPolicyManagerTestClient* getClient() override {
+        return new AudioPolicyManagerTestClientForHdmi;
+    }
+    void TearDown() override;
+
+    static const std::string sTvConfig;
+
+};
+
+const std::string AudioPolicyManagerTestForHdmi::sTvConfig =
+        AudioPolicyManagerTestForHdmi::sExecutableDir +
+        "test_settop_box_surround_configuration.xml";
+
+void AudioPolicyManagerTestForHdmi::SetUp() {
+    AudioPolicyManagerTest::SetUp();
+    mClient->addSupportedFormat(AUDIO_FORMAT_E_AC3);
+    mManager->setDeviceConnectionState(
+            AUDIO_DEVICE_OUT_HDMI, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+            "" /*address*/, "" /*name*/, AUDIO_FORMAT_DEFAULT);
+}
+
+void AudioPolicyManagerTestForHdmi::TearDown() {
+    mManager->setDeviceConnectionState(
+            AUDIO_DEVICE_OUT_HDMI, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+            "" /*address*/, "" /*name*/, AUDIO_FORMAT_DEFAULT);
+    AudioPolicyManagerTest::TearDown();
+}
+
+std::map<audio_format_t, bool>
+        AudioPolicyManagerTestForHdmi::getSurroundFormatsHelper(bool reported) {
+    unsigned int numSurroundFormats = 0;
+    std::map<audio_format_t, bool> surroundFormatsMap;
+    status_t ret = mManager->getSurroundFormats(
+            &numSurroundFormats, nullptr /* surroundFormats */,
+            nullptr /* surroundFormatsEnabled */, reported);
+    EXPECT_EQ(NO_ERROR, ret);
+    if (ret != NO_ERROR) {
+        return surroundFormatsMap;
+    }
+    audio_format_t surroundFormats[numSurroundFormats];
+    memset(surroundFormats, 0, sizeof(audio_format_t) * numSurroundFormats);
+    bool surroundFormatsEnabled[numSurroundFormats];
+    memset(surroundFormatsEnabled, 0, sizeof(bool) * numSurroundFormats);
+    ret = mManager->getSurroundFormats(
+            &numSurroundFormats, surroundFormats, surroundFormatsEnabled, reported);
+    EXPECT_EQ(NO_ERROR, ret);
+    if (ret != NO_ERROR) {
+        return surroundFormatsMap;
+    }
+    for (unsigned int i = 0; i < numSurroundFormats; i++) {
+        surroundFormatsMap[surroundFormats[i]] = surroundFormatsEnabled[i];
+    }
+    return surroundFormatsMap;
+}
+
+std::unordered_set<audio_format_t>
+        AudioPolicyManagerTestForHdmi::getFormatsFromPorts() {
+    uint32_t numPorts = 0;
+    uint32_t generation1;
+    status_t ret;
+    std::unordered_set<audio_format_t> formats;
+    ret = mManager->listAudioPorts(
+            AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE, &numPorts, nullptr, &generation1);
+    EXPECT_EQ(NO_ERROR, ret) << "mManager->listAudioPorts returned error";
+    if (ret != NO_ERROR) {
+        return formats;
+    }
+    struct audio_port_v7 ports[numPorts];
+    ret = mManager->listAudioPorts(
+            AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE, &numPorts, ports, &generation1);
+    EXPECT_EQ(NO_ERROR, ret) << "mManager->listAudioPorts returned error";
+    if (ret != NO_ERROR) {
+        return formats;
+    }
+    for (const auto &port : ports) {
+        for (size_t i = 0; i < port.num_audio_profiles; ++i) {
+            formats.insert(port.audio_profiles[i].format);
+        }
+    }
+    return formats;
+}
+
+TEST_F(AudioPolicyManagerTestForHdmi, GetSurroundFormatsReturnsSupportedFormats) {
+    mManager->setForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
+    auto surroundFormats = getSurroundFormatsHelper(false /*reported*/);
+    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+}
+
+TEST_F(AudioPolicyManagerTestForHdmi,
+        GetSurroundFormatsReturnsManipulatedFormats) {
+    mManager->setForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
+
+    status_t ret =
+            mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    ASSERT_EQ(NO_ERROR, ret);
+    auto surroundFormats = getSurroundFormatsHelper(false /*reported*/);
+    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+
+    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+    ASSERT_EQ(NO_ERROR, ret);
+    surroundFormats = getSurroundFormatsHelper(false /*reported*/);
+    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_TRUE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+
+    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    ASSERT_EQ(NO_ERROR, ret);
+    surroundFormats = getSurroundFormatsHelper(false /*reported*/);
+    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+    ASSERT_FALSE(surroundFormats[AUDIO_FORMAT_E_AC3]);
+}
+
+TEST_F(AudioPolicyManagerTestForHdmi,
+        ListAudioPortsReturnManipulatedHdmiFormats) {
+    mManager->setForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
+
+    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/));
+    auto formats = getFormatsFromPorts();
+    ASSERT_EQ(0, formats.count(AUDIO_FORMAT_E_AC3));
+
+    ASSERT_EQ(NO_ERROR, mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/));
+    formats = getFormatsFromPorts();
+    ASSERT_EQ(1, formats.count(AUDIO_FORMAT_E_AC3));
+}
+
+TEST_F(AudioPolicyManagerTestForHdmi,
+        GetReportedSurroundFormatsReturnsHdmiReportedFormats) {
+    mManager->setForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
+    auto surroundFormats = getSurroundFormatsHelper(true /*reported*/);
+    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+}
+
+TEST_F(AudioPolicyManagerTestForHdmi,
+        GetReportedSurroundFormatsReturnsNonManipulatedHdmiReportedFormats) {
+    mManager->setForceUse(
+            AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND, AUDIO_POLICY_FORCE_ENCODED_SURROUND_MANUAL);
+
+    status_t ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, false /*enabled*/);
+    ASSERT_EQ(NO_ERROR, ret);
+    auto surroundFormats = getSurroundFormatsHelper(true /*reported*/);
+    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+
+    ret = mManager->setSurroundFormatEnabled(AUDIO_FORMAT_E_AC3, true /*enabled*/);
+    ASSERT_EQ(NO_ERROR, ret);
+    surroundFormats = getSurroundFormatsHelper(true /*reported*/);
+    ASSERT_EQ(1, surroundFormats.count(AUDIO_FORMAT_E_AC3));
+}
+
 class AudioPolicyManagerTestDPNoRemoteSubmixModule : public AudioPolicyManagerTestDynamicPolicy {
 protected:
     std::string getConfigFile() override { return sPrimaryOnlyConfig; }
@@ -714,7 +875,7 @@
             {AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, RULE_MATCH_ATTRIBUTE_USAGE}
     };
 
-    struct audio_port mInjectionPort;
+    struct audio_port_v7 mInjectionPort;
     audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
 };
 
@@ -731,7 +892,7 @@
             AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig, mUsageRules);
     ASSERT_EQ(NO_ERROR, ret);
 
-    struct audio_port extractionPort;
+    struct audio_port_v7 extractionPort;
     ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SOURCE, AUDIO_DEVICE_IN_REMOTE_SUBMIX,
                     mMixAddress, &extractionPort));
 
@@ -900,7 +1061,7 @@
         {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_VOICE_COMMUNICATION, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET}
     };
 
-    struct audio_port mExtractionPort;
+    struct audio_port_v7 mExtractionPort;
     audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
 };
 
@@ -917,7 +1078,7 @@
             AUDIO_DEVICE_IN_REMOTE_SUBMIX, mMixAddress, audioConfig, mSourceRules);
     ASSERT_EQ(NO_ERROR, ret);
 
-    struct audio_port injectionPort;
+    struct audio_port_v7 injectionPort;
     ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
                     mMixAddress, &injectionPort));
 
@@ -1068,7 +1229,7 @@
             type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
             address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
 
-    audio_port devicePort;
+    audio_port_v7 devicePort;
     const audio_port_role_t role = audio_is_output_device(type)
             ? AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
     ASSERT_TRUE(findDevicePort(role, type, address, &devicePort));
@@ -1129,7 +1290,7 @@
             flags, &output, &portId);
     sp<SwAudioOutputDescriptor> outDesc = mManager->getOutputs().valueFor(output);
     ASSERT_NE(nullptr, outDesc.get());
-    audio_port port = {};
+    audio_port_v7 port = {};
     outDesc->toAudioPort(&port);
     mManager->releaseOutput(portId);
     ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
@@ -1211,7 +1372,7 @@
             findDevicePort(AUDIO_PORT_ROLE_SOURCE, AUDIO_DEVICE_IN_REMOTE_SUBMIX, "0", nullptr));
     mClient->swapAllowedModuleNames({"primary", "r_submix"});
     mManager->onNewAudioModulesAvailable();
-    struct audio_port port;
+    struct audio_port_v7 port;
     ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SOURCE, AUDIO_DEVICE_IN_REMOTE_SUBMIX, "0", &port));
 }
 
diff --git a/services/audiopolicy/tests/resources/Android.bp b/services/audiopolicy/tests/resources/Android.bp
index 4f50dad..2f6e925 100644
--- a/services/audiopolicy/tests/resources/Android.bp
+++ b/services/audiopolicy/tests/resources/Android.bp
@@ -5,5 +5,6 @@
         "test_audio_policy_primary_only_configuration.xml",
         "test_invalid_audio_policy_configuration.xml",
         "test_tv_apm_configuration.xml",
+        "test_settop_box_surround_configuration.xml",
     ],
 }
diff --git a/services/audiopolicy/tests/resources/test_settop_box_surround_configuration.xml b/services/audiopolicy/tests/resources/test_settop_box_surround_configuration.xml
new file mode 100644
index 0000000..6f7375e
--- /dev/null
+++ b/services/audiopolicy/tests/resources/test_settop_box_surround_configuration.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!--
+  ~ Copyright (C) 2020 The Android Open Source Project
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <globalConfiguration speaker_drc_enabled="false"/>
+    <modules>
+        <module name="primary" halVersion="2.0">
+            <attachedDevices>
+                <item>Stub</item>
+            </attachedDevices>
+            <defaultOutputDevice>Stub</defaultOutputDevice>
+            <mixPorts>
+                <mixPort name="primary pcm" role="source"
+                         flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+                    <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+                             samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+                </mixPort>
+                <mixPort name="multichannel output" role="source"
+                         flags="AUDIO_OUTPUT_FLAG_DIRECT">
+                    <profile name="" />
+                </mixPort>
+            </mixPorts>
+            <devicePorts>
+                <devicePort tagName="Stub" type="AUDIO_DEVICE_OUT_STUB" role="sink" />
+                <devicePort tagName="HDMI" type="AUDIO_DEVICE_OUT_HDMI" role="sink" />
+            </devicePorts>
+            <routes>
+                <route type="mix" sink="Stub" sources="primary pcm"/>
+                <route type="mix" sink="HDMI" sources="primary pcm,multichannel output"/>
+            </routes>
+        </module>
+    </modules>
+</audioPolicyConfiguration>
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 8400dae..b4c0da3 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -21,6 +21,7 @@
 #include <algorithm>
 #include <climits>
 #include <stdio.h>
+#include <cstdlib>
 #include <cstring>
 #include <ctime>
 #include <string>
@@ -1694,6 +1695,8 @@
             // Otherwise, add client to active clients list
             finishConnectLocked(client, partial);
         }
+
+        client->setImageDumpMask(mImageDumpMask);
     } // lock is destroyed, allow further connect calls
 
     // Important: release the mutex here so the client can call back into the service from its
@@ -3880,6 +3883,10 @@
         return handleSetRotateAndCrop(args);
     } else if (args.size() >= 1 && args[0] == String16("get-rotate-and-crop")) {
         return handleGetRotateAndCrop(out);
+    } else if (args.size() >= 2 && args[0] == String16("set-image-dump-mask")) {
+        return handleSetImageDumpMask(args);
+    } else if (args.size() >= 1 && args[0] == String16("get-image-dump-mask")) {
+        return handleGetImageDumpMask(out);
     } else if (args.size() == 1 && args[0] == String16("help")) {
         printHelp(out);
         return NO_ERROR;
@@ -3979,6 +3986,30 @@
     return dprintf(out, "rotateAndCrop override: %d\n", mOverrideRotateAndCropMode);
 }
 
+status_t CameraService::handleSetImageDumpMask(const Vector<String16>& args) {
+    char *endPtr;
+    errno = 0;
+    String8 maskString8 = String8(args[1]);
+    long maskValue = strtol(maskString8.c_str(), &endPtr, 10);
+
+    if (errno != 0) return BAD_VALUE;
+    if (endPtr != maskString8.c_str() + maskString8.size()) return BAD_VALUE;
+    if (maskValue < 0 || maskValue > 1) return BAD_VALUE;
+
+    Mutex::Autolock lock(mServiceLock);
+
+    mImageDumpMask = maskValue;
+
+    return OK;
+}
+
+status_t CameraService::handleGetImageDumpMask(int out) {
+    Mutex::Autolock lock(mServiceLock);
+
+    return dprintf(out, "Image dump mask: %d\n", mImageDumpMask);
+}
+
 status_t CameraService::printHelp(int out) {
     return dprintf(out, "Camera service commands:\n"
         "  get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
@@ -3987,6 +4018,9 @@
         "  set-rotate-and-crop <ROTATION> overrides the rotate-and-crop value for AUTO backcompat\n"
         "      Valid values 0=0 deg, 1=90 deg, 2=180 deg, 3=270 deg, 4=No override\n"
         "  get-rotate-and-crop returns the current override rotate-and-crop value\n"
+        "  set-image-dump-mask <MASK> specifies the formats to be saved to disk\n"
+        "      Valid values 0=OFF, 1=ON for JPEG\n"
+        "  get-image-dump-mask returns the current image-dump-mask value\n"
         "  help print this message\n");
 }
 
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index d26c62d..43b03e6 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -398,6 +398,8 @@
         // Check what API level is used for this client. This is used to determine which
         // superclass this can be cast to.
         virtual bool canCastToApiClient(apiLevel level) const;
+
+        void setImageDumpMask(int /*mask*/) { }
     protected:
         // Initialized in constructor
 
@@ -1036,6 +1038,12 @@
     // Get the rotate-and-crop AUTO override behavior
     status_t handleGetRotateAndCrop(int out);
 
+    // Set the mask for image dump to disk
+    status_t handleSetImageDumpMask(const Vector<String16>& args);
+
+    // Get the mask for image dump to disk
+    status_t handleGetImageDumpMask(int out);
+
     // Prints the shell command help
     status_t printHelp(int out);
 
@@ -1077,6 +1085,9 @@
 
     // Current override rotate-and-crop mode
     uint8_t mOverrideRotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_AUTO;
+
+    // Current image dump mask
+    uint8_t mImageDumpMask = 0;
 };
 
 } // namespace android
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 5d40b82..3f72eca 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -206,6 +206,7 @@
     virtual void notifyRequestQueueEmpty();
     virtual void notifyRepeatingRequestError(long lastFrameNumber);
 
+    void setImageDumpMask(int mask) { if (mDevice != nullptr) mDevice->setImageDumpMask(mask); }
     /**
      * Interface used by independent components of CameraDeviceClient.
      */
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 4fe5adf..a7173d1 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -65,7 +65,6 @@
         mYuvBufferAcquired(false),
         mProducerListener(new ProducerListener()),
         mDequeuedOutputBufferCnt(0),
-        mLockedAppSegmentBufferCnt(0),
         mCodecOutputCounter(0),
         mQuality(-1),
         mGridTimestampUs(0),
@@ -635,7 +634,6 @@
             mAppSegmentConsumer->unlockBuffer(imgBuffer);
         } else {
             mPendingInputFrames[frameNumber].appSegmentBuffer = imgBuffer;
-            mLockedAppSegmentBufferCnt++;
         }
         mInputAppSegmentBuffers.erase(it);
         mAppSegmentFrameNumbers.pop();
@@ -898,10 +896,6 @@
                         strerror(-res), res);
                 return res;
             }
-        } else if (mLockedAppSegmentBufferCnt == kMaxAcquiredAppSegment) {
-            ALOGE("%s: Out-of-order app segment buffers reaches limit %u", __FUNCTION__,
-                    kMaxAcquiredAppSegment);
-            return INVALID_OPERATION;
         }
     }
 
@@ -1039,7 +1033,6 @@
     mAppSegmentConsumer->unlockBuffer(inputFrame.appSegmentBuffer);
     inputFrame.appSegmentBuffer.data = nullptr;
     inputFrame.exifError = false;
-    mLockedAppSegmentBufferCnt--;
 
     return OK;
 }
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
index 33ca69a..a373127 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.h
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -253,7 +253,6 @@
 
     // Keep all incoming APP segment Blob buffer pending further processing.
     std::vector<int64_t> mInputAppSegmentBuffers;
-    int32_t           mLockedAppSegmentBufferCnt;
 
     // Keep all incoming HEIC blob buffer pending further processing.
     std::vector<CodecOutputBufferInfo> mCodecOutputBuffers;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index a537ef5..77e660f 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -367,6 +367,14 @@
      * Get the status tracker of the camera device
      */
     virtual wp<camera3::StatusTracker> getStatusTracker() = 0;
+
+    /**
+     * Set bitmask for image dump flag
+     */
+    void setImageDumpMask(int mask) { mImageDumpMask = mask; }
+
+protected:
+    int mImageDumpMask = 0;
 };
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 50ef953..8754ad3 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -1456,6 +1456,8 @@
 
     newStream->setBufferManager(mBufferManager);
 
+    newStream->setImageDumpMask(mImageDumpMask);
+
     res = mOutputStreams.add(mNextStreamId, newStream);
     if (res < 0) {
         SET_ERR_L("Can't add new stream to set: %s (%d)", strerror(-res), res);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 7b812f2..6dfc838 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -18,8 +18,15 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
+#include <ctime>
+#include <fstream>
+
+#include <android-base/unique_fd.h>
+#include <ui/GraphicBuffer.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
+
+#include "api1/client2/JpegProcessor.h"
 #include "Camera3OutputStream.h"
 #include "utils/TraceHFR.h"
 
@@ -279,6 +286,12 @@
                   __FUNCTION__, mId, strerror(-res), res);
             return res;
         }
+        // If this is a JPEG output, and image dump mask is set, save image to
+        // disk.
+        if (getFormat() == HAL_PIXEL_FORMAT_BLOB && getDataSpace() == HAL_DATASPACE_V0_JFIF &&
+                mImageDumpMask) {
+            dumpImageToDisk(timestamp, anwBuffer, anwReleaseFence);
+        }
 
         res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence, surface_ids);
         if (shouldLogError(res, state)) {
@@ -957,6 +970,49 @@
     return (usage & GRALLOC_USAGE_HW_TEXTURE) != 0;
 }
 
+void Camera3OutputStream::dumpImageToDisk(nsecs_t timestamp,
+        ANativeWindowBuffer* anwBuffer, int fence) {
+    // Derive output file name
+    std::string fileExtension = "jpg";
+    char imageFileName[64];
+    time_t now = time(0);
+    tm *localTime = localtime(&now);
+    snprintf(imageFileName, sizeof(imageFileName), "IMG_%4d%02d%02d_%02d%02d%02d_%" PRId64 ".%s",
+            1900 + localTime->tm_year, localTime->tm_mon + 1, localTime->tm_mday,
+            localTime->tm_hour, localTime->tm_min, localTime->tm_sec,
+            timestamp, fileExtension.c_str());
+
+    // Lock the image for CPU read
+    sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(anwBuffer);
+    void* mapped = nullptr;
+    base::unique_fd fenceFd(dup(fence));
+    status_t res = graphicBuffer->lockAsync(GraphicBuffer::USAGE_SW_READ_OFTEN, &mapped,
+            fenceFd.get());
+    if (res != OK) {
+        ALOGE("%s: Failed to lock the buffer: %s (%d)", __FUNCTION__, strerror(-res), res);
+        return;
+    }
+
+    // Figure out actual file size
+    auto actualJpegSize = android::camera2::JpegProcessor::findJpegSize((uint8_t*)mapped, mMaxSize);
+    if (actualJpegSize == 0) {
+        actualJpegSize = mMaxSize;
+    }
+
+    // Output image data to file
+    std::string filePath = "/data/misc/cameraserver/";
+    filePath += imageFileName;
+    std::ofstream imageFile(filePath.c_str(), std::ofstream::binary);
+    if (!imageFile.is_open()) {
+        ALOGE("%s: Unable to create file %s", __FUNCTION__, filePath.c_str());
+        graphicBuffer->unlock();
+        return;
+    }
+    imageFile.write((const char*)mapped, actualJpegSize);
+
+    graphicBuffer->unlock();
+}
+
 }; // namespace camera3
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index b4e49f9..55f0d41 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -210,6 +210,8 @@
      */
     static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
 
+    void setImageDumpMask(int mask) { mImageDumpMask = mask; }
+
   protected:
     Camera3OutputStream(int id, camera3_stream_type_t type,
             uint32_t width, uint32_t height, int format,
@@ -325,9 +327,14 @@
     // STATE_ABANDONED
     static bool shouldLogError(status_t res, StreamState state);
 
+    // Dump images to disk before returning to consumer
+    void dumpImageToDisk(nsecs_t timestamp, ANativeWindowBuffer* anwBuffer, int fence);
+
     static const int32_t kDequeueLatencyBinSize = 5; // in ms
     CameraLatencyHistogram mDequeueBufferLatency;
 
+    int mImageDumpMask = 0;
+
 }; // class Camera3OutputStream
 
 } // namespace camera3
diff --git a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
index 81d7bf9..1bc2081 100644
--- a/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
+++ b/services/camera/libcameraservice/device3/ZoomRatioMapper.cpp
@@ -168,6 +168,19 @@
     entry = request->find(ANDROID_CONTROL_ZOOM_RATIO);
     if (entry.count == 1 && entry.data.f[0] != 1.0f) {
         zoomRatioIs1 = false;
+
+        // If cropRegion is windowboxing, override it with activeArray
+        camera_metadata_entry_t cropRegionEntry = request->find(ANDROID_SCALER_CROP_REGION);
+        if (cropRegionEntry.count == 4) {
+            int cropWidth = cropRegionEntry.data.i32[2];
+            int cropHeight = cropRegionEntry.data.i32[3];
+            if (cropWidth < mArrayWidth && cropHeight < mArrayHeight) {
+                cropRegionEntry.data.i32[0] = 0;
+                cropRegionEntry.data.i32[1] = 0;
+                cropRegionEntry.data.i32[2] = mArrayWidth;
+                cropRegionEntry.data.i32[3] = mArrayHeight;
+            }
+        }
     }
 
     if (mHalSupportsZoomRatio && zoomRatioIs1) {
diff --git a/services/mediametrics/fuzzer/Android.bp b/services/mediametrics/fuzzer/Android.bp
new file mode 100644
index 0000000..df4c867
--- /dev/null
+++ b/services/mediametrics/fuzzer/Android.bp
@@ -0,0 +1,59 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+
+cc_fuzz {
+    name: "mediametrics_service_fuzzer",
+
+    srcs: [
+        "mediametrics_service_fuzzer.cpp",
+    ],
+
+    static_libs: [
+        "libmediametrics",
+        "libmediametricsservice",
+        "libplatformprotos",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libbinder",
+        "libcutils",
+        "liblog",
+        "libmedia_helper",
+        "libmediautils",
+        "libmemunreachable",
+        "libprotobuf-cpp-lite",
+        "libstagefright",
+        "libstatslog",
+        "libutils",
+    ],
+
+    include_dirs: [
+        "frameworks/av/services/mediametrics",
+        "system/media/audio_utils/include",
+    ],
+
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/services/mediametrics/fuzzer/README.md b/services/mediametrics/fuzzer/README.md
new file mode 100644
index 0000000..a13830e
--- /dev/null
+++ b/services/mediametrics/fuzzer/README.md
@@ -0,0 +1,54 @@
+# Fuzzer for libmediametricsservice
+
+## Plugin Design Considerations
+The fuzzer plugin for libmediametricsservice is designed based on an
+understanding of the service and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+Media Metrics Service contains the following modules:
+1. Media Metrics Item Manipulation (module name: `Item`)
+2. Media Metrics Time Machine Storage (module name: `TimeMachineStorage`)
+3. Media Metrics Transaction Log (module name: `TransactionLog`)
+4. Media Metrics Analytics Action (module name: `AnalyticsAction`)
+5. Media Metrics Audio Analytics (module name: `AudioAnalytics`)
+6. Media Metrics Timed Action (module name: `TimedAction`)
+
+| Module | Valid Input Values | Configured Value |
+|------------- |-------------| ----- |
+| `Item` | Key: `std::string`. Values: `INT32_MIN` to `INT32_MAX`, `INT64_MIN` to `INT64_MAX`, `std::string`, `double`, `pair<INT64_MIN to INT64_MAX, INT64_MIN to INT64_MAX>` | Value obtained from FuzzedDataProvider |
+| `TimeMachineStorage` | Key: `std::string`. Values: `INT32_MIN` to `INT32_MAX`, `INT64_MIN` to `INT64_MAX`, `std::string`, `double`, `pair<INT64_MIN to INT64_MAX, INT64_MIN to INT64_MAX>` | Value obtained from FuzzedDataProvider |
+| `TransactionLog` | `mediametrics::Item` | `mediametrics::Item` created by obtaining values from FuzzedDataProvider |
+| `AnalyticsAction`   | URL: `std::string` ending with .event, Value: `std::string`, action: A function | URL and Values obtained from FuzzedDataProvider, a placeholder function was passed as action|
+| `AudioAnalytics`   | `mediametrics::Item` | `mediametrics::Item` created by obtaining values from FuzzedDataProvider|
+| `TimedAction`   | time: `std::chrono::seconds`, function: `std::function` | `std::chrono::seconds` : value obtained from FuzzedDataProvider, `std::function`: a placeholder function was used. |
+
+This also ensures that the plugin is always deterministic for any given input.
+
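+As a reference for this data-driven approach, here is a minimal sketch of the
+pattern used throughout the plugin: every value fed into the module under test
+is consumed from the fuzz input via `FuzzedDataProvider`. The helper name
+`fuzzOneItem` below is illustrative only and is not part of the fuzzer.
+
+```cpp
+#include <fuzzer/FuzzedDataProvider.h>
+#include <media/MediaMetricsItem.h>
+#include <string>
+
+// Sketch: derive an Item's keys and values entirely from the fuzz input, so a
+// given input always produces the same call sequence (deterministic runs).
+static void fuzzOneItem(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp(data, size);
+    android::mediametrics::Item item(fdp.ConsumeRandomLengthString().c_str());
+    while (fdp.remaining_bytes()) {
+        const std::string key = fdp.ConsumeRandomLengthString();
+        if (key.empty()) break;
+        if (fdp.ConsumeBool()) {
+            item.setInt32(key.c_str(), fdp.ConsumeIntegral<int32_t>());
+        } else {
+            item.setDouble(key.c_str(), fdp.ConsumeFloatingPoint<double>());
+        }
+    }
+}
+```
+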
+## Build
+
+This describes the steps to build the mediametrics_service_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) mediametrics_service_fuzzer
+```
+
+#### Steps to run
+Create a directory CORPUS_DIR and copy some files into it.
+Push this directory to the device.
+
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/mediametrics_service_fuzzer/mediametrics_service_fuzzer CORPUS_DIR
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
new file mode 100644
index 0000000..0cb2594
--- /dev/null
+++ b/services/mediametrics/fuzzer/mediametrics_service_fuzzer.cpp
@@ -0,0 +1,372 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+#include <fuzzer/FuzzedDataProvider.h>
+#include <media/MediaMetricsItem.h>
+#include <stdio.h>
+#include <string.h>
+#include <utils/Log.h>
+#include <algorithm>
+
+#include "AudioTypes.h"
+#include "MediaMetricsService.h"
+#include "StringUtils.h"
+
+using namespace android;
+
+// low water mark
+constexpr size_t kLogItemsLowWater = 1;
+// high water mark
+constexpr size_t kLogItemsHighWater = 2;
+
+class MediaMetricsServiceFuzzer {
+   public:
+    void invokeStartsWith(const uint8_t *data, size_t size);
+    void invokeInstantiate(const uint8_t *data, size_t size);
+    void invokePackageInstallerCheck(const uint8_t *data, size_t size);
+    void invokeItemManipulation(const uint8_t *data, size_t size);
+    void invokeItemExpansion(const uint8_t *data, size_t size);
+    void invokeTimeMachineStorage(const uint8_t *data, size_t size);
+    void invokeTransactionLog(const uint8_t *data, size_t size);
+    void invokeAnalyticsAction(const uint8_t *data, size_t size);
+    void invokeAudioAnalytics(const uint8_t *data, size_t size);
+    void invokeTimedAction(const uint8_t *data, size_t size);
+    void process(const uint8_t *data, size_t size);
+};
+
+void MediaMetricsServiceFuzzer::invokeStartsWith(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    while (fdp.remaining_bytes()) {
+        android::mediametrics::startsWith(fdp.ConsumeRandomLengthString(),
+                                          fdp.ConsumeRandomLengthString());
+    }
+}
+
+void MediaMetricsServiceFuzzer::invokeInstantiate(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    sp<MediaMetricsService> mediaMetricsService = new MediaMetricsService();
+
+    while (fdp.remaining_bytes()) {
+        std::unique_ptr<mediametrics::Item> random_key(
+            mediametrics::Item::create(fdp.ConsumeRandomLengthString()));
+        mediaMetricsService->submit(random_key.get());
+        random_key->setInt32(fdp.ConsumeRandomLengthString().c_str(),
+                             fdp.ConsumeIntegral<int32_t>());
+        mediaMetricsService->submit(random_key.get());
+
+        std::unique_ptr<mediametrics::Item> audiotrack_key(
+            mediametrics::Item::create("audiotrack"));
+        mediaMetricsService->submit(audiotrack_key.get());
+        audiotrack_key->addInt32(fdp.ConsumeRandomLengthString().c_str(),
+                                 fdp.ConsumeIntegral<int32_t>());
+        mediaMetricsService->submit(audiotrack_key.get());
+    }
+}
+
+void MediaMetricsServiceFuzzer::invokePackageInstallerCheck(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    while (fdp.remaining_bytes()) {
+        MediaMetricsService::useUidForPackage(fdp.ConsumeRandomLengthString().c_str(),
+                                              fdp.ConsumeRandomLengthString().c_str());
+    }
+}
+
+void MediaMetricsServiceFuzzer::invokeItemManipulation(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+    mediametrics::Item item(fdp.ConsumeRandomLengthString().c_str());
+    while (fdp.remaining_bytes()) {
+        const uint8_t action = fdp.ConsumeIntegralInRange<uint8_t>(0, 16);
+        const std::string key = fdp.ConsumeRandomLengthString();
+        if (fdp.remaining_bytes() < 1 || key.length() < 1) {
+            break;
+        }
+        switch (action) {
+            case 0: {
+                item.setInt32(key.c_str(), fdp.ConsumeIntegral<int32_t>());
+                break;
+            }
+            case 1: {
+                item.addInt32(key.c_str(), fdp.ConsumeIntegral<int32_t>());
+                break;
+            }
+            case 2: {
+                int32_t i32 = 0;
+                item.getInt32(key.c_str(), &i32);
+                break;
+            }
+            case 3: {
+                item.setInt64(key.c_str(), fdp.ConsumeIntegral<int64_t>());
+                break;
+            }
+            case 4: {
+                item.addInt64(key.c_str(), fdp.ConsumeIntegral<int64_t>());
+                break;
+            }
+            case 5: {
+                int64_t i64 = 0;
+                item.getInt64(key.c_str(), &i64);
+                break;
+            }
+            case 6: {
+                item.setDouble(key.c_str(), fdp.ConsumeFloatingPoint<double>());
+                break;
+            }
+            case 7: {
+                item.addDouble(key.c_str(), fdp.ConsumeFloatingPoint<double>());
+                break;
+            }
+            case 8: {
+                double d = 0;
+                item.getDouble(key.c_str(), &d);
+                break;
+            }
+            case 9: {
+                item.setCString(key.c_str(), fdp.ConsumeRandomLengthString().c_str());
+                break;
+            }
+            case 10: {
+                char *s = nullptr;
+                item.getCString(key.c_str(), &s);
+                if (s) free(s);
+                break;
+            }
+            case 11: {
+                std::string s;
+                item.getString(key.c_str(), &s);
+                break;
+            }
+            case 12: {
+                item.setRate(key.c_str(), fdp.ConsumeIntegral<int64_t>(),
+                             fdp.ConsumeIntegral<int64_t>());
+                break;
+            }
+            case 13: {
+                int64_t b = 0, h = 0;
+                double d = 0;
+                item.getRate(key.c_str(), &b, &h, &d);
+                break;
+            }
+            case 14: {
+                (void)item.filter(key.c_str());
+                break;
+            }
+            case 15: {
+                const char *arr[1] = {""};
+                arr[0] = const_cast<char *>(key.c_str());
+                (void)item.filterNot(1, arr);
+                break;
+            }
+            case 16: {
+                (void)item.toString().c_str();
+                break;
+            }
+        }
+    }
+
+    Parcel p;
+    mediametrics::Item item2;
+
+    (void)item.writeToParcel(&p);
+    p.setDataPosition(0);  // rewind for reading
+    (void)item2.readFromParcel(p);
+
+    char *byteData = nullptr;
+    size_t length = 0;
+    (void)item.writeToByteString(&byteData, &length);
+    (void)item2.readFromByteString(byteData, length);
+    if (byteData) {
+        free(byteData);
+    }
+
+    sp<MediaMetricsService> mediaMetricsService = new MediaMetricsService();
+    mediaMetricsService->submit(&item2);
+}
+
+void MediaMetricsServiceFuzzer::invokeItemExpansion(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+    mediametrics::LogItem<1> item("FuzzItem");
+    item.setPid(fdp.ConsumeIntegral<int16_t>()).setUid(fdp.ConsumeIntegral<int16_t>());
+
+    while (fdp.remaining_bytes()) {
+        int32_t i = fdp.ConsumeIntegral<int32_t>();
+        item.set(std::to_string(i).c_str(), (int32_t)i);
+    }
+    item.updateHeader();
+
+    mediametrics::Item item2;
+    (void)item2.readFromByteString(item.getBuffer(), item.getLength());
+
+    sp<MediaMetricsService> mediaMetricsService = new MediaMetricsService();
+    mediaMetricsService->submit(&item2);
+}
+
+void MediaMetricsServiceFuzzer::invokeTimeMachineStorage(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+    auto item = std::make_shared<mediametrics::Item>("FuzzKey");
+    int32_t i32 = fdp.ConsumeIntegral<int32_t>();
+    int64_t i64 = fdp.ConsumeIntegral<int64_t>();
+    double d = fdp.ConsumeFloatingPoint<double>();
+    std::string str = fdp.ConsumeRandomLengthString();
+    std::pair<int64_t, int64_t> pair(fdp.ConsumeIntegral<int64_t>(),
+                                     fdp.ConsumeIntegral<int64_t>());
+    (*item).set("i32", i32).set("i64", i64).set("double", d).set("string", str).set("rate", pair);
+
+    android::mediametrics::TimeMachine timeMachine;
+    timeMachine.put(item, true);
+
+    timeMachine.get("Key", "i32", &i32, -1);
+
+    timeMachine.get("Key", "i64", &i64, -1);
+
+    timeMachine.get("Key", "double", &d, -1);
+
+    timeMachine.get("Key", "string", &str, -1);
+
+    timeMachine.get("Key.i32", &i32, -1);
+
+    timeMachine.get("Key.i64", &i64, -1);
+
+    timeMachine.get("Key.double", &d, -1);
+
+    str.clear();
+    timeMachine.get("Key.string", &str, -1);
+}
+
+void MediaMetricsServiceFuzzer::invokeTransactionLog(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+    auto item = std::make_shared<mediametrics::Item>("Key1");
+    (*item)
+        .set("one", fdp.ConsumeIntegral<int32_t>())
+        .set("two", fdp.ConsumeIntegral<int32_t>())
+        .setTimestamp(fdp.ConsumeIntegral<int32_t>());
+
+    android::mediametrics::TransactionLog transactionLog(
+        kLogItemsLowWater, kLogItemsHighWater);  // keep at most 2 items
+    transactionLog.size();
+
+    transactionLog.put(item);
+    transactionLog.size();
+
+    auto item2 = std::make_shared<mediametrics::Item>("Key2");
+    (*item2)
+        .set("three", fdp.ConsumeIntegral<int32_t>())
+        .set("[Key1]three", fdp.ConsumeIntegral<int32_t>())
+        .setTimestamp(fdp.ConsumeIntegral<int32_t>());
+
+    transactionLog.put(item2);
+    transactionLog.size();
+
+    auto item3 = std::make_shared<mediametrics::Item>("Key3");
+    (*item3)
+        .set("six", fdp.ConsumeIntegral<int32_t>())
+        .set("[Key1]four", fdp.ConsumeIntegral<int32_t>())  // affects Key1
+        .set("[Key1]five", fdp.ConsumeIntegral<int32_t>())  // affects key1
+        .setTimestamp(fdp.ConsumeIntegral<int32_t>());
+
+    transactionLog.put(item3);
+    transactionLog.size();
+}
+
+void MediaMetricsServiceFuzzer::invokeAnalyticsAction(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+    mediametrics::AnalyticsActions analyticsActions;
+    bool action = false;
+
+    while (fdp.remaining_bytes()) {
+        analyticsActions.addAction(
+            (fdp.ConsumeRandomLengthString() + std::string(".event")).c_str(),
+            fdp.ConsumeRandomLengthString(),
+            std::make_shared<mediametrics::AnalyticsActions::Function>(
+                [&](const std::shared_ptr<const android::mediametrics::Item> &) {
+                    action = true;
+                }));
+    }
+
+    FuzzedDataProvider fdp2 = FuzzedDataProvider(data, size);
+
+    while (fdp2.remaining_bytes()) {
+        // make a test item
+        auto item = std::make_shared<mediametrics::Item>(fdp2.ConsumeRandomLengthString().c_str());
+        (*item).set("event", fdp2.ConsumeRandomLengthString().c_str());
+
+        // get the actions and execute them
+        auto actions = analyticsActions.getActionsForItem(item);
+        for (const auto &action : actions) {
+            action->operator()(item);
+        }
+    }
+}
+
+void MediaMetricsServiceFuzzer::invokeAudioAnalytics(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    android::mediametrics::AudioAnalytics audioAnalytics;
+
+    while (fdp.remaining_bytes()) {
+        auto item = std::make_shared<mediametrics::Item>(fdp.ConsumeRandomLengthString().c_str());
+        int32_t transactionUid = fdp.ConsumeIntegral<int32_t>();  // arbitrary
+        (*item)
+            .set(fdp.ConsumeRandomLengthString().c_str(), fdp.ConsumeIntegral<int32_t>())
+            .set(fdp.ConsumeRandomLengthString().c_str(), fdp.ConsumeIntegral<int32_t>())
+            .set(AMEDIAMETRICS_PROP_ALLOWUID, transactionUid)
+            .setUid(transactionUid)
+            .setTimestamp(fdp.ConsumeIntegral<int32_t>());
+        audioAnalytics.submit(item, fdp.ConsumeBool());
+    }
+
+    audioAnalytics.dump(1000);
+}
+
+void MediaMetricsServiceFuzzer::invokeTimedAction(const uint8_t *data, size_t size) {
+    FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+    android::mediametrics::TimedAction timedAction;
+    std::atomic_int value = 0;
+
+    while (fdp.remaining_bytes()) {
+        timedAction.postIn(std::chrono::seconds(fdp.ConsumeIntegral<int32_t>()),
+                           [&value] { ++value; });
+        timedAction.size();
+    }
+}
+
+void MediaMetricsServiceFuzzer::process(const uint8_t *data, size_t size) {
+    invokeStartsWith(data, size);
+    invokeInstantiate(data, size);
+    invokePackageInstallerCheck(data, size);
+    invokeItemManipulation(data, size);
+    invokeItemExpansion(data, size);
+    invokeTimeMachineStorage(data, size);
+    invokeTransactionLog(data, size);
+    invokeAnalyticsAction(data, size);
+    invokeAudioAnalytics(data, size);
+    invokeTimedAction(data, size);
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    if (size < 1) {
+        return 0;
+    }
+    MediaMetricsServiceFuzzer mediaMetricsServiceFuzzer;
+    mediaMetricsServiceFuzzer.process(data, size);
+    return 0;
+}
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index 16814d9..4180e0c 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -71,6 +71,22 @@
         metrics_proto.set_tracks(ntrk);
     }
 
+    // android.media.mediaextractor.entry       string
+    std::string entry_point_string;
+    if (item->getString("android.media.mediaextractor.entry", &entry_point_string)) {
+        stats::mediametrics::ExtractorData::EntryPoint entry_point;
+        if (entry_point_string == "sdk") {
+            entry_point = stats::mediametrics::ExtractorData_EntryPoint_SDK;
+        } else if (entry_point_string == "ndk-with-jvm") {
+            entry_point = stats::mediametrics::ExtractorData_EntryPoint_NDK_WITH_JVM;
+        } else if (entry_point_string == "ndk-no-jvm") {
+            entry_point = stats::mediametrics::ExtractorData_EntryPoint_NDK_NO_JVM;
+        } else {
+            entry_point = stats::mediametrics::ExtractorData_EntryPoint_OTHER;
+        }
+        metrics_proto.set_entry_point(entry_point);
+    }
+
     std::string serialized;
     if (!metrics_proto.SerializeToString(&serialized)) {
         ALOGE("Failed to serialize extractor metrics");
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 32ac583..289cffd 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -22,6 +22,7 @@
 #include <android/binder_manager.h>
 #include <android/binder_process.h>
 #include <binder/IMediaResourceMonitor.h>
+#include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <cutils/sched_policy.h>
 #include <dirent.h>
@@ -96,7 +97,7 @@
 
     service->overridePid(mPid, -1);
     // thiz is freed in the call below, so it must be last call referring thiz
-    service->removeResource(mPid, mClientId, false);
+    service->removeResource(mPid, mClientId, false /*checkValid*/);
 }
 
 class OverrideProcessInfoDeathNotifier : public DeathNotifier {
@@ -422,8 +423,12 @@
 
     Mutex::Autolock lock(mLock);
     if (!mProcessInfo->isValidPid(pid)) {
-        ALOGE("Rejected addResource call with invalid pid.");
-        return Status::fromServiceSpecificError(BAD_VALUE);
+        pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        uid_t callingUid = IPCThreadState::self()->getCallingUid();
+        ALOGW("%s called with untrusted pid %d, using calling pid %d, uid %d", __FUNCTION__,
+                pid, callingPid, callingUid);
+        pid = callingPid;
+        uid = callingUid;
     }
     ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
     ResourceInfo& info = getResourceInfoForEdit(uid, clientId, client, infos);
@@ -477,8 +482,10 @@
 
     Mutex::Autolock lock(mLock);
     if (!mProcessInfo->isValidPid(pid)) {
-        ALOGE("Rejected removeResource call with invalid pid.");
-        return Status::fromServiceSpecificError(BAD_VALUE);
+        pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        ALOGW("%s called with untrusted pid %d, using calling pid %d", __FUNCTION__,
+                pid, callingPid);
+        pid = callingPid;
     }
     ssize_t index = mMap.indexOfKey(pid);
     if (index < 0) {
@@ -531,7 +538,7 @@
 }
 
 Status ResourceManagerService::removeClient(int32_t pid, int64_t clientId) {
-    removeResource(pid, clientId, true);
+    removeResource(pid, clientId, true /*checkValid*/);
     return Status::ok();
 }
 
@@ -543,8 +550,10 @@
 
     Mutex::Autolock lock(mLock);
     if (checkValid && !mProcessInfo->isValidPid(pid)) {
-        ALOGE("Rejected removeResource call with invalid pid.");
-        return Status::fromServiceSpecificError(BAD_VALUE);
+        pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        ALOGW("%s called with untrusted pid %d, using calling pid %d", __FUNCTION__,
+                pid, callingPid);
+        pid = callingPid;
     }
     ssize_t index = mMap.indexOfKey(pid);
     if (index < 0) {
@@ -599,8 +608,10 @@
     {
         Mutex::Autolock lock(mLock);
         if (!mProcessInfo->isValidPid(callingPid)) {
-            ALOGE("Rejected reclaimResource call with invalid callingPid.");
-            return Status::fromServiceSpecificError(BAD_VALUE);
+            pid_t actualCallingPid = IPCThreadState::self()->getCallingPid();
+            ALOGW("%s called with untrusted pid %d, using actual calling pid %d", __FUNCTION__,
+                    callingPid, actualCallingPid);
+            callingPid = actualCallingPid;
         }
         const MediaResourceParcel *secureCodec = NULL;
         const MediaResourceParcel *nonSecureCodec = NULL;
@@ -836,8 +847,10 @@
 
     Mutex::Autolock lock(mLock);
     if (!mProcessInfo->isValidPid(pid)) {
-        ALOGE("Rejected markClientForPendingRemoval call with invalid pid.");
-        return Status::fromServiceSpecificError(BAD_VALUE);
+        pid_t callingPid = IPCThreadState::self()->getCallingPid();
+        ALOGW("%s called with untrusted pid %d, using calling pid %d", __FUNCTION__,
+                pid, callingPid);
+        pid = callingPid;
     }
     ssize_t index = mMap.indexOfKey(pid);
     if (index < 0) {
@@ -866,8 +879,10 @@
     {
         Mutex::Autolock lock(mLock);
         if (!mProcessInfo->isValidPid(pid)) {
-            ALOGE("Rejected reclaimResourcesFromClientsPendingRemoval call with invalid pid.");
-            return Status::fromServiceSpecificError(BAD_VALUE);
+            pid_t callingPid = IPCThreadState::self()->getCallingPid();
+            ALOGW("%s called with untrusted pid %d, using calling pid %d", __FUNCTION__,
+                    pid, callingPid);
+            pid = callingPid;
         }
 
         for (MediaResource::Type type : {MediaResource::Type::kSecureCodec,
diff --git a/services/mediaresourcemanager/ResourceObserverService.cpp b/services/mediaresourcemanager/ResourceObserverService.cpp
index 44fe72d..9cc6fe4 100644
--- a/services/mediaresourcemanager/ResourceObserverService.cpp
+++ b/services/mediaresourcemanager/ResourceObserverService.cpp
@@ -27,14 +27,6 @@
 
 #include "ResourceObserverService.h"
 
-namespace aidl {
-namespace android {
-namespace media {
-bool operator<(const MediaObservableFilter& lhs, const MediaObservableFilter &rhs) {
-    return lhs.type < rhs.type || (lhs.type == rhs.type && lhs.eventFilter < rhs.eventFilter);
-}
-}}} // namespace ::aidl::android::media
-
 namespace android {
 
 using ::aidl::android::media::MediaResourceParcel;
diff --git a/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h b/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
index 4cf5f0a..8e29312 100644
--- a/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
+++ b/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
@@ -23,15 +23,6 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/ProcessInfoInterface.h>
 
-namespace aidl {
-namespace android {
-namespace media {
-bool operator== (const MediaResourceParcel& lhs, const MediaResourceParcel& rhs) {
-    return lhs.type == rhs.type && lhs.subType == rhs.subType &&
-            lhs.id == rhs.id && lhs.value == rhs.value;
-}
-}}}
-
 namespace android {
 
 using Status = ::ndk::ScopedAStatus;
diff --git a/services/mediaresourcemanager/test/ResourceObserverService_test.cpp b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
index 4c26246..e3d3e78 100644
--- a/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
@@ -25,14 +25,6 @@
 #include "ResourceObserverService.h"
 #include "ResourceManagerServiceTestUtils.h"
 
-namespace aidl {
-namespace android {
-namespace media {
-bool operator==(const MediaObservableParcel& lhs, const MediaObservableParcel& rhs) {
-    return lhs.type == rhs.type && lhs.value == rhs.value;
-}
-}}} // namespace ::aidl::android::media
-
 namespace android {
 
 using ::aidl::android::media::BnResourceObserver;