Merge "Codec2: Initialize InputSurfaceWrapper::Config structure fields"
diff --git a/apex/Android.bp b/apex/Android.bp
index f3e8a55..6ba9cb9 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -97,6 +97,7 @@
         "crash_dump.policy",
         "mediaswcodec.xml",
     ],
+    use_vendor: true,
     key: "com.android.media.swcodec.key",
     certificate: ":com.android.media.swcodec.certificate",
 
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index f4b8164..e000633 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -272,7 +272,7 @@
 status_t SimplePlayer::onPrepare() {
     CHECK_EQ(mState, UNPREPARED);
 
-    mExtractor = new NuMediaExtractor;
+    mExtractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
 
     status_t err = mExtractor->setDataSource(
             NULL /* httpService */, mPath.c_str());
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index c26e0b9..33c4663 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -79,7 +79,7 @@
 
     static int64_t kTimeout = 500ll;
 
-    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
     if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor.\n");
         return 1;
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index b894545..ca058ab 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -319,7 +319,8 @@
 
     static int64_t kTimeout = 500ll;
 
-    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
+
     if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor.\n");
         return 1;
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index 4a83a4a..bc7e41e 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -62,7 +62,7 @@
         int trimEndTimeMs,
         int rotationDegrees,
         MediaMuxer::OutputFormat container = MediaMuxer::OUTPUT_FORMAT_MPEG_4) {
-    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
     if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
         fprintf(stderr, "unable to instantiate extractor. %s\n", path);
         return 1;
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 12ed725..b520c17 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -734,7 +734,7 @@
             }
             if (timestampMax < timestamp) timestampMax = timestamp;
         }
-        timestampOffset = timestampMax;
+        timestampOffset = timestampMax + 33333;
         eleInfo.close();
 
         // Reset Total frames before second decode loop
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 3ef454b..06464b5 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1403,6 +1403,7 @@
                 continue;
             }
             if (work->input.buffers.empty()
+                    || work->input.buffers.front() == nullptr
                     || work->input.buffers.front()->data().linearBlocks().empty()) {
                 ALOGD("[%s] no linear codec config data found", mName);
                 continue;
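
The added null check above works because || short-circuits: work->input.buffers.front() is only dereferenced once the vector is known to be non-empty and the front element is known to be non-null, so a work item carrying a null buffer is skipped instead of crashing. For orientation only (not part of the patch), a minimal sketch of the same guard ordering, assuming a hypothetical Buffer type held by shared_ptr:

    #include <memory>
    #include <vector>

    struct Buffer { std::vector<int> data; };  // hypothetical stand-in type

    // Same guard ordering as the hunk above: each condition runs only if the
    // previous one was false, so front() is never called on an empty vector
    // and ->data is never read through a null pointer.
    bool hasUsableFront(const std::vector<std::shared_ptr<Buffer>>& buffers) {
        return !(buffers.empty()
                || buffers.front() == nullptr
                || buffers.front()->data.empty());
    }
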
diff --git a/media/codecs/g711/decoder/Android.bp b/media/codecs/g711/decoder/Android.bp
index 51f4c38..efff60b 100644
--- a/media/codecs/g711/decoder/Android.bp
+++ b/media/codecs/g711/decoder/Android.bp
@@ -35,13 +35,7 @@
         ],
         cfi: true,
     },
-
-    apex_available: [
-        "//apex_available:platform",
-        "com.android.media.swcodec",
-        "test_com.android.media.swcodec",
-    ],
-
+    apex_available: ["com.android.media.swcodec"],
     min_sdk_version: "29",
 
     target: {
diff --git a/media/codecs/mp3dec/Android.bp b/media/codecs/mp3dec/Android.bp
index 1acf0a6..f84da21 100644
--- a/media/codecs/mp3dec/Android.bp
+++ b/media/codecs/mp3dec/Android.bp
@@ -1,3 +1,18 @@
+cc_library_headers {
+    name: "libstagefright_mp3dec_headers",
+    vendor_available: true,
+    min_sdk_version: "29",
+    host_supported: true,
+    export_include_dirs: [
+        "include",
+        "src",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.media.swcodec",
+    ],
+}
+
 cc_library_static {
     name: "libstagefright_mp3dec",
     vendor_available: true,
@@ -65,10 +80,8 @@
 
     include_dirs: ["frameworks/av/media/libstagefright/include"],
 
-    export_include_dirs: [
-        "include",
-        "src",
-    ],
+    header_libs: ["libstagefright_mp3dec_headers"],
+    export_header_lib_headers: ["libstagefright_mp3dec_headers"],
 
     cflags: [
         "-DOSCL_UNUSED_ARG(x)=(void)(x)",
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
index 2b2fe6d..6e3a1c8 100644
--- a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
@@ -15,10 +15,12 @@
  */
 
 #include <binding/AAudioBinderAdapter.h>
+#include <media/AidlConversionUtil.h>
 #include <utility/AAudioUtilities.h>
 
 namespace aaudio {
 
+using android::aidl_utils::statusTFromBinderStatus;
 using android::binder::Status;
 
 AAudioBinderAdapter::AAudioBinderAdapter(IAAudioService* delegate)
@@ -36,7 +38,7 @@
                                           &params,
                                           &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     config = params;
     return result;
@@ -46,7 +48,7 @@
     aaudio_result_t result;
     Status status = mDelegate->closeStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -59,7 +61,7 @@
                                                     &endpoint,
                                                     &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     endpointOut = std::move(endpoint);
     return result;
@@ -69,7 +71,7 @@
     aaudio_result_t result;
     Status status = mDelegate->startStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -78,7 +80,7 @@
     aaudio_result_t result;
     Status status = mDelegate->pauseStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -87,7 +89,7 @@
     aaudio_result_t result;
     Status status = mDelegate->stopStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -96,7 +98,7 @@
     aaudio_result_t result;
     Status status = mDelegate->flushStream(streamHandle, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -107,7 +109,7 @@
     aaudio_result_t result;
     Status status = mDelegate->registerAudioThread(streamHandle, clientThreadId, periodNanoseconds, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
@@ -117,7 +119,7 @@
     aaudio_result_t result;
     Status status = mDelegate->unregisterAudioThread(streamHandle, clientThreadId, &result);
     if (!status.isOk()) {
-        result = AAudioConvert_androidToAAudioResult(status.transactionError());
+        result = AAudioConvert_androidToAAudioResult(statusTFromBinderStatus(status));
     }
     return result;
 }
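
Every wrapper in this file (and the AudioFlingerClientAdapter changes further below) replaces status.transactionError() with statusTFromBinderStatus(status). transactionError() only reflects transport-level failures and reads as NO_ERROR when the remote call came back with an exception or a service-specific error, so those failures were previously reported as success. A rough, orientation-only approximation of the helper (the real one lives in media/AidlConversionUtil.h):

    #include <binder/Status.h>
    #include <utils/Errors.h>

    // Orientation-only approximation; not the actual implementation.
    static android::status_t exampleStatusTFromBinderStatus(
            const android::binder::Status& status) {
        if (status.isOk()) return android::OK;
        // Look at the layers in order: a service-specific error first, then a
        // low-level transaction error, then fall back to a generic failure for
        // any remaining exception.
        if (status.serviceSpecificErrorCode() != 0) return status.serviceSpecificErrorCode();
        if (status.transactionError() != android::OK) return status.transactionError();
        return android::UNKNOWN_ERROR;
    }
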
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1d036d0..af8ff19 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -558,7 +558,7 @@
         if (status < 0) { // a non-negative value is the volume shaper id.
             ALOGE("applyVolumeShaper() failed with status %d", status);
         }
-        return binder::Status::fromStatusT(status);
+        return aidl_utils::binderStatusFromStatusT(status);
     } else {
         ALOGD("applyVolumeShaper()"
                       " no AudioTrack for volume control from IPlayer");
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index a27cf78..7e5eba3 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -117,56 +117,58 @@
 
 ConversionResult<Direction> direction(media::AudioPortRole role, media::AudioPortType type) {
     switch (type) {
+        case media::AudioPortType::NONE:
+        case media::AudioPortType::SESSION:
+            break;  // must be listed  -Werror,-Wswitch
         case media::AudioPortType::DEVICE:
             switch (role) {
+                case media::AudioPortRole::NONE:
+                     break;  // must be listed  -Werror,-Wswitch
                 case media::AudioPortRole::SOURCE:
                     return Direction::INPUT;
                 case media::AudioPortRole::SINK:
                     return Direction::OUTPUT;
-                default:
-                    break;
             }
             break;
         case media::AudioPortType::MIX:
             switch (role) {
+                case media::AudioPortRole::NONE:
+                     break;  // must be listed  -Werror,-Wswitch
                 case media::AudioPortRole::SOURCE:
                     return Direction::OUTPUT;
                 case media::AudioPortRole::SINK:
                     return Direction::INPUT;
-                default:
-                    break;
             }
             break;
-        default:
-            break;
     }
     return unexpected(BAD_VALUE);
 }
 
 ConversionResult<Direction> direction(audio_port_role_t role, audio_port_type_t type) {
     switch (type) {
+        case AUDIO_PORT_TYPE_NONE:
+        case AUDIO_PORT_TYPE_SESSION:
+            break;  // must be listed  -Werror,-Wswitch
         case AUDIO_PORT_TYPE_DEVICE:
             switch (role) {
+                case AUDIO_PORT_ROLE_NONE:
+                     break;  // must be listed  -Werror,-Wswitch
                 case AUDIO_PORT_ROLE_SOURCE:
                     return Direction::INPUT;
                 case AUDIO_PORT_ROLE_SINK:
                     return Direction::OUTPUT;
-                default:
-                    break;
             }
             break;
         case AUDIO_PORT_TYPE_MIX:
             switch (role) {
+                case AUDIO_PORT_ROLE_NONE:
+                     break;  // must be listed  -Werror,-Wswitch
                 case AUDIO_PORT_ROLE_SOURCE:
                     return Direction::OUTPUT;
                 case AUDIO_PORT_ROLE_SINK:
                     return Direction::INPUT;
-                default:
-                    break;
             }
             break;
-        default:
-            break;
     }
     return unexpected(BAD_VALUE);
 }
@@ -276,8 +278,9 @@
     return std::string(legacy.c_str());
 }
 
-// The legacy enum is unnamed. Thus, we use int.
-ConversionResult<int> aidl2legacy_AudioPortConfigType(media::AudioPortConfigType aidl) {
+// The legacy enum is unnamed. Thus, we use int32_t.
+ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
+        media::AudioPortConfigType aidl) {
     switch (aidl) {
         case media::AudioPortConfigType::SAMPLE_RATE:
             return AUDIO_PORT_CONFIG_SAMPLE_RATE;
@@ -293,8 +296,9 @@
     return unexpected(BAD_VALUE);
 }
 
-// The legacy enum is unnamed. Thus, we use int.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_AudioPortConfigType(int legacy) {
+// The legacy enum is unnamed. Thus, we use int32_t.
+ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
+        int32_t legacy) {
     switch (legacy) {
         case AUDIO_PORT_CONFIG_SAMPLE_RATE:
             return media::AudioPortConfigType::SAMPLE_RATE;
@@ -312,7 +316,7 @@
 
 ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl) {
     return convertBitmask<unsigned int, int32_t, int, media::AudioPortConfigType>(
-            aidl, aidl2legacy_AudioPortConfigType,
+            aidl, aidl2legacy_AudioPortConfigType_int32_t,
             // AudioPortConfigType enum is index-based.
             index2enum_index<media::AudioPortConfigType>,
             // AUDIO_PORT_CONFIG_* flags are mask-based.
@@ -321,7 +325,7 @@
 
 ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy) {
     return convertBitmask<int32_t, unsigned int, media::AudioPortConfigType, int>(
-            legacy, legacy2aidl_AudioPortConfigType,
+            legacy, legacy2aidl_int32_t_AudioPortConfigType,
             // AUDIO_PORT_CONFIG_* flags are mask-based.
             index2enum_bitmask<unsigned>,
             // AudioPortConfigType enum is index-based.
@@ -363,9 +367,8 @@
             return AUDIO_INPUT_CONFIG_CHANGED;
         case media::AudioIoConfigEvent::CLIENT_STARTED:
             return AUDIO_CLIENT_STARTED;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_AudioIoConfigEvent(
@@ -389,9 +392,8 @@
             return media::AudioIoConfigEvent::INPUT_CONFIG_CHANGED;
         case AUDIO_CLIENT_STARTED:
             return media::AudioIoConfigEvent::CLIENT_STARTED;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_port_role_t> aidl2legacy_AudioPortRole_audio_port_role_t(
@@ -403,9 +405,8 @@
             return AUDIO_PORT_ROLE_SOURCE;
         case media::AudioPortRole::SINK:
             return AUDIO_PORT_ROLE_SINK;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioPortRole> legacy2aidl_audio_port_role_t_AudioPortRole(
@@ -417,9 +418,8 @@
             return media::AudioPortRole::SOURCE;
         case AUDIO_PORT_ROLE_SINK:
             return media::AudioPortRole::SINK;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_port_type_t> aidl2legacy_AudioPortType_audio_port_type_t(
@@ -433,9 +433,8 @@
             return AUDIO_PORT_TYPE_MIX;
         case media::AudioPortType::SESSION:
             return AUDIO_PORT_TYPE_SESSION;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioPortType> legacy2aidl_audio_port_type_t_AudioPortType(
@@ -449,9 +448,8 @@
             return media::AudioPortType::MIX;
         case AUDIO_PORT_TYPE_SESSION:
             return media::AudioPortType::SESSION;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_format_t> aidl2legacy_AudioFormat_audio_format_t(
@@ -476,9 +474,8 @@
             return AUDIO_GAIN_MODE_CHANNELS;
         case media::AudioGainMode::RAMP:
             return AUDIO_GAIN_MODE_RAMP;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy) {
@@ -489,9 +486,8 @@
             return media::AudioGainMode::CHANNELS;
         case AUDIO_GAIN_MODE_RAMP:
             return media::AudioGainMode::RAMP;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
@@ -503,7 +499,7 @@
             enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
 }
 
-ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_mask_int32_t(audio_gain_mode_t legacy) {
+ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy) {
     return convertBitmask<int32_t, audio_gain_mode_t, media::AudioGainMode, audio_gain_mode_t>(
             legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
             // AUDIO_GAIN_MODE_* constants are mask-based.
@@ -548,7 +544,7 @@
         const audio_gain_config& legacy, audio_port_role_t role, audio_port_type_t type) {
     media::AudioGainConfig aidl;
     aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
-    aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_mask_int32_t(legacy.mode));
+    aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
     aidl.channelMask =
             VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
     const bool isInput = VALUE_OR_RETURN(direction(role, type)) == Direction::INPUT;
@@ -583,14 +579,15 @@
             return AUDIO_INPUT_FLAG_HW_AV_SYNC;
         case media::AudioInputFlags::DIRECT:
             return AUDIO_INPUT_FLAG_DIRECT;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
         audio_input_flags_t legacy) {
     switch (legacy) {
+        case AUDIO_INPUT_FLAG_NONE:
+            break; // shouldn't get here. must be listed  -Werror,-Wswitch
         case AUDIO_INPUT_FLAG_FAST:
             return media::AudioInputFlags::FAST;
         case AUDIO_INPUT_FLAG_HW_HOTWORD:
@@ -607,9 +604,8 @@
             return media::AudioInputFlags::HW_AV_SYNC;
         case AUDIO_INPUT_FLAG_DIRECT:
             return media::AudioInputFlags::DIRECT;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
@@ -647,14 +643,15 @@
             return AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
         case media::AudioOutputFlags::GAPLESS_OFFLOAD:
             return AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
         audio_output_flags_t legacy) {
     switch (legacy) {
+        case AUDIO_OUTPUT_FLAG_NONE:
+            break; // shouldn't get here. must be listed  -Werror,-Wswitch
         case AUDIO_OUTPUT_FLAG_DIRECT:
             return media::AudioOutputFlags::DIRECT;
         case AUDIO_OUTPUT_FLAG_PRIMARY:
@@ -687,12 +684,12 @@
             return media::AudioOutputFlags::INCALL_MUSIC;
         case AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD:
             return media::AudioOutputFlags::GAPLESS_OFFLOAD;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
-ConversionResult<audio_input_flags_t> aidl2legacy_audio_input_flags_mask(int32_t aidl) {
+ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
+        int32_t aidl) {
     using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
 
     LegacyMask converted = VALUE_OR_RETURN(
@@ -703,7 +700,8 @@
     return static_cast<audio_input_flags_t>(converted);
 }
 
-ConversionResult<int32_t> legacy2aidl_audio_input_flags_mask(audio_input_flags_t legacy) {
+ConversionResult<int32_t> legacy2aidl_audio_input_flags_t_int32_t_mask(
+        audio_input_flags_t legacy) {
     using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
 
     LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
@@ -713,7 +711,8 @@
             enumToMask_index<int32_t, media::AudioInputFlags>);
 }
 
-ConversionResult<audio_output_flags_t> aidl2legacy_audio_output_flags_mask(int32_t aidl) {
+ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
+        int32_t aidl) {
     return convertBitmask<audio_output_flags_t,
                           int32_t,
                           audio_output_flags_t,
@@ -723,7 +722,8 @@
             enumToMask_bitmask<audio_output_flags_t, audio_output_flags_t>);
 }
 
-ConversionResult<int32_t> legacy2aidl_audio_output_flags_mask(audio_output_flags_t legacy) {
+ConversionResult<int32_t> legacy2aidl_audio_output_flags_t_int32_t_mask(
+        audio_output_flags_t legacy) {
     using LegacyMask = std::underlying_type_t<audio_output_flags_t>;
 
     LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
@@ -740,13 +740,15 @@
     switch (dir) {
         case Direction::INPUT: {
             legacy.input = VALUE_OR_RETURN(
-                    aidl2legacy_audio_input_flags_mask(VALUE_OR_RETURN(UNION_GET(aidl, input))));
+                    aidl2legacy_int32_t_audio_input_flags_t_mask(
+                            VALUE_OR_RETURN(UNION_GET(aidl, input))));
         }
             break;
 
         case Direction::OUTPUT: {
             legacy.output = VALUE_OR_RETURN(
-                    aidl2legacy_audio_output_flags_mask(VALUE_OR_RETURN(UNION_GET(aidl, output))));
+                    aidl2legacy_int32_t_audio_output_flags_t_mask(
+                            VALUE_OR_RETURN(UNION_GET(aidl, output))));
         }
             break;
     }
@@ -762,17 +764,20 @@
     switch (dir) {
         case Direction::INPUT:
             UNION_SET(aidl, input,
-                      VALUE_OR_RETURN(legacy2aidl_audio_input_flags_mask(legacy.input)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(
+                              legacy.input)));
             break;
         case Direction::OUTPUT:
             UNION_SET(aidl, output,
-                      VALUE_OR_RETURN(legacy2aidl_audio_output_flags_mask(legacy.output)));
+                      VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(
+                              legacy.output)));
             break;
     }
     return aidl;
 }
 
-ConversionResult<audio_port_config_device_ext> aidl2legacy_AudioPortConfigDeviceExt(
+ConversionResult<audio_port_config_device_ext>
+aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
         const media::AudioPortConfigDeviceExt& aidl) {
     audio_port_config_device_ext legacy;
     legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidl.hwModule));
@@ -781,7 +786,8 @@
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigDeviceExt> legacy2aidl_AudioPortConfigDeviceExt(
+ConversionResult<media::AudioPortConfigDeviceExt>
+legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
         const audio_port_config_device_ext& legacy) {
     media::AudioPortConfigDeviceExt aidl;
     aidl.hwModule = VALUE_OR_RETURN(legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
@@ -826,9 +832,8 @@
             return AUDIO_STREAM_PATCH;
         case media::AudioStreamType::CALL_ASSISTANT:
             return AUDIO_STREAM_CALL_ASSISTANT;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
@@ -866,9 +871,8 @@
             return media::AudioStreamType::PATCH;
         case AUDIO_STREAM_CALL_ASSISTANT:
             return media::AudioStreamType::CALL_ASSISTANT;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_source_t> aidl2legacy_AudioSourceType_audio_source_t(
@@ -905,9 +909,8 @@
             return AUDIO_SOURCE_FM_TUNER;
         case media::AudioSourceType::HOTWORD:
             return AUDIO_SOURCE_HOTWORD;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<media::AudioSourceType> legacy2aidl_audio_source_t_AudioSourceType(
@@ -943,9 +946,8 @@
             return media::AudioSourceType::FM_TUNER;
         case AUDIO_SOURCE_HOTWORD:
             return media::AudioSourceType::HOTWORD;
-        default:
-            return unexpected(BAD_VALUE);
     }
+    return unexpected(BAD_VALUE);
 }
 
 ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl) {
@@ -967,24 +969,21 @@
         case media::AudioPortRole::NONE:
             // Just verify that the union is empty.
             VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
-            break;
+            return legacy;
 
         case media::AudioPortRole::SOURCE:
             // This is not a bug. A SOURCE role corresponds to the stream field.
             legacy.stream = VALUE_OR_RETURN(aidl2legacy_AudioStreamType_audio_stream_type_t(
                     VALUE_OR_RETURN(UNION_GET(aidl, stream))));
-            break;
+            return legacy;
 
         case media::AudioPortRole::SINK:
             // This is not a bug. A SINK role corresponds to the source field.
             legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSourceType_audio_source_t(
                     VALUE_OR_RETURN(UNION_GET(aidl, source))));
-            break;
-
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            return legacy;
     }
-    return legacy;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<media::AudioPortConfigMixExtUseCase> legacy2aidl_AudioPortConfigMixExtUseCase(
@@ -994,21 +993,19 @@
     switch (role) {
         case AUDIO_PORT_ROLE_NONE:
             UNION_SET(aidl, unspecified, false);
-            break;
+            return aidl;
         case AUDIO_PORT_ROLE_SOURCE:
             // This is not a bug. A SOURCE role corresponds to the stream field.
             UNION_SET(aidl, stream, VALUE_OR_RETURN(
                     legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream)));
-            break;
+            return aidl;
         case AUDIO_PORT_ROLE_SINK:
             // This is not a bug. A SINK role corresponds to the source field.
             UNION_SET(aidl, source,
                       VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSourceType(legacy.source)));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            return aidl;
     }
-    return aidl;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortConfigMixExt(
@@ -1029,14 +1026,16 @@
     return aidl;
 }
 
-ConversionResult<audio_port_config_session_ext> aidl2legacy_AudioPortConfigSessionExt(
+ConversionResult<audio_port_config_session_ext>
+aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
         const media::AudioPortConfigSessionExt& aidl) {
     audio_port_config_session_ext legacy;
     legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl.session));
     return legacy;
 }
 
-ConversionResult<media::AudioPortConfigSessionExt> legacy2aidl_AudioPortConfigSessionExt(
+ConversionResult<media::AudioPortConfigSessionExt>
+legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
         const audio_port_config_session_ext& legacy) {
     media::AudioPortConfigSessionExt aidl;
     aidl.session = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(legacy.session));
@@ -1054,23 +1053,24 @@
         case media::AudioPortType::NONE:
             // Just verify that the union is empty.
             VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
-            break;
+            return legacy;
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
-                    aidl2legacy_AudioPortConfigDeviceExt(VALUE_OR_RETURN(UNION_GET(aidl, device))));
-            break;
+                    aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, device))));
+            return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortConfigMixExt(VALUE_OR_RETURN(UNION_GET(aidl, mix)), role));
-            break;
+            return legacy;
         case media::AudioPortType::SESSION:
-            legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortConfigSessionExt(
-                    VALUE_OR_RETURN(UNION_GET(aidl, session))));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            legacy.session = VALUE_OR_RETURN(
+                    aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
+                            VALUE_OR_RETURN(UNION_GET(aidl, session))));
+            return legacy;
+
     }
-    return legacy;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<media::AudioPortConfigExt> legacy2aidl_AudioPortConfigExt(
@@ -1080,23 +1080,25 @@
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
             UNION_SET(aidl, unspecified, false);
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_DEVICE:
             UNION_SET(aidl, device,
-                      VALUE_OR_RETURN(legacy2aidl_AudioPortConfigDeviceExt(legacy.device)));
-            break;
+                      VALUE_OR_RETURN(
+                        legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
+                          legacy.device)));
+            return aidl;
         case AUDIO_PORT_TYPE_MIX:
             UNION_SET(aidl, mix,
                       VALUE_OR_RETURN(legacy2aidl_AudioPortConfigMixExt(legacy.mix, role)));
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_SESSION:
             UNION_SET(aidl, session,
-                      VALUE_OR_RETURN(legacy2aidl_AudioPortConfigSessionExt(legacy.session)));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+                      VALUE_OR_RETURN(
+                        legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
+                          legacy.session)));
+            return aidl;
     }
-    return aidl;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
@@ -1235,7 +1237,8 @@
     return aidl;
 }
 
-ConversionResult<AudioClient> aidl2legacy_AudioClient(const media::AudioClient& aidl) {
+ConversionResult<AudioClient> aidl2legacy_AudioClient_AudioClient(
+        const media::AudioClient& aidl) {
     AudioClient legacy;
     legacy.clientUid = VALUE_OR_RETURN(aidl2legacy_int32_t_uid_t(aidl.clientUid));
     legacy.clientPid = VALUE_OR_RETURN(aidl2legacy_int32_t_pid_t(aidl.clientPid));
@@ -1244,7 +1247,8 @@
     return legacy;
 }
 
-ConversionResult<media::AudioClient> legacy2aidl_AudioClient(const AudioClient& legacy) {
+ConversionResult<media::AudioClient> legacy2aidl_AudioClient_AudioClient(
+        const AudioClient& legacy) {
     media::AudioClient aidl;
     aidl.clientUid = VALUE_OR_RETURN(legacy2aidl_uid_t_int32_t(legacy.clientUid));
     aidl.clientPid = VALUE_OR_RETURN(legacy2aidl_pid_t_int32_t(legacy.clientPid));
@@ -1672,7 +1676,7 @@
 }
 
 ConversionResult<AudioTimestamp>
-aidl2legacy_AudioTimestamp(const media::AudioTimestampInternal& aidl) {
+aidl2legacy_AudioTimestampInternal_AudioTimestamp(const media::AudioTimestampInternal& aidl) {
     AudioTimestamp legacy;
     legacy.mPosition = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.position));
     legacy.mTime.tv_sec = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sec));
@@ -1681,7 +1685,7 @@
 }
 
 ConversionResult<media::AudioTimestampInternal>
-legacy2aidl_AudioTimestamp(const AudioTimestamp& legacy) {
+legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy) {
     media::AudioTimestampInternal aidl;
     aidl.position = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.mPosition));
     aidl.sec = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.mTime.tv_sec));
@@ -1912,25 +1916,24 @@
         case media::AudioPortType::NONE:
             // Just verify that the union is empty.
             VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
-            break;
+            return legacy;
         case media::AudioPortType::DEVICE:
             legacy.device = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
                             VALUE_OR_RETURN(UNION_GET(aidl, device))));
-            break;
+            return legacy;
         case media::AudioPortType::MIX:
             legacy.mix = VALUE_OR_RETURN(
                     aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
                             VALUE_OR_RETURN(UNION_GET(aidl, mix))));
-            break;
+            return legacy;
         case media::AudioPortType::SESSION:
             legacy.session = VALUE_OR_RETURN(aidl2legacy_AudioPortSessionExt_audio_port_session_ext(
                     VALUE_OR_RETURN(UNION_GET(aidl, session))));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            return legacy;
+
     }
-    return legacy;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<media::AudioPortExt> legacy2aidl_AudioPortExt(
@@ -1939,25 +1942,23 @@
     switch (type) {
         case AUDIO_PORT_TYPE_NONE:
             UNION_SET(aidl, unspecified, false);
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_DEVICE:
             UNION_SET(aidl, device,
                       VALUE_OR_RETURN(
                               legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy.device)));
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_MIX:
             UNION_SET(aidl, mix,
                       VALUE_OR_RETURN(legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy.mix)));
-            break;
+            return aidl;
         case AUDIO_PORT_TYPE_SESSION:
             UNION_SET(aidl, session,
                       VALUE_OR_RETURN(legacy2aidl_audio_port_session_ext_AudioPortSessionExt(
                               legacy.session)));
-            break;
-        default:
-            LOG_ALWAYS_FATAL("Shouldn't get here");
+            return aidl;
     }
-    return aidl;
+    LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
 }
 
 ConversionResult<audio_profile>
@@ -2024,7 +2025,7 @@
 ConversionResult<media::AudioGain>
 legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy) {
     media::AudioGain aidl;
-    aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_mask_int32_t(legacy.mode));
+    aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
     aidl.channelMask = VALUE_OR_RETURN(
             legacy2aidl_audio_channel_mask_t_int32_t(legacy.channel_mask));
     aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
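
The AidlConversion.cpp hunks above all apply the same pattern: remove the default: label, list every enumerator (returning directly from each case, or breaking for the "not applicable" values), and move the BAD_VALUE / LOG_ALWAYS_FATAL failure path after the switch. With -Werror,-Wswitch, adding a new enumerator to one of these enums then becomes a compile error at every conversion site instead of a silent runtime BAD_VALUE. A minimal sketch of the pattern, using a hypothetical Color enum in place of the audio enums (ConversionResult, unexpected and BAD_VALUE as used throughout this file):

    enum class Color { RED, GREEN, BLUE };  // hypothetical enum for illustration

    ConversionResult<int32_t> aidl2legacy_Color_int32_t(Color color) {
        switch (color) {            // no default: -Wswitch reports missing cases
            case Color::RED:   return 0;
            case Color::GREEN: return 1;
            case Color::BLUE:  return 2;
        }
        // Unreachable for listed enumerators, but still guards against
        // out-of-range values arriving over the wire.
        return unexpected(BAD_VALUE);
    }
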
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index ae899c0..79ea1bb 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -30,7 +30,7 @@
 #include <utils/Log.h>
 
 namespace android {
-
+using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
 
 namespace {
@@ -262,7 +262,7 @@
             bs = mIEffect->disable(&status);
         }
         if (!bs.isOk()) {
-            status = bs.transactionError();
+            status = statusTFromBinderStatus(bs);
         }
         if (status == NO_ERROR) {
             mEnabled = enabled;
@@ -303,7 +303,7 @@
 
     Status bs = mIEffect->command(cmdCode, data, *replySize, &response, &status);
     if (!bs.isOk()) {
-        status = bs.transactionError();
+        status = statusTFromBinderStatus(bs);
     }
     if (status == NO_ERROR) {
         memcpy(replyData, response.data(), response.size());
@@ -351,7 +351,7 @@
                                   &response,
                                   &status);
     if (!bs.isOk()) {
-        status = bs.transactionError();
+        status = statusTFromBinderStatus(bs);
         return status;
     }
     assert(response.size() == sizeof(int));
@@ -410,7 +410,7 @@
                                   &response,
                                   &status);
     if (!bs.isOk()) {
-        status = bs.transactionError();
+        status = statusTFromBinderStatus(bs);
     }
     return status;
 }
@@ -441,7 +441,7 @@
 
     Status bs = mIEffect->command(EFFECT_CMD_GET_PARAM, cmd, psize, &response, &status);
     if (!bs.isOk()) {
-        status = bs.transactionError();
+        status = statusTFromBinderStatus(bs);
         return status;
     }
     memcpy(param, response.data(), response.size());
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index f01b1d0..112cb67 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -47,6 +47,8 @@
 #define WAIT_PERIOD_MS          10
 
 namespace android {
+using aidl_utils::statusTFromBinderStatus;
+
 // ---------------------------------------------------------------------------
 
 // static
@@ -450,7 +452,7 @@
     mActive = true;
 
     if (!(flags & CBLK_INVALID)) {
-        status = mAudioRecord->start(event, triggerSession).transactionError();
+        status = statusTFromBinderStatus(mAudioRecord->start(event, triggerSession));
         if (status == DEAD_OBJECT) {
             flags |= CBLK_INVALID;
         }
@@ -1439,8 +1441,8 @@
         if (mActive) {
             // callback thread or sync event hasn't changed
             // FIXME this fails if we have a new AudioFlinger instance
-            result = mAudioRecord->start(
-                AudioSystem::SYNC_EVENT_SAME, AUDIO_SESSION_NONE).transactionError();
+            result = statusTFromBinderStatus(mAudioRecord->start(
+                AudioSystem::SYNC_EVENT_SAME, AUDIO_SESSION_NONE));
         }
         mFramesReadServerOffset = mFramesRead; // server resets to zero so we need an offset.
     }
@@ -1531,7 +1533,7 @@
 {
     AutoMutex lock(mLock);
     std::vector<media::MicrophoneInfoData> mics;
-    status_t status = mAudioRecord->getActiveMicrophones(&mics).transactionError();
+    status_t status = statusTFromBinderStatus(mAudioRecord->getActiveMicrophones(&mics));
     activeMicrophones->resize(mics.size());
     for (size_t i = 0; status == OK && i < mics.size(); ++i) {
         status = activeMicrophones->at(i).readFromParcelable(mics[i]);
@@ -1552,7 +1554,7 @@
         // the internal AudioRecord hasn't been created yet, so just stash the attribute.
         return OK;
     } else {
-        return mAudioRecord->setPreferredMicrophoneDirection(direction).transactionError();
+        return statusTFromBinderStatus(mAudioRecord->setPreferredMicrophoneDirection(direction));
     }
 }
 
@@ -1568,7 +1570,7 @@
         // the internal AudioRecord hasn't been created yet, so just stash the attribute.
         return OK;
     } else {
-        return mAudioRecord->setPreferredMicrophoneFieldDimension(zoom).transactionError();
+        return statusTFromBinderStatus(mAudioRecord->setPreferredMicrophoneFieldDimension(zoom));
     }
 }
 
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 9aef794..09fcc66 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -35,7 +35,7 @@
 
 #define VALUE_OR_RETURN_BINDER_STATUS(x) \
     ({ auto _tmp = (x); \
-       if (!_tmp.ok()) return Status::fromStatusT(_tmp.error()); \
+       if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
        std::move(_tmp.value()); })
 
 // ----------------------------------------------------------------------------
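
AudioStreamTrack.cpp and the macro above make the reverse substitution: binder::Status::fromStatusT() is replaced by aidl_utils::binderStatusFromStatusT() when building a Status out of a local status_t. My understanding (hedged; the authoritative version is in media/AidlConversionUtil.h) is that fromStatusT() is meant only for genuine transaction-level codes, whereas the helper maps arbitrary status_t values onto a Status that transports cleanly, so the paired statusTFromBinderStatus() on the receiving side can recover them. A rough illustrative counterpart to the sketch after AAudioBinderAdapter.cpp:

    #include <binder/Status.h>
    #include <utils/Errors.h>

    // Illustrative only; not the actual helper.
    static android::binder::Status exampleBinderStatusFromStatusT(
            android::status_t status) {
        using android::binder::Status;
        if (status == android::NO_ERROR) return Status::ok();
        // Ship the error as a service-specific code rather than a raw
        // transaction error, so it survives the binder round trip.
        return Status::fromServiceSpecificError(status);
    }
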
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 8c53c5b..1b1e143 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -2819,7 +2819,7 @@
         media::AudioTimestampInternal ts;
         mAudioTrack->getTimestamp(&ts, &status);
         if (status == OK) {
-            timestamp = VALUE_OR_FATAL(aidl2legacy_AudioTimestamp(ts));
+            timestamp = VALUE_OR_FATAL(aidl2legacy_AudioTimestampInternal_AudioTimestamp(ts));
         }
     } else {
         // read timestamp from shared memory
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index a7cca45..20124df 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -28,6 +28,7 @@
 
 namespace android {
 
+using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -55,13 +56,13 @@
     media::CreateTrackRequest aidl;
     aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
     aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_t_AudioConfig(config));
-    aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient(clientInfo));
+    aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
     aidl.sharedBuffer = VALUE_OR_RETURN(legacy2aidl_NullableIMemory_SharedFileRegion(sharedBuffer));
     aidl.notificationsPerBuffer = VALUE_OR_RETURN(convertIntegral<int32_t>(notificationsPerBuffer));
     aidl.speed = speed;
     aidl.audioTrackCallback = audioTrackCallback;
     aidl.opPackageName = opPackageName;
-    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_output_flags_mask(flags));
+    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
     aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
     aidl.selectedDeviceId = VALUE_OR_RETURN(
@@ -75,14 +76,14 @@
     IAudioFlinger::CreateTrackInput legacy;
     legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
     legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfig_audio_config_t(aidl.config));
-    legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient(aidl.clientInfo));
+    legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
     legacy.sharedBuffer = VALUE_OR_RETURN(aidl2legacy_NullableSharedFileRegion_IMemory(aidl.sharedBuffer));
     legacy.notificationsPerBuffer = VALUE_OR_RETURN(
             convertIntegral<uint32_t>(aidl.notificationsPerBuffer));
     legacy.speed = aidl.speed;
     legacy.audioTrackCallback = aidl.audioTrackCallback;
     legacy.opPackageName = aidl.opPackageName;
-    legacy.flags = VALUE_OR_RETURN(aidl2legacy_audio_output_flags_mask(aidl.flags));
+    legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_output_flags_t_mask(aidl.flags));
     legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
     legacy.notificationFrameCount = VALUE_OR_RETURN(
             convertIntegral<size_t>(aidl.notificationFrameCount));
@@ -95,7 +96,7 @@
 ConversionResult<media::CreateTrackResponse>
 IAudioFlinger::CreateTrackOutput::toAidl() const {
     media::CreateTrackResponse aidl;
-    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_output_flags_mask(flags));
+    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
     aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
     aidl.selectedDeviceId = VALUE_OR_RETURN(
@@ -115,7 +116,7 @@
 IAudioFlinger::CreateTrackOutput::fromAidl(
         const media::CreateTrackResponse& aidl) {
     IAudioFlinger::CreateTrackOutput legacy;
-    legacy.flags = VALUE_OR_RETURN(aidl2legacy_audio_output_flags_mask(aidl.flags));
+    legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_output_flags_t_mask(aidl.flags));
     legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
     legacy.notificationFrameCount = VALUE_OR_RETURN(
             convertIntegral<size_t>(aidl.notificationFrameCount));
@@ -137,10 +138,10 @@
     media::CreateRecordRequest aidl;
     aidl.attr = VALUE_OR_RETURN(legacy2aidl_audio_attributes_t_AudioAttributesInternal(attr));
     aidl.config = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(config));
-    aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient(clientInfo));
+    aidl.clientInfo = VALUE_OR_RETURN(legacy2aidl_AudioClient_AudioClient(clientInfo));
     aidl.opPackageName = VALUE_OR_RETURN(legacy2aidl_String16_string(opPackageName));
     aidl.riid = VALUE_OR_RETURN(legacy2aidl_audio_unique_id_t_int32_t(riid));
-    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_mask(flags));
+    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
     aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
     aidl.selectedDeviceId = VALUE_OR_RETURN(
@@ -155,10 +156,10 @@
     IAudioFlinger::CreateRecordInput legacy;
     legacy.attr = VALUE_OR_RETURN(aidl2legacy_AudioAttributesInternal_audio_attributes_t(aidl.attr));
     legacy.config = VALUE_OR_RETURN(aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.config));
-    legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient(aidl.clientInfo));
+    legacy.clientInfo = VALUE_OR_RETURN(aidl2legacy_AudioClient_AudioClient(aidl.clientInfo));
     legacy.opPackageName = VALUE_OR_RETURN(aidl2legacy_string_view_String16(aidl.opPackageName));
     legacy.riid = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_unique_id_t(aidl.riid));
-    legacy.flags = VALUE_OR_RETURN(aidl2legacy_audio_input_flags_mask(aidl.flags));
+    legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_input_flags_t_mask(aidl.flags));
     legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
     legacy.notificationFrameCount = VALUE_OR_RETURN(
             convertIntegral<size_t>(aidl.notificationFrameCount));
@@ -171,7 +172,7 @@
 ConversionResult<media::CreateRecordResponse>
 IAudioFlinger::CreateRecordOutput::toAidl() const {
     media::CreateRecordResponse aidl;
-    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_mask(flags));
+    aidl.flags = VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
     aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(frameCount));
     aidl.notificationFrameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(notificationFrameCount));
     aidl.selectedDeviceId = VALUE_OR_RETURN(
@@ -190,7 +191,7 @@
 IAudioFlinger::CreateRecordOutput::fromAidl(
         const media::CreateRecordResponse& aidl) {
     IAudioFlinger::CreateRecordOutput legacy;
-    legacy.flags = VALUE_OR_RETURN(aidl2legacy_audio_input_flags_mask(aidl.flags));
+    legacy.flags = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_input_flags_t_mask(aidl.flags));
     legacy.frameCount = VALUE_OR_RETURN(convertIntegral<size_t>(aidl.frameCount));
     legacy.notificationFrameCount = VALUE_OR_RETURN(
             convertIntegral<size_t>(aidl.notificationFrameCount));
@@ -214,19 +215,19 @@
 
 status_t AudioFlingerClientAdapter::createTrack(const media::CreateTrackRequest& input,
                                                 media::CreateTrackResponse& output) {
-    return mDelegate->createTrack(input, &output).transactionError();
+    return statusTFromBinderStatus(mDelegate->createTrack(input, &output));
 }
 
 status_t AudioFlingerClientAdapter::createRecord(const media::CreateRecordRequest& input,
                                                  media::CreateRecordResponse& output) {
-    return mDelegate->createRecord(input, &output).transactionError();
+    return statusTFromBinderStatus(mDelegate->createRecord(input, &output));
 }
 
 uint32_t AudioFlingerClientAdapter::sampleRate(audio_io_handle_t ioHandle) const {
     auto result = [&]() -> ConversionResult<uint32_t> {
         int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
         int32_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->sampleRate(ioHandleAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->sampleRate(ioHandleAidl, &aidlRet)));
         return convertIntegral<uint32_t>(aidlRet);
     }();
     // Failure is ignored.
@@ -237,7 +238,7 @@
     auto result = [&]() -> ConversionResult<audio_format_t> {
         int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
         media::audio::common::AudioFormat aidlRet;
-        RETURN_IF_ERROR(mDelegate->format(outputAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->format(outputAidl, &aidlRet)));
         return aidl2legacy_AudioFormat_audio_format_t(aidlRet);
     }();
     return result.value_or(AUDIO_FORMAT_INVALID);
@@ -247,7 +248,7 @@
     auto result = [&]() -> ConversionResult<size_t> {
         int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
         int64_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->frameCount(ioHandleAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->frameCount(ioHandleAidl, &aidlRet)));
         return convertIntegral<size_t>(aidlRet);
     }();
     // Failure is ignored.
@@ -258,7 +259,7 @@
     auto result = [&]() -> ConversionResult<uint32_t> {
         int32_t outputAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output));
         int32_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->latency(outputAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->latency(outputAidl, &aidlRet)));
         return convertIntegral<uint32_t>(aidlRet);
     }();
     // Failure is ignored.
@@ -266,17 +267,17 @@
 }
 
 status_t AudioFlingerClientAdapter::setMasterVolume(float value) {
-    return mDelegate->setMasterVolume(value).transactionError();
+    return statusTFromBinderStatus(mDelegate->setMasterVolume(value));
 }
 
 status_t AudioFlingerClientAdapter::setMasterMute(bool muted) {
-    return mDelegate->setMasterMute(muted).transactionError();
+    return statusTFromBinderStatus(mDelegate->setMasterMute(muted));
 }
 
 float AudioFlingerClientAdapter::masterVolume() const {
     auto result = [&]() -> ConversionResult<float> {
         float aidlRet;
-        RETURN_IF_ERROR(mDelegate->masterVolume(&aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->masterVolume(&aidlRet)));
         return aidlRet;
     }();
     // Failure is ignored.
@@ -286,7 +287,7 @@
 bool AudioFlingerClientAdapter::masterMute() const {
     auto result = [&]() -> ConversionResult<bool> {
         bool aidlRet;
-        RETURN_IF_ERROR(mDelegate->masterMute(&aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(mDelegate->masterMute(&aidlRet)));
         return aidlRet;
     }();
     // Failure is ignored.
@@ -294,11 +295,11 @@
 }
 
 status_t AudioFlingerClientAdapter::setMasterBalance(float balance) {
-    return mDelegate->setMasterBalance(balance).transactionError();
+    return statusTFromBinderStatus(mDelegate->setMasterBalance(balance));
 }
 
 status_t AudioFlingerClientAdapter::getMasterBalance(float* balance) const{
-    return mDelegate->getMasterBalance(balance).transactionError();
+    return statusTFromBinderStatus(mDelegate->getMasterBalance(balance));
 }
 
 status_t AudioFlingerClientAdapter::setStreamVolume(audio_stream_type_t stream, float value,
@@ -306,13 +307,13 @@
     media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
     int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
-    return mDelegate->setStreamVolume(streamAidl, value, outputAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->setStreamVolume(streamAidl, value, outputAidl));
 }
 
 status_t AudioFlingerClientAdapter::setStreamMute(audio_stream_type_t stream, bool muted) {
     media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
-    return mDelegate->setStreamMute(streamAidl, muted).transactionError();
+    return statusTFromBinderStatus(mDelegate->setStreamMute(streamAidl, muted));
 }
 
 float AudioFlingerClientAdapter::streamVolume(audio_stream_type_t stream,
@@ -322,8 +323,8 @@
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
         float aidlRet;
-        RETURN_IF_ERROR(
-                mDelegate->streamVolume(streamAidl, outputAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->streamVolume(streamAidl, outputAidl, &aidlRet)));
         return aidlRet;
     }();
     // Failure is ignored.
@@ -335,8 +336,8 @@
         media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
         bool aidlRet;
-        RETURN_IF_ERROR(
-                mDelegate->streamMute(streamAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->streamMute(streamAidl, &aidlRet)));
         return aidlRet;
     }();
     // Failure is ignored.
@@ -345,18 +346,18 @@
 
 status_t AudioFlingerClientAdapter::setMode(audio_mode_t mode) {
     media::AudioMode modeAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_mode_t_AudioMode(mode));
-    return mDelegate->setMode(modeAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->setMode(modeAidl));
 }
 
 status_t AudioFlingerClientAdapter::setMicMute(bool state) {
-    return mDelegate->setMicMute(state).transactionError();
+    return statusTFromBinderStatus(mDelegate->setMicMute(state));
 }
 
 bool AudioFlingerClientAdapter::getMicMute() const {
     auto result = [&]() -> ConversionResult<bool> {
         bool aidlRet;
-        RETURN_IF_ERROR(
-                mDelegate->getMicMute(&aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getMicMute(&aidlRet)));
         return aidlRet;
     }();
     // Failure is ignored.
@@ -367,7 +368,7 @@
     auto result = [&]() -> status_t {
         int32_t portIdAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_port_handle_t_int32_t(portId));
-        return mDelegate->setRecordSilenced(portIdAidl, silenced).transactionError();
+        return statusTFromBinderStatus(mDelegate->setRecordSilenced(portIdAidl, silenced));
     }();
     // Failure is ignored.
     (void) result;
@@ -378,7 +379,7 @@
     int32_t ioHandleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
     std::string keyValuePairsAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_String8_string(keyValuePairs));
-    return mDelegate->setParameters(ioHandleAidl, keyValuePairsAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->setParameters(ioHandleAidl, keyValuePairsAidl));
 }
 
 String8 AudioFlingerClientAdapter::getParameters(audio_io_handle_t ioHandle, const String8& keys)
@@ -387,8 +388,8 @@
         int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
         std::string keysAidl = VALUE_OR_RETURN(legacy2aidl_String8_string(keys));
         std::string aidlRet;
-        RETURN_IF_ERROR(
-                mDelegate->getParameters(ioHandleAidl, keysAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getParameters(ioHandleAidl, keysAidl, &aidlRet)));
         return aidl2legacy_string_view_String8(aidlRet);
     }();
     // Failure is ignored.
@@ -409,9 +410,9 @@
         int32_t channelMaskAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_channel_mask_t_int32_t(channelMask));
         int64_t aidlRet;
-        RETURN_IF_ERROR(
+        RETURN_IF_ERROR(statusTFromBinderStatus(
                 mDelegate->getInputBufferSize(sampleRateAidl, formatAidl, channelMaskAidl,
-                                              &aidlRet).transactionError());
+                                              &aidlRet)));
         return convertIntegral<size_t>(aidlRet);
     }();
     // Failure is ignored.
@@ -420,7 +421,7 @@
 
 status_t AudioFlingerClientAdapter::openOutput(const media::OpenOutputRequest& request,
                                                media::OpenOutputResponse* response) {
-    return mDelegate->openOutput(request, response).transactionError();
+    return statusTFromBinderStatus(mDelegate->openOutput(request, response));
 }
 
 audio_io_handle_t AudioFlingerClientAdapter::openDuplicateOutput(audio_io_handle_t output1,
@@ -429,8 +430,8 @@
         int32_t output1Aidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output1));
         int32_t output2Aidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(output2));
         int32_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->openDuplicateOutput(output1Aidl, output2Aidl,
-                                                       &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->openDuplicateOutput(output1Aidl, output2Aidl, &aidlRet)));
         return aidl2legacy_int32_t_audio_io_handle_t(aidlRet);
     }();
     // Failure is ignored.
@@ -439,44 +440,45 @@
 
 status_t AudioFlingerClientAdapter::closeOutput(audio_io_handle_t output) {
     int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
-    return mDelegate->closeOutput(outputAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->closeOutput(outputAidl));
 }
 
 status_t AudioFlingerClientAdapter::suspendOutput(audio_io_handle_t output) {
     int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
-    return mDelegate->suspendOutput(outputAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->suspendOutput(outputAidl));
 }
 
 status_t AudioFlingerClientAdapter::restoreOutput(audio_io_handle_t output) {
     int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
-    return mDelegate->restoreOutput(outputAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->restoreOutput(outputAidl));
 }
 
 status_t AudioFlingerClientAdapter::openInput(const media::OpenInputRequest& request,
                                               media::OpenInputResponse* response) {
-    return mDelegate->openInput(request, response).transactionError();
+    return statusTFromBinderStatus(mDelegate->openInput(request, response));
 }
 
 status_t AudioFlingerClientAdapter::closeInput(audio_io_handle_t input) {
     int32_t inputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(input));
-    return mDelegate->closeInput(inputAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->closeInput(inputAidl));
 }
 
 status_t AudioFlingerClientAdapter::invalidateStream(audio_stream_type_t stream) {
     media::AudioStreamType streamAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_stream_type_t_AudioStreamType(stream));
-    return mDelegate->invalidateStream(streamAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->invalidateStream(streamAidl));
 }
 
 status_t AudioFlingerClientAdapter::setVoiceVolume(float volume) {
-    return mDelegate->setVoiceVolume(volume).transactionError();
+    return statusTFromBinderStatus(mDelegate->setVoiceVolume(volume));
 }
 
 status_t AudioFlingerClientAdapter::getRenderPosition(uint32_t* halFrames, uint32_t* dspFrames,
                                                       audio_io_handle_t output) const {
     int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
     media::RenderPosition aidlRet;
-    RETURN_STATUS_IF_ERROR(mDelegate->getRenderPosition(outputAidl, &aidlRet).transactionError());
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->getRenderPosition(outputAidl, &aidlRet)));
     if (halFrames != nullptr) {
         *halFrames = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(aidlRet.halFrames));
     }
@@ -490,7 +492,8 @@
     auto result = [&]() -> ConversionResult<uint32_t> {
         int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
         int32_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->getInputFramesLost(ioHandleAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getInputFramesLost(ioHandleAidl, &aidlRet)));
         return convertIntegral<uint32_t>(aidlRet);
     }();
     // Failure is ignored.
@@ -502,7 +505,8 @@
         media::AudioUniqueIdUse useAidl = VALUE_OR_RETURN(
                 legacy2aidl_audio_unique_id_use_t_AudioUniqueIdUse(use));
         int32_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->newAudioUniqueId(useAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->newAudioUniqueId(useAidl, &aidlRet)));
         return aidl2legacy_int32_t_audio_unique_id_t(aidlRet);
     }();
     return result.value_or(AUDIO_UNIQUE_ID_ALLOCATE);
@@ -515,8 +519,8 @@
                 legacy2aidl_audio_session_t_int32_t(audioSession));
         int32_t pidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(pid));
         int32_t uidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(uid));
-        return mDelegate->acquireAudioSessionId(audioSessionAidl, pidAidl,
-                                                uidAidl).transactionError();
+        return statusTFromBinderStatus(
+                mDelegate->acquireAudioSessionId(audioSessionAidl, pidAidl, uidAidl));
     }();
     // Failure is ignored.
 }
@@ -526,14 +530,16 @@
         int32_t audioSessionAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_session_t_int32_t(audioSession));
         int32_t pidAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(pid));
-        return mDelegate->releaseAudioSessionId(audioSessionAidl, pidAidl).transactionError();
+        return statusTFromBinderStatus(
+                mDelegate->releaseAudioSessionId(audioSessionAidl, pidAidl));
     }();
     // Failure is ignored.
 }
 
 status_t AudioFlingerClientAdapter::queryNumberEffects(uint32_t* numEffects) const {
     int32_t aidlRet;
-    RETURN_STATUS_IF_ERROR(mDelegate->queryNumberEffects(&aidlRet).transactionError());
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->queryNumberEffects(&aidlRet)));
     if (numEffects != nullptr) {
         *numEffects = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(aidlRet));
     }
@@ -544,7 +550,8 @@
 AudioFlingerClientAdapter::queryEffect(uint32_t index, effect_descriptor_t* pDescriptor) const {
     int32_t indexAidl = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(index));
     media::EffectDescriptor aidlRet;
-    RETURN_STATUS_IF_ERROR(mDelegate->queryEffect(indexAidl, &aidlRet).transactionError());
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->queryEffect(indexAidl, &aidlRet)));
     if (pDescriptor != nullptr) {
         *pDescriptor = VALUE_OR_RETURN_STATUS(
                 aidl2legacy_EffectDescriptor_effect_descriptor_t(aidlRet));
@@ -563,9 +570,9 @@
     int32_t preferredTypeFlagAidl = VALUE_OR_RETURN_STATUS(
             convertReinterpret<int32_t>(preferredTypeFlag));
     media::EffectDescriptor aidlRet;
-    RETURN_STATUS_IF_ERROR(
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
             mDelegate->getEffectDescriptor(effectUuidAidl, typeUuidAidl, preferredTypeFlagAidl,
-                                           &aidlRet).transactionError());
+                                           &aidlRet)));
     if (pDescriptor != nullptr) {
         *pDescriptor = VALUE_OR_RETURN_STATUS(
                 aidl2legacy_EffectDescriptor_effect_descriptor_t(aidlRet));
@@ -575,7 +582,7 @@
 
 status_t AudioFlingerClientAdapter::createEffect(const media::CreateEffectRequest& request,
                                                  media::CreateEffectResponse* response) {
-    return mDelegate->createEffect(request, response).transactionError();
+    return statusTFromBinderStatus(mDelegate->createEffect(request, response));
 }
 
 status_t
@@ -586,7 +593,8 @@
             legacy2aidl_audio_io_handle_t_int32_t(srcOutput));
     int32_t dstOutputAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_io_handle_t_int32_t(dstOutput));
-    return mDelegate->moveEffects(sessionAidl, srcOutputAidl, dstOutputAidl).transactionError();
+    return statusTFromBinderStatus(
+            mDelegate->moveEffects(sessionAidl, srcOutputAidl, dstOutputAidl));
 }
 
 void AudioFlingerClientAdapter::setEffectSuspended(int effectId,
@@ -596,8 +604,8 @@
         int32_t effectIdAidl = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(effectId));
         int32_t sessionIdAidl = VALUE_OR_RETURN_STATUS(
                 legacy2aidl_audio_session_t_int32_t(sessionId));
-        return mDelegate->setEffectSuspended(effectIdAidl, sessionIdAidl,
-                                             suspended).transactionError();
+        return statusTFromBinderStatus(
+                mDelegate->setEffectSuspended(effectIdAidl, sessionIdAidl, suspended));
     }();
     // Failure is ignored.
 }
@@ -606,7 +614,8 @@
     auto result = [&]() -> ConversionResult<audio_module_handle_t> {
         std::string nameAidl(name);
         int32_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->loadHwModule(nameAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->loadHwModule(nameAidl, &aidlRet)));
         return aidl2legacy_int32_t_audio_module_handle_t(aidlRet);
     }();
     // Failure is ignored.
@@ -616,7 +625,8 @@
 uint32_t AudioFlingerClientAdapter::getPrimaryOutputSamplingRate() {
     auto result = [&]() -> ConversionResult<uint32_t> {
         int32_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->getPrimaryOutputSamplingRate(&aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getPrimaryOutputSamplingRate(&aidlRet)));
         return convertIntegral<uint32_t>(aidlRet);
     }();
     // Failure is ignored.
@@ -626,7 +636,8 @@
 size_t AudioFlingerClientAdapter::getPrimaryOutputFrameCount() {
     auto result = [&]() -> ConversionResult<size_t> {
         int64_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->getPrimaryOutputFrameCount(&aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getPrimaryOutputFrameCount(&aidlRet)));
         return convertIntegral<size_t>(aidlRet);
     }();
     // Failure is ignored.
@@ -634,13 +645,14 @@
 }
 
 status_t AudioFlingerClientAdapter::setLowRamDevice(bool isLowRamDevice, int64_t totalMemory) {
-    return mDelegate->setLowRamDevice(isLowRamDevice, totalMemory).transactionError();
+    return statusTFromBinderStatus(mDelegate->setLowRamDevice(isLowRamDevice, totalMemory));
 }
 
 status_t AudioFlingerClientAdapter::getAudioPort(struct audio_port_v7* port) {
     media::AudioPort portAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_v7_AudioPort(*port));
     media::AudioPort aidlRet;
-    RETURN_STATUS_IF_ERROR(mDelegate->getAudioPort(portAidl, &aidlRet).transactionError());
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->getAudioPort(portAidl, &aidlRet)));
     *port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPort_audio_port_v7(aidlRet));
     return OK;
 }
@@ -649,7 +661,8 @@
                                                      audio_patch_handle_t* handle) {
     media::AudioPatch patchAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_AudioPatch(*patch));
     int32_t aidlRet;
-    RETURN_STATUS_IF_ERROR(mDelegate->createAudioPatch(patchAidl, &aidlRet).transactionError());
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->createAudioPatch(patchAidl, &aidlRet)));
     if (handle != nullptr) {
         *handle = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_patch_handle_t(aidlRet));
     }
@@ -658,14 +671,15 @@
 
 status_t AudioFlingerClientAdapter::releaseAudioPatch(audio_patch_handle_t handle) {
     int32_t handleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_handle_t_int32_t(handle));
-    return mDelegate->releaseAudioPatch(handleAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->releaseAudioPatch(handleAidl));
 }
 
 status_t AudioFlingerClientAdapter::listAudioPatches(unsigned int* num_patches,
                                                      struct audio_patch* patches) {
     std::vector<media::AudioPatch> aidlRet;
     int32_t maxPatches = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(*num_patches));
-    RETURN_STATUS_IF_ERROR(mDelegate->listAudioPatches(maxPatches, &aidlRet).transactionError());
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->listAudioPatches(maxPatches, &aidlRet)));
     *num_patches = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(aidlRet.size()));
     return convertRange(aidlRet.begin(), aidlRet.end(), patches,
                         aidl2legacy_AudioPatch_audio_patch);
@@ -674,29 +688,30 @@
 status_t AudioFlingerClientAdapter::setAudioPortConfig(const struct audio_port_config* config) {
     media::AudioPortConfig configAidl = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_port_config_AudioPortConfig(*config));
-    return mDelegate->setAudioPortConfig(configAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->setAudioPortConfig(configAidl));
 }
 
 audio_hw_sync_t AudioFlingerClientAdapter::getAudioHwSyncForSession(audio_session_t sessionId) {
     auto result = [&]() -> ConversionResult<audio_hw_sync_t> {
         int32_t sessionIdAidl = VALUE_OR_RETURN(legacy2aidl_audio_session_t_int32_t(sessionId));
         int32_t aidlRet;
-        RETURN_IF_ERROR(
-                mDelegate->getAudioHwSyncForSession(sessionIdAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->getAudioHwSyncForSession(sessionIdAidl, &aidlRet)));
         return aidl2legacy_int32_t_audio_hw_sync_t(aidlRet);
     }();
     return result.value_or(AUDIO_HW_SYNC_INVALID);
 }
 
 status_t AudioFlingerClientAdapter::systemReady() {
-    return mDelegate->systemReady().transactionError();
+    return statusTFromBinderStatus(mDelegate->systemReady());
 }
 
 size_t AudioFlingerClientAdapter::frameCountHAL(audio_io_handle_t ioHandle) const {
     auto result = [&]() -> ConversionResult<size_t> {
         int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
         int64_t aidlRet;
-        RETURN_IF_ERROR(mDelegate->frameCountHAL(ioHandleAidl, &aidlRet).transactionError());
+        RETURN_IF_ERROR(statusTFromBinderStatus(
+                mDelegate->frameCountHAL(ioHandleAidl, &aidlRet)));
         return convertIntegral<size_t>(aidlRet);
     }();
     // Failure is ignored.
@@ -706,7 +721,8 @@
 status_t
 AudioFlingerClientAdapter::getMicrophones(std::vector<media::MicrophoneInfo>* microphones) {
     std::vector<media::MicrophoneInfoData> aidlRet;
-    RETURN_STATUS_IF_ERROR(mDelegate->getMicrophones(&aidlRet).transactionError());
+    RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+            mDelegate->getMicrophones(&aidlRet)));
     if (microphones != nullptr) {
         *microphones = VALUE_OR_RETURN_STATUS(
                 convertContainer<std::vector<media::MicrophoneInfo>>(aidlRet,
@@ -718,7 +734,7 @@
 status_t AudioFlingerClientAdapter::setAudioHalPids(const std::vector<pid_t>& pids) {
     std::vector<int32_t> pidsAidl = VALUE_OR_RETURN_STATUS(
             convertContainer<std::vector<int32_t>>(pids, legacy2aidl_pid_t_int32_t));
-    return mDelegate->setAudioHalPids(pidsAidl).transactionError();
+    return statusTFromBinderStatus(mDelegate->setAudioHalPids(pidsAidl));
 }
 
 
diff --git a/media/libaudioclient/PlayerBase.cpp b/media/libaudioclient/PlayerBase.cpp
index c443865..9e7d89e 100644
--- a/media/libaudioclient/PlayerBase.cpp
+++ b/media/libaudioclient/PlayerBase.cpp
@@ -15,13 +15,14 @@
  */
 
 #include <binder/IServiceManager.h>
+#include <media/AidlConversionUtil.h>
 #include <media/PlayerBase.h>
 
 #define max(a, b) ((a) > (b) ? (a) : (b))
 #define min(a, b) ((a) < (b) ? (a) : (b))
 
 namespace android {
-
+using aidl_utils::binderStatusFromStatusT;
 using media::VolumeShaperConfiguration;
 using media::VolumeShaperOperation;
 
@@ -150,7 +151,7 @@
     if (status != NO_ERROR) {
         ALOGW("PlayerBase::setVolume() error %d", status);
     }
-    return binder::Status::fromStatusT(status);
+    return binderStatusFromStatusT(status);
 }
 
 binder::Status PlayerBase::setPan(float pan) {
@@ -170,7 +171,7 @@
     if (status != NO_ERROR) {
         ALOGW("PlayerBase::setPan() error %d", status);
     }
-    return binder::Status::fromStatusT(status);
+    return binderStatusFromStatusT(status);
 }
 
 binder::Status PlayerBase::setStartDelayMs(int32_t delayMs __unused) {
diff --git a/media/libaudioclient/TrackPlayerBase.cpp b/media/libaudioclient/TrackPlayerBase.cpp
index e571838..372b7c3 100644
--- a/media/libaudioclient/TrackPlayerBase.cpp
+++ b/media/libaudioclient/TrackPlayerBase.cpp
@@ -17,7 +17,7 @@
 #include <media/TrackPlayerBase.h>
 
 namespace android {
-
+using aidl_utils::binderStatusFromStatusT;
 using media::VolumeShaper;
 
 //--------------------------------------------------------------------------------------------------
@@ -115,7 +115,7 @@
     status_t s = spConfiguration->readFromParcelable(configuration)
             ?: spOperation->readFromParcelable(operation);
     if (s != OK) {
-        return binder::Status::fromStatusT(s);
+        return binderStatusFromStatusT(s);
     }
 
     if (mAudioTrack != 0) {
@@ -124,7 +124,7 @@
         if (status < 0) { // a non-negative value is the volume shaper id.
             ALOGE("TrackPlayerBase::applyVolumeShaper() failed with status %d", status);
         }
-        return binder::Status::fromStatusT(status);
+        return binderStatusFromStatusT(status);
     } else {
         ALOGD("TrackPlayerBase::applyVolumeShaper()"
               " no AudioTrack for volume control from IPlayer");
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index a6e5e2e..e3858b9 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -80,10 +80,12 @@
 ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
 
-// The legacy enum is unnamed. Thus, we use int.
-ConversionResult<int> aidl2legacy_AudioPortConfigType(media::AudioPortConfigType aidl);
-// The legacy enum is unnamed. Thus, we use int.
-ConversionResult<media::AudioPortConfigType> legacy2aidl_AudioPortConfigType(int legacy);
+// The legacy enum is unnamed. Thus, we use int32_t.
+ConversionResult<int32_t> aidl2legacy_AudioPortConfigType_int32_t(
+        media::AudioPortConfigType aidl);
+// The legacy enum is unnamed. Thus, we use int32_t.
+ConversionResult<media::AudioPortConfigType> legacy2aidl_int32_t_AudioPortConfigType(
+        int32_t legacy);
 
 ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
@@ -129,7 +131,7 @@
 legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
 
 ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_mask_int32_t(audio_gain_mode_t legacy);
+ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
 
 ConversionResult<audio_devices_t> aidl2legacy_int32_t_audio_devices_t(int32_t aidl);
 ConversionResult<int32_t> legacy2aidl_audio_devices_t_int32_t(audio_devices_t legacy);
@@ -149,20 +151,26 @@
 ConversionResult<media::AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
         audio_output_flags_t legacy);
 
-ConversionResult<audio_input_flags_t> aidl2legacy_audio_input_flags_mask(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_input_flags_mask(audio_input_flags_t legacy);
+ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
+        int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_input_flags_t_int32_t_mask(
+        audio_input_flags_t legacy);
 
-ConversionResult<audio_output_flags_t> aidl2legacy_audio_output_flags_mask(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_output_flags_mask(audio_output_flags_t legacy);
+ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
+        int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_output_flags_t_int32_t_mask(
+        audio_output_flags_t legacy);
 
 ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
         const media::AudioIoFlags& aidl, media::AudioPortRole role, media::AudioPortType type);
 ConversionResult<media::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
         const audio_io_flags& legacy, audio_port_role_t role, audio_port_type_t type);
 
-ConversionResult<audio_port_config_device_ext> aidl2legacy_AudioPortConfigDeviceExt(
+ConversionResult<audio_port_config_device_ext>
+aidl2legacy_AudioPortConfigDeviceExt_audio_port_config_device_ext(
         const media::AudioPortConfigDeviceExt& aidl);
-ConversionResult<media::AudioPortConfigDeviceExt> legacy2aidl_AudioPortConfigDeviceExt(
+ConversionResult<media::AudioPortConfigDeviceExt>
+legacy2aidl_audio_port_config_device_ext_AudioPortConfigDeviceExt(
         const audio_port_config_device_ext& legacy);
 
 ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
@@ -183,9 +191,11 @@
 ConversionResult<media::AudioPortConfigMixExt> legacy2aidl_AudioPortConfigMixExt(
         const audio_port_config_mix_ext& legacy, audio_port_role_t role);
 
-ConversionResult<audio_port_config_session_ext> aidl2legacy_AudioPortConfigSessionExt(
+ConversionResult<audio_port_config_session_ext>
+aidl2legacy_AudioPortConfigSessionExt_audio_port_config_session_ext(
         const media::AudioPortConfigSessionExt& aidl);
-ConversionResult<media::AudioPortConfigSessionExt> legacy2aidl_AudioPortConfigSessionExt(
+ConversionResult<media::AudioPortConfigSessionExt>
+legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
         const audio_port_config_session_ext& legacy);
 
 ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
@@ -204,8 +214,10 @@
 ConversionResult<media::AudioIoDescriptor> legacy2aidl_AudioIoDescriptor_AudioIoDescriptor(
         const sp<AudioIoDescriptor>& legacy);
 
-ConversionResult<AudioClient> aidl2legacy_AudioClient(const media::AudioClient& aidl);
-ConversionResult<media::AudioClient> legacy2aidl_AudioClient(const AudioClient& legacy);
+ConversionResult<AudioClient> aidl2legacy_AudioClient_AudioClient(
+        const media::AudioClient& aidl);
+ConversionResult<media::AudioClient> legacy2aidl_AudioClient_AudioClient(
+        const AudioClient& legacy);
 
 ConversionResult<audio_content_type_t>
 aidl2legacy_AudioContentType_audio_content_type_t(media::AudioContentType aidl);
@@ -263,9 +275,9 @@
 legacy2aidl_NullableIMemory_SharedFileRegion(const sp<IMemory>& legacy);
 
 ConversionResult<AudioTimestamp>
-aidl2legacy_AudioTimestamp(const media::AudioTimestampInternal& aidl);
+aidl2legacy_AudioTimestampInternal_AudioTimestamp(const media::AudioTimestampInternal& aidl);
 ConversionResult<media::AudioTimestampInternal>
-legacy2aidl_AudioTimestamp(const AudioTimestamp& legacy);
+legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy);
 
 ConversionResult<audio_uuid_t>
 aidl2legacy_AudioUuid_audio_uuid_t(const media::AudioUuid& aidl);
diff --git a/media/libaudioclient/include/media/AidlConversionUtil.h b/media/libaudioclient/include/media/AidlConversionUtil.h
index 6bfb743..9453673 100644
--- a/media/libaudioclient/include/media/AidlConversionUtil.h
+++ b/media/libaudioclient/include/media/AidlConversionUtil.h
@@ -21,6 +21,7 @@
 #include <utility>
 
 #include <android-base/expected.h>
+#include <binder/Status.h>
 
 namespace android {
 
@@ -132,4 +133,98 @@
 #define UNION_SET(u, field, value) \
     (u).set<std::decay_t<decltype(u)>::Tag::field>(value)
 
+namespace aidl_utils {
+
+/**
+ * Return the equivalent Android status_t from a binder exception code.
+ *
+ * Generally one should use statusTFromBinderStatus() instead.
+ *
+ * Exception codes can be generated from a remote Java service exception, translate
+ * them for use on the Native side.
+ *
+ * Note: for EX_TRANSACTION_FAILED and EX_SERVICE_SPECIFIC a more detailed error code
+ * can be found from transactionError() or serviceSpecificErrorCode().
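+ *
+ * For example, statusTFromExceptionCode(Status::EX_SECURITY) maps to PERMISSION_DENIED,
+ * matching the Java SecurityException it represents.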
+ */
+static inline status_t statusTFromExceptionCode(int32_t exceptionCode) {
+    using namespace ::android::binder;
+    switch (exceptionCode) {
+        case Status::EX_NONE:
+            return OK;
+        case Status::EX_SECURITY: // Java SecurityException, rethrows locally in Java
+            return PERMISSION_DENIED;
+        case Status::EX_BAD_PARCELABLE: // Java BadParcelableException, rethrows in Java
+        case Status::EX_ILLEGAL_ARGUMENT: // Java IllegalArgumentException, rethrows in Java
+        case Status::EX_NULL_POINTER: // Java NullPointerException, rethrows in Java
+            return BAD_VALUE;
+        case Status::EX_ILLEGAL_STATE: // Java IllegalStateException, rethrows in Java
+        case Status::EX_UNSUPPORTED_OPERATION: // Java UnsupportedOperationException, rethrows
+            return INVALID_OPERATION;
+        case Status::EX_HAS_REPLY_HEADER: // Native strictmode violation
+        case Status::EX_PARCELABLE: // Java bootclass loader (not standard exception), rethrows
+        case Status::EX_NETWORK_MAIN_THREAD: // Java NetworkOnMainThreadException, rethrows
+        case Status::EX_TRANSACTION_FAILED: // Native - see error code
+        case Status::EX_SERVICE_SPECIFIC:  // Java ServiceSpecificException,
+                                           // rethrows in Java with integer error code
+            return UNKNOWN_ERROR;
+    }
+    return UNKNOWN_ERROR;
+}
+
+/**
+ * Return the equivalent Android status_t from a binder status.
+ *
+ * Used to handle errors from an AIDL method declaration
+ *
+ * [oneway] void method(type0 param0, ...)
+ *
+ * or the following (where return_type is not a status_t)
+ *
+ * return_type method(type0 param0, ...)
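+ *
+ * A minimal usage sketch (mirroring the AudioFlingerClientAdapter methods in this change):
+ *
+ *   status_t setMicMute(bool state) {
+ *       return statusTFromBinderStatus(mDelegate->setMicMute(state));
+ *   }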
+ */
+static inline status_t statusTFromBinderStatus(const ::android::binder::Status &status) {
+    return status.isOk() ? OK // check OK,
+        : status.serviceSpecificErrorCode() // service-side error, not standard Java exception
+                                            // (fromServiceSpecificError)
+        ?: status.transactionError() // a native binder transaction error (fromStatusT)
+        ?: statusTFromExceptionCode(status.exceptionCode()); // a service-side error with a
+                                                    // standard Java exception (fromExceptionCode)
+}
+
+/**
+ * Return a binder::Status from native service status.
+ *
+ * This is used for methods not returning an explicit status_t,
+ * where Java callers expect an exception, not an integer return value.
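+ *
+ * A minimal usage sketch (as in PlayerBase::setVolume in this change; doSetVolume()
+ * is a hypothetical helper standing in for whatever produces the status_t):
+ *
+ *   binder::Status setVolume(float volume) {
+ *       const status_t status = doSetVolume(volume);  // hypothetical helper
+ *       return binderStatusFromStatusT(status);
+ *   }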
+ */
+static inline ::android::binder::Status binderStatusFromStatusT(
+        status_t status, const char *optionalMessage = nullptr) {
+    const char * const emptyIfNull = optionalMessage == nullptr ? "" : optionalMessage;
+    // From binder::Status instructions:
+    //  Prefer a generic exception code when possible, then a service specific
+    //  code, and finally a status_t for low level failures or legacy support.
+    //  Exception codes and service specific errors map to nicer exceptions for
+    //  Java clients.
+
+    using namespace ::android::binder;
+    switch (status) {
+        case OK:
+            return Status::ok();
+        case PERMISSION_DENIED: // throw SecurityException on Java side
+            return Status::fromExceptionCode(Status::EX_SECURITY, emptyIfNull);
+        case BAD_VALUE: // throw IllegalArgumentException on Java side
+            return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT, emptyIfNull);
+        case INVALID_OPERATION: // throw IllegalStateException on Java side
+            return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE, emptyIfNull);
+    }
+
+    // A service specific error will not show on status.transactionError() so
+    // be sure to use statusTFromBinderStatus() for reliable error handling.
+
+    // throw a ServiceSpecificException.
+    return Status::fromServiceSpecificError(status, emptyIfNull);
+}
+
+} // namespace aidl_utils
+
 }  // namespace android
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 350a780..21d18d3 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -7,6 +7,18 @@
 }
 
 cc_test {
+    name: "audio_aidl_status_tests",
+    defaults: ["libaudioclient_tests_defaults"],
+    srcs: ["audio_aidl_status_tests.cpp"],
+    shared_libs: [
+        "libaudioclient_aidl_conversion",
+        "libbinder",
+        "libcutils",
+        "libutils",
+    ],
+}
+
+cc_test {
     name: "test_create_audiotrack",
     defaults: ["libaudioclient_tests_defaults"],
     srcs: ["test_create_audiotrack.cpp",
diff --git a/media/libaudioclient/tests/audio_aidl_status_tests.cpp b/media/libaudioclient/tests/audio_aidl_status_tests.cpp
new file mode 100644
index 0000000..5517091
--- /dev/null
+++ b/media/libaudioclient/tests/audio_aidl_status_tests.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <media/AidlConversionUtil.h>
+#include <utils/Errors.h>
+
+using namespace android;
+using namespace android::aidl_utils;
+using android::binder::Status;
+
+// Tests for statusTFromBinderStatus() and binderStatusFromStatusT().
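+// Typically run via "atest audio_aidl_status_tests" (the cc_test module added in
+// media/libaudioclient/tests/Android.bp).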
+
+// STATUS_T_SMALL_VALUE_LIMIT is an arbitrary limit down to which we exhaustively check status_t errors.
+// Note that this range does not reach UNKNOWN_ERROR (INT32_MIN).
+constexpr status_t STATUS_T_SMALL_VALUE_LIMIT = -1000;
+
+// Small status values are preserved on round trip.
+TEST(audio_aidl_status_tests, statusRoundTripSmallValues) {
+    for (status_t status = 0; status > STATUS_T_SMALL_VALUE_LIMIT; --status) {
+        ASSERT_EQ(status, statusTFromBinderStatus(binderStatusFromStatusT(status)));
+    }
+}
+
+// Special status values are preserved on round trip.
+TEST(audio_aidl_status_tests, statusRoundTripSpecialValues) {
+    for (status_t status : {
+            OK,
+            UNKNOWN_ERROR,
+            NO_MEMORY,
+            INVALID_OPERATION,
+            BAD_VALUE,
+            BAD_TYPE,
+            NAME_NOT_FOUND,
+            PERMISSION_DENIED,
+            NO_INIT,
+            ALREADY_EXISTS,
+            DEAD_OBJECT,
+            FAILED_TRANSACTION,
+            BAD_INDEX,
+            NOT_ENOUGH_DATA,
+            WOULD_BLOCK,
+            TIMED_OUT,
+            UNKNOWN_TRANSACTION,
+            FDS_NOT_ALLOWED}) {
+        ASSERT_EQ(status, statusTFromBinderStatus(binderStatusFromStatusT(status)));
+    }
+}
+
+// Binder exceptions show as an error (not fixed at this time); these come from Status::fromExceptionCode().
+TEST(audio_aidl_status_tests, binderStatusExceptions) {
+    for (int exceptionCode : {
+            //Status::EX_NONE,
+            Status::EX_SECURITY,
+            Status::EX_BAD_PARCELABLE,
+            Status::EX_ILLEGAL_ARGUMENT,
+            Status::EX_NULL_POINTER,
+            Status::EX_ILLEGAL_STATE,
+            Status::EX_NETWORK_MAIN_THREAD,
+            Status::EX_UNSUPPORTED_OPERATION,
+            //Status::EX_SERVICE_SPECIFIC, -- tested fromServiceSpecificError()
+            Status::EX_PARCELABLE,
+            // This is special and Java specific; see Parcel.java.
+            Status::EX_HAS_REPLY_HEADER,
+            // This is special, and indicates to C++ binder proxies that the
+            // transaction has failed at a low level.
+            //Status::EX_TRANSACTION_FAILED, -- tested fromStatusT().
+            }) {
+        ASSERT_NE(OK, statusTFromBinderStatus(Status::fromExceptionCode(exceptionCode)));
+    }
+}
+
+// Binder transaction errors show exactly in status_t; these come from Status::fromStatusT().
+TEST(audio_aidl_status_tests, binderStatusTransactionError) {
+    for (status_t status : {
+            OK, // Note: fromStatusT() checks for 0, so OK maps to no error.
+            UNKNOWN_ERROR,
+            NO_MEMORY,
+            INVALID_OPERATION,
+            BAD_VALUE,
+            BAD_TYPE,
+            NAME_NOT_FOUND,
+            PERMISSION_DENIED,
+            NO_INIT,
+            ALREADY_EXISTS,
+            DEAD_OBJECT,
+            FAILED_TRANSACTION,
+            BAD_INDEX,
+            NOT_ENOUGH_DATA,
+            WOULD_BLOCK,
+            TIMED_OUT,
+            UNKNOWN_TRANSACTION,
+            FDS_NOT_ALLOWED}) {
+        ASSERT_EQ(status, statusTFromBinderStatus(Status::fromStatusT(status)));
+    }
+}
+
+// Binder service specific errors show in status_t; these come from Status::fromServiceSpecificError().
+TEST(audio_aidl_status_tests, binderStatusServiceSpecificError) {
+    // fromServiceSpecificError() still stores an exception code if the status is 0, so start at -1.
+    for (status_t status = -1; status > STATUS_T_SMALL_VALUE_LIMIT; --status) {
+        ASSERT_EQ(status, statusTFromBinderStatus(Status::fromServiceSpecificError(status)));
+    }
+}
+
+// Binder status with message.
+TEST(audio_aidl_status_tests, binderStatusMessage) {
+    const String8 message("abcd");
+    for (status_t status = -1; status > STATUS_T_SMALL_VALUE_LIMIT; --status) {
+        const Status binderStatus = binderStatusFromStatusT(status, message.c_str());
+        ASSERT_EQ(status, statusTFromBinderStatus(binderStatus));
+        ASSERT_EQ(message, binderStatus.exceptionMessage());
+    }
+}
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index 56343d8..1dee938 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -140,7 +140,7 @@
     parcelable->useInChannelMask = mUseInChannelMask;
     parcelable->useForVolume = mUseForVolume;
     parcelable->mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_mask_int32_t(mGain.mode));
+            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
     parcelable->channelMask = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
     parcelable->minValue = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.min_value));
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index 6b63675..20d8632 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -291,7 +291,7 @@
     parcelable->id = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_handle_t_int32_t(mId));
     parcelable->gain.index = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(mGain.index));
     parcelable->gain.mode = VALUE_OR_RETURN_STATUS(
-            legacy2aidl_audio_gain_mode_t_mask_int32_t(mGain.mode));
+            legacy2aidl_audio_gain_mode_t_int32_t_mask(mGain.mode));
     parcelable->gain.channelMask = VALUE_OR_RETURN_STATUS(
             legacy2aidl_audio_channel_mask_t_int32_t(mGain.channel_mask));
     parcelable->gain.rampDurationMs = VALUE_OR_RETURN_STATUS(
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 77c2550..0108816 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -215,7 +215,7 @@
         const struct audio_config *config, size_t *size) {
     if (mDevice == 0) return NO_INIT;
     AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    HidlUtils::audioConfigFromHal(*config, true /*isInput*/, &hidlConfig);
     Result retval;
     Return<void> ret = mDevice->getInputBufferSize(
             hidlConfig,
@@ -240,7 +240,7 @@
     status_t status = deviceAddressFromHal(deviceType, address, &hidlDevice);
     if (status != OK) return status;
     AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    HidlUtils::audioConfigFromHal(*config, false /*isInput*/, &hidlConfig);
     Result retval = Result::NOT_INITIALIZED;
     Return<void> ret = mDevice->openOutputStream(
             handle,
@@ -275,7 +275,7 @@
     status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
     if (status != OK) return status;
     AudioConfig hidlConfig;
-    HidlUtils::audioConfigFromHal(*config, &hidlConfig);
+    HidlUtils::audioConfigFromHal(*config, true /*isInput*/, &hidlConfig);
     Result retval = Result::NOT_INITIALIZED;
 #if MAJOR_VERSION == 2
     auto sinkMetadata = AudioSource(source);
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 8f2f016..dbe0d62 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -131,12 +131,15 @@
     shared_libs: [
         "liblog",
     ],
+    static_libs: [
+        "libaudioutils",
+    ],
     header_libs: [
         "libhardware_headers",
     ],
     cppflags: [
+        "-DBIQUAD_OPT",
         "-fvisibility=hidden",
-
         "-Wall",
         "-Werror",
     ],
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
index 5b47aa6..1f0b459 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
@@ -21,6 +21,9 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 #include "LVDBE.h"
 #include "LVDBE_Private.h"
 #include "VectorArithmetic.h"
@@ -107,12 +110,20 @@
     /*
      * Setup the high pass filter
      */
+#ifdef BIQUAD_OPT
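+    // Pack the LVM HPF table entry into the audio_utils biquad coefficient order:
+    // feed-forward A0..A2 first, then the feedback terms, with B1/B2 negated
+    // (the LVM tables appear to store feedback with the opposite sign convention).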
+    std::array<LVM_FLOAT, android::audio_utils::kBiquadNumCoefs> coefs = {
+            LVDBE_HPF_Table[Offset].A0, LVDBE_HPF_Table[Offset].A1, LVDBE_HPF_Table[Offset].A2,
+            -(LVDBE_HPF_Table[Offset].B1), -(LVDBE_HPF_Table[Offset].B2)};
+    pInstance->pBqInstance
+            ->setCoefficients<std::array<LVM_FLOAT, android::audio_utils::kBiquadNumCoefs>>(coefs);
+#else
     LoadConst_Float(0,                                      /* Clear the history, value 0 */
                     (LVM_FLOAT*)&pInstance->pData->HPFTaps, /* Destination */
                     sizeof(pInstance->pData->HPFTaps) / sizeof(LVM_FLOAT)); /* Number of words */
     BQ_2I_D32F32Cll_TRC_WRA_01_Init(&pInstance->pCoef->HPFInstance, /* Initialise the filter */
                                     &pInstance->pData->HPFTaps,
                                     (BQ_FLOAT_Coefs_t*)&LVDBE_HPF_Table[Offset]);
+#endif
 
     /*
      * Setup the band pass filter
@@ -275,6 +286,15 @@
     LVDBE_Instance_t* pInstance = (LVDBE_Instance_t*)hInstance;
     LVMixer3_2St_FLOAT_st* pBypassMixer_Instance = &pInstance->pData->BypassMixer;
 
+#ifdef BIQUAD_OPT
+    /*
+     * Create biquad instance
+     */
+    pInstance->pBqInstance.reset(
+            new android::audio_utils::BiquadFilter<LVM_FLOAT>(pParams->NrChannels));
+    pInstance->pBqInstance->clear();
+#endif
+
     /*
      * Update the filters
      */
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
index 12af162..611b762 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
@@ -94,6 +94,14 @@
         return LVDBE_NULLADDRESS;
     }
 
+#ifdef BIQUAD_OPT
+    /*
+     * Create biquad instance
+     */
+    pInstance->pBqInstance.reset(
+            new android::audio_utils::BiquadFilter<LVM_FLOAT>(LVM_MAX_CHANNELS));
+#endif
+
     /*
      * Initialise the filters
      */
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
index 4fef1ef..fa85638 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
@@ -33,6 +33,9 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 #include "LVDBE.h" /* Calling or Application layer definitions */
 #include "BIQUAD.h"
 #include "LVC_Mixer.h"
@@ -63,7 +66,9 @@
     AGC_MIX_VOL_2St1Mon_FLOAT_t AGCInstance; /* AGC instance parameters */
 
     /* Process variables */
+#ifndef BIQUAD_OPT
     Biquad_2I_Order2_FLOAT_Taps_t HPFTaps; /* High pass filter taps */
+#endif
     Biquad_1I_Order2_FLOAT_Taps_t BPFTaps; /* Band pass filter taps */
     LVMixer3_1St_FLOAT_st BypassVolume;    /* Bypass volume scaler */
     LVMixer3_2St_FLOAT_st BypassMixer;     /* Bypass Mixer for Click Removal */
@@ -73,7 +78,9 @@
 /* Coefs structure */
 typedef struct {
     /* Process variables */
+#ifndef BIQUAD_OPT
     Biquad_FLOAT_Instance_t HPFInstance; /* High pass filter instance */
+#endif
     Biquad_FLOAT_Instance_t BPFInstance; /* Band pass filter instance */
 } LVDBE_Coef_FLOAT_t;
 /* Instance structure */
@@ -86,6 +93,10 @@
     LVDBE_Data_FLOAT_t* pData; /* Instance data */
     LVDBE_Coef_FLOAT_t* pCoef; /* Instance coefficients */
     void* pScratch;            /* scratch pointer */
+#ifdef BIQUAD_OPT
+    std::unique_ptr<android::audio_utils::BiquadFilter<LVM_FLOAT>>
+            pBqInstance; /* Biquad filter instance */
+#endif
 } LVDBE_Instance_t;
 
 /****************************************************************************************/
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
index f4a4d6f..bd04a02 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
@@ -20,6 +20,9 @@
 /*    Includes                                                                          */
 /*                                                                                      */
 /****************************************************************************************/
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 
 #include <string.h>  // memset
 #include "LVDBE.h"
@@ -125,10 +128,14 @@
          * Apply the high pass filter if selected
          */
         if (pInstance->Params.HPFSelect == LVDBE_HPF_ON) {
+#ifdef BIQUAD_OPT
+            pInstance->pBqInstance->process(pScratch, pScratch, NrFrames);
+#else
             BQ_MC_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance, /* Filter instance      */
                                        pScratch,                       /* Source               */
                                        pScratch,                       /* Destination          */
                                        (LVM_INT16)NrFrames, (LVM_INT16)NrChannels);
+#endif
         }
 
         /*
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 670b415..865baad 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -1021,6 +1021,16 @@
         ActiveParams.NrChannels = NrChannels;
         ActiveParams.ChMask = pConfig->inputCfg.channels;
 
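+        // Map the channel count onto the LVM source format; counts outside
+        // [1, LVM_MAX_CHANNELS] are rejected.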
+        if (NrChannels == 1) {
+            ActiveParams.SourceFormat = LVM_MONO;
+        } else if (NrChannels == 2) {
+            ActiveParams.SourceFormat = LVM_STEREO;
+        } else if (NrChannels > 2 && NrChannels <= LVM_MAX_CHANNELS) {
+            ActiveParams.SourceFormat = LVM_MULTICHANNEL;
+        } else {
+            return -EINVAL;
+        }
+
         LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams);
 
         LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_setConfig")
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index 5217cf9..681e247 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -1,35 +1,5 @@
 // audio preprocessing wrapper
 cc_library_shared {
-    name: "libaudiopreprocessing_legacy",
-
-    vendor: true,
-
-    relative_install_path: "soundfx",
-
-    srcs: ["PreProcessing.cpp"],
-
-    shared_libs: [
-        "libwebrtc_audio_preprocessing",
-        "libspeexresampler",
-        "libutils",
-        "liblog",
-    ],
-
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=hidden",
-        "-Wall",
-        "-Werror",
-    ],
-
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-    ],
-}
-
-cc_library_shared {
     name: "libaudiopreprocessing",
     vendor: true,
     relative_install_path: "soundfx",
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 1a5547b..03ccc34 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -23,15 +23,10 @@
 #include <hardware/audio_effect.h>
 #include <utils/Log.h>
 #include <utils/Timers.h>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <audio_processing.h>
 #include <module_common_types.h>
-#ifdef WEBRTC_LEGACY
-#include "speex/speex_resampler.h"
-#endif
 
 // undefine to perform multi channels API functional tests
 //#define DUAL_MIC_TEST
@@ -46,9 +41,7 @@
 // types of pre processing modules
 enum preproc_id {
     PREPROC_AGC,  // Automatic Gain Control
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
     PREPROC_NUM_EFFECTS
@@ -110,10 +103,8 @@
     int id;                        // audio session ID
     int io;                        // handle of input stream this session is on
     webrtc::AudioProcessing* apm;  // handle on webRTC audio processing module (APM)
-#ifndef WEBRTC_LEGACY
     // Audio Processing module builder
     webrtc::AudioProcessingBuilder ap_builder;
-#endif
     size_t apmFrameCount;      // buffer size for webRTC process (10 ms)
     uint32_t apmSamplingRate;  // webRTC APM sampling rate (8/16 or 32 kHz)
     size_t frameCount;         // buffer size before input resampler ( <=> apmFrameCount)
@@ -124,42 +115,25 @@
     uint32_t enabledMsk;       // bit field containing IDs of enabled pre processors
     uint32_t processedMsk;     // bit field containing IDs of pre processors already
                                // processed in current round
-#ifdef WEBRTC_LEGACY
-    webrtc::AudioFrame* procFrame;  // audio frame passed to webRTC AMP ProcessStream()
-#else
     // audio config strucutre
     webrtc::AudioProcessing::Config config;
     webrtc::StreamConfig inputConfig;   // input stream configuration
     webrtc::StreamConfig outputConfig;  // output stream configuration
-#endif
     int16_t* inBuf;    // input buffer used when resampling
     size_t inBufSize;  // input buffer size in frames
     size_t framesIn;   // number of frames in input buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* inResampler;  // handle on input speex resampler
-#endif
     int16_t* outBuf;    // output buffer used when resampling
     size_t outBufSize;  // output buffer size in frames
     size_t framesOut;   // number of frames in output buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* outResampler;  // handle on output speex resampler
-#endif
     uint32_t revChannelCount;  // number of channels on reverse stream
     uint32_t revEnabledMsk;    // bit field containing IDs of enabled pre processors
                                // with reverse channel
     uint32_t revProcessedMsk;  // bit field containing IDs of pre processors with reverse
                                // channel already processed in current round
-#ifdef WEBRTC_LEGACY
-    webrtc::AudioFrame* revFrame;  // audio frame passed to webRTC AMP AnalyzeReverseStream()
-#else
     webrtc::StreamConfig revConfig;     // reverse stream configuration.
-#endif
     int16_t* revBuf;    // reverse channel input buffer
     size_t revBufSize;  // reverse channel input buffer size
     size_t framesRev;   // number of frames in reverse channel input buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* revResampler;  // handle on reverse channel input speex resampler
-#endif
 };
 
 #ifdef DUAL_MIC_TEST
@@ -213,7 +187,6 @@
         "Automatic Gain Control",
         "The Android Open Source Project"};
 
-#ifndef WEBRTC_LEGACY
 // Automatic Gain Control 2
 static const effect_descriptor_t sAgc2Descriptor = {
         {0xae3c653b, 0xbe18, 0x4ab8, 0x8938, {0x41, 0x8f, 0x0a, 0x7f, 0x06, 0xac}},  // type
@@ -224,7 +197,6 @@
         0,  // FIXME indicate memory usage
         "Automatic Gain Control 2",
         "The Android Open Source Project"};
-#endif
 
 // Acoustic Echo Cancellation
 static const effect_descriptor_t sAecDescriptor = {
@@ -249,9 +221,7 @@
         "The Android Open Source Project"};
 
 static const effect_descriptor_t* sDescriptors[PREPROC_NUM_EFFECTS] = {&sAgcDescriptor,
-#ifndef WEBRTC_LEGACY
                                                                        &sAgc2Descriptor,
-#endif
                                                                        &sAecDescriptor,
                                                                        &sNsDescriptor};
 
@@ -260,9 +230,7 @@
 //------------------------------------------------------------------------------
 
 const effect_uuid_t* const sUuidToPreProcTable[PREPROC_NUM_EFFECTS] = {FX_IID_AGC,
-#ifndef WEBRTC_LEGACY
                                                                        FX_IID_AGC2,
-#endif
                                                                        FX_IID_AEC, FX_IID_NS};
 
 const effect_uuid_t* ProcIdToUuid(int procId) {
@@ -297,7 +265,6 @@
 static const int kAgcDefaultCompGain = 9;
 static const bool kAgcDefaultLimiter = true;
 
-#ifndef WEBRTC_LEGACY
 int Agc2Init(preproc_effect_t* effect) {
     ALOGV("Agc2Init");
     effect->session->config = effect->session->apm->GetConfig();
@@ -308,48 +275,27 @@
     effect->session->apm->ApplyConfig(effect->session->config);
     return 0;
 }
-#endif
 
 int AgcInit(preproc_effect_t* effect) {
     ALOGV("AgcInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    agc->set_mode(webrtc::GainControl::kFixedDigital);
-    agc->set_target_level_dbfs(kAgcDefaultTargetLevel);
-    agc->set_compression_gain_db(kAgcDefaultCompGain);
-    agc->enable_limiter(kAgcDefaultLimiter);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.target_level_dbfs = kAgcDefaultTargetLevel;
     effect->session->config.gain_controller1.compression_gain_db = kAgcDefaultCompGain;
     effect->session->config.gain_controller1.enable_limiter = kAgcDefaultLimiter;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     return 0;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2Create(preproc_effect_t* effect) {
     Agc2Init(effect);
     return 0;
 }
-#endif
 
 int AgcCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = effect->session->apm->gain_control();
-    ALOGV("AgcCreate got agc %p", agc);
-    if (agc == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(agc);
-#endif
     AgcInit(effect);
     return 0;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2GetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
@@ -422,15 +368,11 @@
 
     return status;
 }
-#endif
 
 int AgcGetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
     t_agc_settings* pProperties = (t_agc_settings*)pValue;
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-#endif
 
     switch (param) {
         case AGC_PARAM_TARGET_LEVEL:
@@ -459,32 +401,6 @@
             break;
     }
 
-#ifdef WEBRTC_LEGACY
-    switch (param) {
-        case AGC_PARAM_TARGET_LEVEL:
-            *(int16_t*)pValue = (int16_t)(agc->target_level_dbfs() * -100);
-            ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t*)pValue);
-            break;
-        case AGC_PARAM_COMP_GAIN:
-            *(int16_t*)pValue = (int16_t)(agc->compression_gain_db() * 100);
-            ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t*)pValue);
-            break;
-        case AGC_PARAM_LIMITER_ENA:
-            *(bool*)pValue = (bool)agc->is_limiter_enabled();
-            ALOGV("AgcGetParameter() limiter enabled %s",
-                  (*(int16_t*)pValue != 0) ? "true" : "false");
-            break;
-        case AGC_PARAM_PROPERTIES:
-            pProperties->targetLevel = (int16_t)(agc->target_level_dbfs() * -100);
-            pProperties->compGain = (int16_t)(agc->compression_gain_db() * 100);
-            pProperties->limiterEnabled = (bool)agc->is_limiter_enabled();
-            break;
-        default:
-            ALOGW("AgcGetParameter() unknown param %d", param);
-            status = -EINVAL;
-            break;
-    }
-#else
     effect->session->config = effect->session->apm->GetConfig();
     switch (param) {
         case AGC_PARAM_TARGET_LEVEL:
@@ -515,11 +431,9 @@
             status = -EINVAL;
             break;
     }
-#endif
     return status;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2SetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
@@ -567,43 +481,9 @@
 
     return status;
 }
-#endif
 
 int AgcSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
-#ifdef WEBRTC_LEGACY
-    uint32_t param = *(uint32_t*)pParam;
-    t_agc_settings* pProperties = (t_agc_settings*)pValue;
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-
-    switch (param) {
-        case AGC_PARAM_TARGET_LEVEL:
-            ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t*)pValue);
-            status = agc->set_target_level_dbfs(-(*(int16_t*)pValue / 100));
-            break;
-        case AGC_PARAM_COMP_GAIN:
-            ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t*)pValue);
-            status = agc->set_compression_gain_db(*(int16_t*)pValue / 100);
-            break;
-        case AGC_PARAM_LIMITER_ENA:
-            ALOGV("AgcSetParameter() limiter enabled %s", *(bool*)pValue ? "true" : "false");
-            status = agc->enable_limiter(*(bool*)pValue);
-            break;
-        case AGC_PARAM_PROPERTIES:
-            ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
-                  pProperties->targetLevel, pProperties->compGain, pProperties->limiterEnabled);
-            status = agc->set_target_level_dbfs(-(pProperties->targetLevel / 100));
-            if (status != 0) break;
-            status = agc->set_compression_gain_db(pProperties->compGain / 100);
-            if (status != 0) break;
-            status = agc->enable_limiter(pProperties->limiterEnabled);
-            break;
-        default:
-            ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
-            status = -EINVAL;
-            break;
-    }
-#else
     uint32_t param = *(uint32_t*)pParam;
     t_agc_settings* pProperties = (t_agc_settings*)pValue;
     effect->session->config = effect->session->apm->GetConfig();
@@ -637,96 +517,57 @@
             break;
     }
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 
     ALOGV("AgcSetParameter() done status %d", status);
 
     return status;
 }
 
-#ifndef WEBRTC_LEGACY
 void Agc2Enable(preproc_effect_t* effect) {
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller2.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
 }
-#endif
 
 void AgcEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    ALOGV("AgcEnable agc %p", agc);
-    agc->Enable(true);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
-#ifndef WEBRTC_LEGACY
 void Agc2Disable(preproc_effect_t* effect) {
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller2.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
 }
-#endif
 
 void AgcDisable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    ALOGV("AgcDisable");
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    agc->Enable(false);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 static const preproc_ops_t sAgcOps = {AgcCreate,       AgcInit,         NULL, AgcEnable, AgcDisable,
                                       AgcSetParameter, AgcGetParameter, NULL};
 
-#ifndef WEBRTC_LEGACY
 static const preproc_ops_t sAgc2Ops = {Agc2Create,       Agc2Init,    NULL,
                                        Agc2Enable,       Agc2Disable, Agc2SetParameter,
                                        Agc2GetParameter, NULL};
-#endif
 
 //------------------------------------------------------------------------------
 // Acoustic Echo Canceler (AEC)
 //------------------------------------------------------------------------------
 
-#ifdef WEBRTC_LEGACY
-static const webrtc::EchoControlMobile::RoutingMode kAecDefaultMode =
-        webrtc::EchoControlMobile::kEarpiece;
-static const bool kAecDefaultComfortNoise = true;
-#endif
 
 int AecInit(preproc_effect_t* effect) {
     ALOGV("AecInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    aec->set_routing_mode(kAecDefaultMode);
-    aec->enable_comfort_noise(kAecDefaultComfortNoise);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.mobile_mode = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     return 0;
 }
 
 int AecCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = effect->session->apm->echo_control_mobile();
-    ALOGV("AecCreate got aec %p", aec);
-    if (aec == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(aec);
-#endif
     AecInit(effect);
     return 0;
 }
@@ -744,13 +585,11 @@
             *(uint32_t*)pValue = 1000 * effect->session->apm->stream_delay_ms();
             ALOGV("AecGetParameter() echo delay %d us", *(uint32_t*)pValue);
             break;
-#ifndef WEBRTC_LEGACY
         case AEC_PARAM_MOBILE_MODE:
             effect->session->config = effect->session->apm->GetConfig();
             *(uint32_t*)pValue = effect->session->config.echo_canceller.mobile_mode;
             ALOGV("AecGetParameter() mobile mode %d us", *(uint32_t*)pValue);
             break;
-#endif
         default:
             ALOGW("AecGetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
             status = -EINVAL;
@@ -770,14 +609,12 @@
             status = effect->session->apm->set_stream_delay_ms(value / 1000);
             ALOGV("AecSetParameter() echo delay %d us, status %d", value, status);
             break;
-#ifndef WEBRTC_LEGACY
         case AEC_PARAM_MOBILE_MODE:
             effect->session->config = effect->session->apm->GetConfig();
             effect->session->config.echo_canceller.mobile_mode = value;
             ALOGV("AecSetParameter() mobile mode %d us", value);
             effect->session->apm->ApplyConfig(effect->session->config);
             break;
-#endif
         default:
             ALOGW("AecSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
             status = -EINVAL;
@@ -787,57 +624,24 @@
 }
 
 void AecEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    ALOGV("AecEnable aec %p", aec);
-    aec->Enable(true);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 void AecDisable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    ALOGV("AecDisable");
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    aec->Enable(false);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 int AecSetDevice(preproc_effect_t* effect, uint32_t device) {
     ALOGV("AecSetDevice %08x", device);
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    webrtc::EchoControlMobile::RoutingMode mode =
-            webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
-#endif
 
     if (audio_is_input_device(device)) {
         return 0;
     }
 
-#ifdef WEBRTC_LEGACY
-    switch (device) {
-        case AUDIO_DEVICE_OUT_EARPIECE:
-            mode = webrtc::EchoControlMobile::kEarpiece;
-            break;
-        case AUDIO_DEVICE_OUT_SPEAKER:
-            mode = webrtc::EchoControlMobile::kSpeakerphone;
-            break;
-        case AUDIO_DEVICE_OUT_WIRED_HEADSET:
-        case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
-        case AUDIO_DEVICE_OUT_USB_HEADSET:
-        default:
-            break;
-    }
-    aec->set_routing_mode(mode);
-#endif
     return 0;
 }
 
@@ -849,49 +653,19 @@
 // Noise Suppression (NS)
 //------------------------------------------------------------------------------
 
-#ifdef WEBRTC_LEGACY
-static const webrtc::NoiseSuppression::Level kNsDefaultLevel = webrtc::NoiseSuppression::kModerate;
-#else
 static const webrtc::AudioProcessing::Config::NoiseSuppression::Level kNsDefaultLevel =
         webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
-#endif
 
 int NsInit(preproc_effect_t* effect) {
     ALOGV("NsInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ns->set_level(kNsDefaultLevel);
-    webrtc::Config config;
-    std::vector<webrtc::Point> geometry;
-    // TODO(aluebs): Make the geometry settable.
-    geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(-0.01f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(0.01f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f));
-    // The geometry needs to be set with Beamforming enabled.
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
-    effect->session->apm->SetExtraOptions(config);
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
-    effect->session->apm->SetExtraOptions(config);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.level = kNsDefaultLevel;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     effect->type = NS_TYPE_SINGLE_CHANNEL;
     return 0;
 }
 
 int NsCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = effect->session->apm->noise_suppression();
-    ALOGV("NsCreate got ns %p", ns);
-    if (ns == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(ns);
-#endif
     NsInit(effect);
     return 0;
 }
@@ -904,31 +678,6 @@
 
 int NsSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    uint32_t param = *(uint32_t*)pParam;
-    uint32_t value = *(uint32_t*)pValue;
-    switch (param) {
-        case NS_PARAM_LEVEL:
-            ns->set_level((webrtc::NoiseSuppression::Level)value);
-            ALOGV("NsSetParameter() level %d", value);
-            break;
-        case NS_PARAM_TYPE: {
-            webrtc::Config config;
-            std::vector<webrtc::Point> geometry;
-            bool is_beamforming_enabled = value == NS_TYPE_MULTI_CHANNEL && ns->is_enabled();
-            config.Set<webrtc::Beamforming>(
-                    new webrtc::Beamforming(is_beamforming_enabled, geometry));
-            effect->session->apm->SetExtraOptions(config);
-            effect->type = value;
-            ALOGV("NsSetParameter() type %d", value);
-            break;
-        }
-        default:
-            ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
-            status = -EINVAL;
-    }
-#else
     uint32_t param = *(uint32_t*)pParam;
     uint32_t value = *(uint32_t*)pValue;
     effect->session->config = effect->session->apm->GetConfig();
@@ -943,52 +692,28 @@
             status = -EINVAL;
     }
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 
     return status;
 }
 
 void NsEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ALOGV("NsEnable ns %p", ns);
-    ns->Enable(true);
-    if (effect->type == NS_TYPE_MULTI_CHANNEL) {
-        webrtc::Config config;
-        std::vector<webrtc::Point> geometry;
-        config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
-        effect->session->apm->SetExtraOptions(config);
-    }
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 void NsDisable(preproc_effect_t* effect) {
     ALOGV("NsDisable");
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ns->Enable(false);
-    webrtc::Config config;
-    std::vector<webrtc::Point> geometry;
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
-    effect->session->apm->SetExtraOptions(config);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 static const preproc_ops_t sNsOps = {NsCreate,  NsInit,         NULL,           NsEnable,
                                      NsDisable, NsSetParameter, NsGetParameter, NULL};
 
 static const preproc_ops_t* sPreProcOps[PREPROC_NUM_EFFECTS] = {&sAgcOps,
-#ifndef WEBRTC_LEGACY
                                                                 &sAgc2Ops,
-#endif
                                                                 &sAecOps, &sNsOps};
 
 //------------------------------------------------------------------------------
@@ -1119,9 +844,6 @@
     session->id = 0;
     session->io = 0;
     session->createdMsk = 0;
-#ifdef WEBRTC_LEGACY
-    session->apm = NULL;
-#endif
     for (i = 0; i < PREPROC_NUM_EFFECTS && status == 0; i++) {
         status = Effect_Init(&session->effects[i], i);
     }
@@ -1135,75 +857,32 @@
     ALOGV("Session_CreateEffect procId %d, createdMsk %08x", procId, session->createdMsk);
 
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
-        session->apm = webrtc::AudioProcessing::Create();
-        if (session->apm == NULL) {
-            ALOGW("Session_CreateEffect could not get apm engine");
-            goto error;
-        }
-        const webrtc::ProcessingConfig processing_config = {
-                {{kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl}}};
-        session->apm->Initialize(processing_config);
-        session->procFrame = new webrtc::AudioFrame();
-        if (session->procFrame == NULL) {
-            ALOGW("Session_CreateEffect could not allocate audio frame");
-            goto error;
-        }
-        session->revFrame = new webrtc::AudioFrame();
-        if (session->revFrame == NULL) {
-            ALOGW("Session_CreateEffect could not allocate reverse audio frame");
-            goto error;
-        }
-#else
         session->apm = session->ap_builder.Create();
         if (session->apm == NULL) {
             ALOGW("Session_CreateEffect could not get apm engine");
             goto error;
         }
-#endif
         session->apmSamplingRate = kPreprocDefaultSr;
         session->apmFrameCount = (kPreprocDefaultSr) / 100;
         session->frameCount = session->apmFrameCount;
         session->samplingRate = kPreprocDefaultSr;
         session->inChannelCount = kPreProcDefaultCnl;
         session->outChannelCount = kPreProcDefaultCnl;
-#ifdef WEBRTC_LEGACY
-        session->procFrame->sample_rate_hz_ = kPreprocDefaultSr;
-        session->procFrame->num_channels_ = kPreProcDefaultCnl;
-#else
         session->inputConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->inputConfig.set_num_channels(kPreProcDefaultCnl);
         session->outputConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->outputConfig.set_num_channels(kPreProcDefaultCnl);
-#endif
         session->revChannelCount = kPreProcDefaultCnl;
-#ifdef WEBRTC_LEGACY
-        session->revFrame->sample_rate_hz_ = kPreprocDefaultSr;
-        session->revFrame->num_channels_ = kPreProcDefaultCnl;
-#else
         session->revConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->revConfig.set_num_channels(kPreProcDefaultCnl);
-#endif
         session->enabledMsk = 0;
         session->processedMsk = 0;
         session->revEnabledMsk = 0;
         session->revProcessedMsk = 0;
-#ifdef WEBRTC_LEGACY
-        session->inResampler = NULL;
-#endif
         session->inBuf = NULL;
         session->inBufSize = 0;
-#ifdef WEBRTC_LEGACY
-        session->outResampler = NULL;
-#endif
         session->outBuf = NULL;
         session->outBufSize = 0;
-#ifdef WEBRTC_LEGACY
-        session->revResampler = NULL;
-#endif
         session->revBuf = NULL;
         session->revBufSize = 0;
     }
@@ -1217,17 +896,8 @@
 
 error:
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
-        delete session->revFrame;
-        session->revFrame = NULL;
-        delete session->procFrame;
-        session->procFrame = NULL;
-        delete session->apm;
-        session->apm = NULL;  // NOLINT(clang-analyzer-cplusplus.NewDelete)
-#else
         delete session->apm;
         session->apm = NULL;
-#endif
     }
     return status;
 }
@@ -1236,29 +906,8 @@
     ALOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
     session->createdMsk &= ~(1 << fx->procId);
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
         delete session->apm;
         session->apm = NULL;
-        delete session->procFrame;
-        session->procFrame = NULL;
-        delete session->revFrame;
-        session->revFrame = NULL;
-        if (session->inResampler != NULL) {
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-        }
-        if (session->outResampler != NULL) {
-            speex_resampler_destroy(session->outResampler);
-            session->outResampler = NULL;
-        }
-        if (session->revResampler != NULL) {
-            speex_resampler_destroy(session->revResampler);
-            session->revResampler = NULL;
-        }
-#else
-        delete session->apm;
-        session->apm = NULL;
-#endif
         delete session->inBuf;
         session->inBuf = NULL;
         delete session->outBuf;
@@ -1284,9 +933,6 @@
 
     ALOGV("Session_SetConfig sr %d cnl %08x", config->inputCfg.samplingRate,
           config->inputCfg.channels);
-#ifdef WEBRTC_LEGACY
-    int status;
-#endif
 
     // AEC implementation is limited to 16kHz
     if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
@@ -1297,51 +943,25 @@
         session->apmSamplingRate = 8000;
     }
 
-#ifdef WEBRTC_LEGACY
-    const webrtc::ProcessingConfig processing_config = {
-            {{static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), outCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl}}};
-    status = session->apm->Initialize(processing_config);
-    if (status < 0) {
-        return -EINVAL;
-    }
-#endif
 
     session->samplingRate = config->inputCfg.samplingRate;
     session->apmFrameCount = session->apmSamplingRate / 100;
     if (session->samplingRate == session->apmSamplingRate) {
         session->frameCount = session->apmFrameCount;
     } else {
-#ifdef WEBRTC_LEGACY
-        session->frameCount =
-                (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate + 1;
-#else
         session->frameCount =
                 (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate;
-#endif
     }
     session->inChannelCount = inCnl;
     session->outChannelCount = outCnl;
-#ifdef WEBRTC_LEGACY
-    session->procFrame->num_channels_ = inCnl;
-    session->procFrame->sample_rate_hz_ = session->apmSamplingRate;
-#else
     session->inputConfig.set_sample_rate_hz(session->samplingRate);
     session->inputConfig.set_num_channels(inCnl);
     session->outputConfig.set_sample_rate_hz(session->samplingRate);
     session->outputConfig.set_num_channels(inCnl);
-#endif
 
     session->revChannelCount = inCnl;
-#ifdef WEBRTC_LEGACY
-    session->revFrame->num_channels_ = inCnl;
-    session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
-#else
     session->revConfig.set_sample_rate_hz(session->samplingRate);
     session->revConfig.set_num_channels(inCnl);
-#endif
 
     // force process buffer reallocation
     session->inBufSize = 0;
@@ -1349,53 +969,6 @@
     session->framesIn = 0;
     session->framesOut = 0;
 
-#ifdef WEBRTC_LEGACY
-    if (session->inResampler != NULL) {
-        speex_resampler_destroy(session->inResampler);
-        session->inResampler = NULL;
-    }
-    if (session->outResampler != NULL) {
-        speex_resampler_destroy(session->outResampler);
-        session->outResampler = NULL;
-    }
-    if (session->revResampler != NULL) {
-        speex_resampler_destroy(session->revResampler);
-        session->revResampler = NULL;
-    }
-    if (session->samplingRate != session->apmSamplingRate) {
-        int error;
-        session->inResampler =
-                speex_resampler_init(session->inChannelCount, session->samplingRate,
-                                     session->apmSamplingRate, RESAMPLER_QUALITY, &error);
-        if (session->inResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            return -EINVAL;
-        }
-        session->outResampler =
-                speex_resampler_init(session->outChannelCount, session->apmSamplingRate,
-                                     session->samplingRate, RESAMPLER_QUALITY, &error);
-        if (session->outResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-            return -EINVAL;
-        }
-        session->revResampler =
-                speex_resampler_init(session->inChannelCount, session->samplingRate,
-                                     session->apmSamplingRate, RESAMPLER_QUALITY, &error);
-        if (session->revResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-            speex_resampler_destroy(session->outResampler);
-            session->outResampler = NULL;
-            return -EINVAL;
-        }
-    }
-#endif
 
     session->state = PREPROC_SESSION_STATE_CONFIG;
     return 0;
@@ -1430,22 +1003,7 @@
         return -EINVAL;
     }
     uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
-#ifdef WEBRTC_LEGACY
-    const webrtc::ProcessingConfig processing_config = {
-            {{static_cast<int>(session->apmSamplingRate), session->inChannelCount},
-             {static_cast<int>(session->apmSamplingRate), session->outChannelCount},
-             {static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl}}};
-    int status = session->apm->Initialize(processing_config);
-    if (status < 0) {
-        return -EINVAL;
-    }
-#endif
     session->revChannelCount = inCnl;
-#ifdef WEBRTC_LEGACY
-    session->revFrame->num_channels_ = inCnl;
-    session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
-#endif
     // force process buffer reallocation
     session->revBufSize = 0;
     session->framesRev = 0;
@@ -1467,24 +1025,10 @@
     if (enabled) {
         if (session->enabledMsk == 0) {
             session->framesIn = 0;
-#ifdef WEBRTC_LEGACY
-            if (session->inResampler != NULL) {
-                speex_resampler_reset_mem(session->inResampler);
-            }
-            session->framesOut = 0;
-            if (session->outResampler != NULL) {
-                speex_resampler_reset_mem(session->outResampler);
-            }
-#endif
         }
         session->enabledMsk |= (1 << procId);
         if (HasReverseStream(procId)) {
             session->framesRev = 0;
-#ifdef WEBRTC_LEGACY
-            if (session->revResampler != NULL) {
-                speex_resampler_reset_mem(session->revResampler);
-            }
-#endif
             session->revEnabledMsk |= (1 << procId);
         }
     } else {
@@ -1600,82 +1144,6 @@
             return 0;
         }
 
-#ifdef WEBRTC_LEGACY
-        if (session->inResampler != NULL) {
-            size_t fr = session->frameCount - session->framesIn;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            if (session->inBufSize < session->framesIn + fr) {
-                int16_t* buf;
-                session->inBufSize = session->framesIn + fr;
-                buf = (int16_t*)realloc(
-                        session->inBuf,
-                        session->inBufSize * session->inChannelCount * sizeof(int16_t));
-                if (buf == NULL) {
-                    session->framesIn = 0;
-                    free(session->inBuf);
-                    session->inBuf = NULL;
-                    return -ENOMEM;
-                }
-                session->inBuf = buf;
-            }
-            memcpy(session->inBuf + session->framesIn * session->inChannelCount, inBuffer->s16,
-                   fr * session->inChannelCount * sizeof(int16_t));
-#ifdef DUAL_MIC_TEST
-            pthread_mutex_lock(&gPcmDumpLock);
-            if (gPcmDumpFh != NULL) {
-                fwrite(inBuffer->raw, fr * session->inChannelCount * sizeof(int16_t), 1,
-                       gPcmDumpFh);
-            }
-            pthread_mutex_unlock(&gPcmDumpLock);
-#endif
-
-            session->framesIn += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesIn < session->frameCount) {
-                return 0;
-            }
-            spx_uint32_t frIn = session->framesIn;
-            spx_uint32_t frOut = session->apmFrameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(session->inResampler, 0, session->inBuf, &frIn,
-                                            session->procFrame->data_, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(session->inResampler, session->inBuf, &frIn,
-                                                        session->procFrame->data_, &frOut);
-            }
-            memmove(session->inBuf, session->inBuf + frIn * session->inChannelCount,
-                    (session->framesIn - frIn) * session->inChannelCount * sizeof(int16_t));
-            session->framesIn -= frIn;
-        } else {
-            size_t fr = session->frameCount - session->framesIn;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            memcpy(session->procFrame->data_ + session->framesIn * session->inChannelCount,
-                   inBuffer->s16, fr * session->inChannelCount * sizeof(int16_t));
-
-#ifdef DUAL_MIC_TEST
-            pthread_mutex_lock(&gPcmDumpLock);
-            if (gPcmDumpFh != NULL) {
-                fwrite(inBuffer->raw, fr * session->inChannelCount * sizeof(int16_t), 1,
-                       gPcmDumpFh);
-            }
-            pthread_mutex_unlock(&gPcmDumpLock);
-#endif
-
-            session->framesIn += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesIn < session->frameCount) {
-                return 0;
-            }
-            session->framesIn = 0;
-        }
-        session->procFrame->samples_per_channel_ = session->apmFrameCount;
-
-        effect->session->apm->ProcessStream(session->procFrame);
-#else
         size_t fr = session->frameCount - session->framesIn;
         if (inBuffer->frameCount < fr) {
             fr = inBuffer->frameCount;
@@ -1696,7 +1164,6 @@
             return status;
         }
         outBuffer->frameCount = inBuffer->frameCount;
-#endif
 
         if (session->outBufSize < session->framesOut + session->frameCount) {
             int16_t* buf;
@@ -1713,30 +1180,7 @@
             session->outBuf = buf;
         }
 
-#ifdef WEBRTC_LEGACY
-        if (session->outResampler != NULL) {
-            spx_uint32_t frIn = session->apmFrameCount;
-            spx_uint32_t frOut = session->frameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(
-                        session->outResampler, 0, session->procFrame->data_, &frIn,
-                        session->outBuf + session->framesOut * session->outChannelCount, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(
-                        session->outResampler, session->procFrame->data_, &frIn,
-                        session->outBuf + session->framesOut * session->outChannelCount, &frOut);
-            }
-            session->framesOut += frOut;
-        } else {
-            memcpy(session->outBuf + session->framesOut * session->outChannelCount,
-                   session->procFrame->data_,
-                   session->frameCount * session->outChannelCount * sizeof(int16_t));
-            session->framesOut += session->frameCount;
-        }
-        size_t fr = session->framesOut;
-#else
         fr = session->framesOut;
-#endif
         if (framesRq - framesWr < fr) {
             fr = framesRq - framesWr;
         }
@@ -2129,63 +1573,6 @@
 
     if ((session->revProcessedMsk & session->revEnabledMsk) == session->revEnabledMsk) {
         effect->session->revProcessedMsk = 0;
-#ifdef WEBRTC_LEGACY
-        if (session->revResampler != NULL) {
-            size_t fr = session->frameCount - session->framesRev;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            if (session->revBufSize < session->framesRev + fr) {
-                int16_t* buf;
-                session->revBufSize = session->framesRev + fr;
-                buf = (int16_t*)realloc(
-                        session->revBuf,
-                        session->revBufSize * session->inChannelCount * sizeof(int16_t));
-                if (buf == NULL) {
-                    session->framesRev = 0;
-                    free(session->revBuf);
-                    session->revBuf = NULL;
-                    return -ENOMEM;
-                }
-                session->revBuf = buf;
-            }
-            memcpy(session->revBuf + session->framesRev * session->inChannelCount, inBuffer->s16,
-                   fr * session->inChannelCount * sizeof(int16_t));
-
-            session->framesRev += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesRev < session->frameCount) {
-                return 0;
-            }
-            spx_uint32_t frIn = session->framesRev;
-            spx_uint32_t frOut = session->apmFrameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(session->revResampler, 0, session->revBuf, &frIn,
-                                            session->revFrame->data_, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(session->revResampler, session->revBuf,
-                                                        &frIn, session->revFrame->data_, &frOut);
-            }
-            memmove(session->revBuf, session->revBuf + frIn * session->inChannelCount,
-                    (session->framesRev - frIn) * session->inChannelCount * sizeof(int16_t));
-            session->framesRev -= frIn;
-        } else {
-            size_t fr = session->frameCount - session->framesRev;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            memcpy(session->revFrame->data_ + session->framesRev * session->inChannelCount,
-                   inBuffer->s16, fr * session->inChannelCount * sizeof(int16_t));
-            session->framesRev += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesRev < session->frameCount) {
-                return 0;
-            }
-            session->framesRev = 0;
-        }
-        session->revFrame->samples_per_channel_ = session->apmFrameCount;
-        effect->session->apm->AnalyzeReverseStream(session->revFrame);
-#else
         size_t fr = session->frameCount - session->framesRev;
         if (inBuffer->frameCount < fr) {
             fr = inBuffer->frameCount;
@@ -2205,7 +1592,6 @@
             ALOGE("Process Reverse Stream failed with error %d\n", status);
             return status;
         }
-#endif
         return 0;
     } else {
         return -ENODATA;
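
With the WEBRTC_LEGACY paths removed, every effect in this file now drives the shared webrtc::AudioProcessing instance through the same GetConfig()/ApplyConfig() round trip instead of holding per-component handles. The following is a minimal sketch of that pattern, assuming the standard WebRTC header path; the free function and its name are illustrative and not part of this change.

    #include <modules/audio_processing/include/audio_processing.h>

    // Enable AECM and moderate noise suppression the same way AecInit() and
    // NsInit() above do: read the whole config, mutate it, apply it back.
    void enableAecAndNs(webrtc::AudioProcessing* apm) {
        webrtc::AudioProcessing::Config config = apm->GetConfig();
        config.echo_canceller.enabled = true;
        config.echo_canceller.mobile_mode = true;
        config.noise_suppression.enabled = true;
        config.noise_suppression.level =
                webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
        apm->ApplyConfig(config);
    }
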
diff --git a/media/libeffects/preprocessing/benchmarks/Android.bp b/media/libeffects/preprocessing/benchmarks/Android.bp
index 2808293..262fd19 100644
--- a/media/libeffects/preprocessing/benchmarks/Android.bp
+++ b/media/libeffects/preprocessing/benchmarks/Android.bp
@@ -1,31 +1,4 @@
 cc_benchmark {
-    name: "preprocessing_legacy_benchmark",
-    vendor: true,
-    relative_install_path: "soundfx",
-    srcs: ["preprocessing_benchmark.cpp"],
-    shared_libs: [
-        "libaudiopreprocessing_legacy",
-        "libaudioutils",
-        "liblog",
-        "libutils",
-        "libwebrtc_audio_preprocessing",
-    ],
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-        "-Wextra",
-    ],
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-        "libwebrtc_absl_headers",
-    ],
-}
-
-cc_benchmark {
     name: "preprocessing_benchmark",
     vendor: true,
     relative_install_path: "soundfx",
diff --git a/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp b/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
index 3a0ad6d..694a6c4 100644
--- a/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
+++ b/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
@@ -54,9 +54,7 @@
 #include <cstdlib>
 #include <random>
 #include <vector>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <benchmark/benchmark.h>
 #include <hardware/audio_effect.h>
@@ -76,10 +74,8 @@
         {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
         // ns  uuid
         {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
-#ifndef WEBRTC_LEGACY
         // agc2 uuid
         {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},
-#endif
 };
 constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
 constexpr audio_channel_mask_t kChMasks[] = {
@@ -93,9 +89,7 @@
     PREPROC_AGC,  // Automatic Gain Control
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_NUM_EFFECTS
 };
 
diff --git a/media/libeffects/preprocessing/tests/Android.bp b/media/libeffects/preprocessing/tests/Android.bp
index 045b0d3..b439880 100644
--- a/media/libeffects/preprocessing/tests/Android.bp
+++ b/media/libeffects/preprocessing/tests/Android.bp
@@ -1,37 +1,5 @@
 // audio preprocessing unit test
 cc_test {
-    name: "AudioPreProcessingLegacyTest",
-
-    vendor: true,
-
-    relative_install_path: "soundfx",
-
-    srcs: ["PreProcessingTest.cpp"],
-
-    shared_libs: [
-        "libaudiopreprocessing_legacy",
-        "libaudioutils",
-        "liblog",
-        "libutils",
-        "libwebrtc_audio_preprocessing",
-    ],
-
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-        "-Wextra",
-    ],
-
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-    ],
-}
-
-cc_test {
     name: "AudioPreProcessingTest",
 
     vendor: true,
diff --git a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
index 65b9469..5f223c9 100644
--- a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
@@ -22,9 +22,7 @@
 
 #include <audio_effects/effect_aec.h>
 #include <audio_effects/effect_agc.h>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <log/log.h>
 
@@ -38,9 +36,7 @@
 // types of pre processing modules
 enum PreProcId {
     PREPROC_AGC,  // Automatic Gain Control
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
     PREPROC_NUM_EFFECTS
@@ -57,11 +53,9 @@
     ARG_AGC_COMP_LVL,
     ARG_AEC_DELAY,
     ARG_NS_LVL,
-#ifndef WEBRTC_LEGACY
     ARG_AGC2_GAIN,
     ARG_AGC2_LVL,
     ARG_AGC2_SAT_MGN
-#endif
 };
 
 struct preProcConfigParams_t {
@@ -70,19 +64,15 @@
     int nsLevel = 0;         // a value between 0-3
     int agcTargetLevel = 3;  // in dB
     int agcCompLevel = 9;    // in dB
-#ifndef WEBRTC_LEGACY
     float agc2Gain = 0.f;              // in dB
     float agc2SaturationMargin = 2.f;  // in dB
     int agc2Level = 0;                 // either kRms(0) or kPeak(1)
-#endif
     int aecDelay = 0;  // in ms
 };
 
 const effect_uuid_t kPreProcUuids[PREPROC_NUM_EFFECTS] = {
         {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // agc uuid
-#ifndef WEBRTC_LEGACY
         {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},  // agc2 uuid
-#endif
         {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // aec uuid
         {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // ns  uuid
 };
@@ -138,24 +128,20 @@
     printf("\n           Enable Noise Suppression, default disabled");
     printf("\n     --agc");
     printf("\n           Enable Gain Control, default disabled");
-#ifndef WEBRTC_LEGACY
     printf("\n     --agc2");
     printf("\n           Enable Gain Controller 2, default disabled");
-#endif
     printf("\n     --ns_lvl <ns_level>");
     printf("\n           Noise Suppression level in dB, default value 0dB");
     printf("\n     --agc_tgt_lvl <target_level>");
     printf("\n           AGC Target Level in dB, default value 3dB");
     printf("\n     --agc_comp_lvl <comp_level>");
     printf("\n           AGC Comp Level in dB, default value 9dB");
-#ifndef WEBRTC_LEGACY
     printf("\n     --agc2_gain <fixed_digital_gain>");
     printf("\n           AGC Fixed Digital Gain in dB, default value 0dB");
     printf("\n     --agc2_lvl <level_estimator>");
     printf("\n           AGC Adaptive Digital Level Estimator, default value kRms");
     printf("\n     --agc2_sat_mgn <saturation_margin>");
     printf("\n           AGC Adaptive Digital Saturation Margin in dB, default value 2dB");
-#endif
     printf("\n     --aec_delay <delay>");
     printf("\n           AEC delay value in ms, default value 0ms");
     printf("\n");
@@ -217,18 +203,14 @@
             {"ch_mask", required_argument, nullptr, ARG_CH_MASK},
             {"agc_tgt_lvl", required_argument, nullptr, ARG_AGC_TGT_LVL},
             {"agc_comp_lvl", required_argument, nullptr, ARG_AGC_COMP_LVL},
-#ifndef WEBRTC_LEGACY
             {"agc2_gain", required_argument, nullptr, ARG_AGC2_GAIN},
             {"agc2_lvl", required_argument, nullptr, ARG_AGC2_LVL},
             {"agc2_sat_mgn", required_argument, nullptr, ARG_AGC2_SAT_MGN},
-#endif
             {"aec_delay", required_argument, nullptr, ARG_AEC_DELAY},
             {"ns_lvl", required_argument, nullptr, ARG_NS_LVL},
             {"aec", no_argument, &effectEn[PREPROC_AEC], 1},
             {"agc", no_argument, &effectEn[PREPROC_AGC], 1},
-#ifndef WEBRTC_LEGACY
             {"agc2", no_argument, &effectEn[PREPROC_AGC2], 1},
-#endif
             {"ns", no_argument, &effectEn[PREPROC_NS], 1},
             {nullptr, 0, nullptr, 0},
     };
@@ -277,7 +259,6 @@
                 preProcCfgParams.agcCompLevel = atoi(optarg);
                 break;
             }
-#ifndef WEBRTC_LEGACY
             case ARG_AGC2_GAIN: {
                 preProcCfgParams.agc2Gain = atof(optarg);
                 break;
@@ -290,7 +271,6 @@
                 preProcCfgParams.agc2SaturationMargin = atof(optarg);
                 break;
             }
-#endif
             case ARG_AEC_DELAY: {
                 preProcCfgParams.aecDelay = atoi(optarg);
                 break;
@@ -387,7 +367,6 @@
             return EXIT_FAILURE;
         }
     }
-#ifndef WEBRTC_LEGACY
     if (effectEn[PREPROC_AGC2]) {
         if (int status = preProcSetConfigParam(AGC2_PARAM_FIXED_DIGITAL_GAIN,
                                                (float)preProcCfgParams.agc2Gain,
@@ -411,7 +390,6 @@
             return EXIT_FAILURE;
         }
     }
-#endif
     if (effectEn[PREPROC_NS]) {
         if (int status = preProcSetConfigParam(NS_PARAM_LEVEL, (uint32_t)preProcCfgParams.nsLevel,
                                                effectHandle[PREPROC_NS]);
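
The AGC2 branch above feeds three values through preProcSetConfigParam(), whose body is outside this hunk. As an assumption about what that helper does for a float parameter such as AGC2_PARAM_FIXED_DIGITAL_GAIN, here is a sketch of the usual effect_param_t packing for EFFECT_CMD_SET_PARAM; setAgc2Gain() is a hypothetical name.

    #include <audio_effects/effect_agc2.h>
    #include <hardware/audio_effect.h>
    #include <cstring>

    // Pack "param id, then value" into an effect_param_t and send it with
    // EFFECT_CMD_SET_PARAM; with a 4-byte param id the value needs no padding.
    int setAgc2Gain(effect_handle_t handle, float gainDb) {
        uint32_t buf[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
        effect_param_t* p = reinterpret_cast<effect_param_t*>(buf);
        uint32_t param = AGC2_PARAM_FIXED_DIGITAL_GAIN;
        p->psize = sizeof(param);
        p->vsize = sizeof(gainDb);
        memcpy(p->data, &param, sizeof(param));
        memcpy(p->data + sizeof(param), &gainDb, sizeof(gainDb));
        int reply = 0;
        uint32_t replySize = sizeof(reply);
        return (*handle)->command(handle, EFFECT_CMD_SET_PARAM,
                                  sizeof(effect_param_t) + p->psize + p->vsize, p,
                                  &replySize, &reply);
    }
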
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index 39caf53..7ed76d8 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -38,7 +38,8 @@
     FLAGS,
     SETMEDIACAS,
     NAME,
-    GETMETRICS
+    GETMETRICS,
+    SETENTRYPOINT
 };
 
 class BpMediaExtractor : public BpInterface<IMediaExtractor> {
@@ -142,6 +143,13 @@
         }
         return nm;
     }
+
+    virtual status_t setEntryPoint(EntryPoint entryPoint) {
+        Parcel data, reply;
+        data.writeInterfaceToken(BpMediaExtractor::getInterfaceDescriptor());
+        data.writeInt32(static_cast<int32_t>(entryPoint));
+        return remote()->transact(SETENTRYPOINT, data, &reply);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaExtractor, "android.media.IMediaExtractor");
@@ -232,6 +240,16 @@
             reply->writeString8(nm);
             return NO_ERROR;
         }
+        case SETENTRYPOINT: {
+            ALOGV("setEntryPoint");
+            CHECK_INTERFACE(IMediaExtractor, data, reply);
+            int32_t entryPoint;
+            status_t err = data.readInt32(&entryPoint);
+            if (err == OK) {
+                setEntryPoint(EntryPoint(entryPoint));
+            }
+            return err;
+        }
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/include/android/IMediaExtractor.h b/media/libmedia/include/android/IMediaExtractor.h
index 3e035ad..f9cafde 100644
--- a/media/libmedia/include/android/IMediaExtractor.h
+++ b/media/libmedia/include/android/IMediaExtractor.h
@@ -63,6 +63,15 @@
     virtual status_t setMediaCas(const HInterfaceToken &casToken) = 0;
 
     virtual String8 name() = 0;
+
+    enum class EntryPoint {
+        SDK = 1,
+        NDK_WITH_JVM = 2,
+        NDK_NO_JVM = 3,
+        OTHER = 4,
+    };
+
+    virtual status_t setEntryPoint(EntryPoint entryPoint) = 0;
 };
 
 
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
index 5ec5e08..b4cc65f 100644
--- a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "VideoTrackTranscoder"
 
 #include <android-base/logging.h>
+#include <android-base/properties.h>
 #include <media/NdkCommon.h>
 #include <media/VideoTrackTranscoder.h>
 #include <utils/AndroidThreads.h>
@@ -39,7 +40,8 @@
 // Default key frame interval in seconds.
 static constexpr float kDefaultKeyFrameIntervalSeconds = 1.0f;
 // Default codec operating rate.
-static constexpr int32_t kDefaultCodecOperatingRate = 240;
+static int32_t kDefaultCodecOperatingRate =
+        base::GetIntProperty("debug.media.transcoding.codec_max_operating_rate", /*default*/ 240);
 // Default codec priority.
 static constexpr int32_t kDefaultCodecPriority = 1;
 // Default bitrate, in case source estimation fails.
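
Because kDefaultCodecOperatingRate is now read from a system property during static initialization, the override only takes effect if the property is set before the process that loads this library starts. A standalone sketch of the same android-base call, with the wrapper name made up for illustration:

    #include <android-base/properties.h>
    #include <cstdint>

    // Returns the debug override if the property is set to a valid integer,
    // otherwise the previous hard-coded default of 240.
    int32_t defaultCodecOperatingRate() {
        return android::base::GetIntProperty(
                "debug.media.transcoding.codec_max_operating_rate", 240);
    }
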
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 0af97df..447d599 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -1137,11 +1137,24 @@
     if (!truncatePreAllocation()) {
         if (err == OK) { err = ERROR_IO; }
     }
+
+    // TODO(b/174770856) remove this measurement (and perhaps the fsync)
+    nsecs_t sync_started = systemTime(SYSTEM_TIME_REALTIME);
     if (fsync(mFd) != 0) {
         ALOGW("(ignored)fsync err:%s(%d)", std::strerror(errno), errno);
         // Don't bubble up fsync error, b/157291505.
         // if (err == OK) { err = ERROR_IO; }
     }
+    nsecs_t sync_finished = systemTime(SYSTEM_TIME_REALTIME);
+    nsecs_t sync_elapsed_ns = sync_finished - sync_started;
+    int64_t filesize = -1;
+    struct stat statbuf;
+    if (fstat(mFd, &statbuf) == 0) {
+        filesize = statbuf.st_size;
+    }
+    ALOGD("final fsync() takes %" PRId64 " ms, file size %" PRId64,
+          sync_elapsed_ns / 1000000, (int64_t) filesize);
+
     if (close(mFd) != 0) {
         ALOGE("close err:%s(%d)", std::strerror(errno), errno);
         if (err == OK) { err = ERROR_IO; }
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 6245014..f2c7dd6 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -50,8 +50,9 @@
       mSampleTimeUs(timeUs) {
 }
 
-NuMediaExtractor::NuMediaExtractor()
-    : mTotalBitrate(-1LL),
+NuMediaExtractor::NuMediaExtractor(EntryPoint entryPoint)
+    : mEntryPoint(entryPoint),
+      mTotalBitrate(-1LL),
       mDurationUs(-1LL) {
 }
 
@@ -93,6 +94,7 @@
     if (mImpl == NULL) {
         return ERROR_UNSUPPORTED;
     }
+    setEntryPointToRemoteMediaExtractor();
 
     status_t err = OK;
     if (!mCasToken.empty()) {
@@ -134,6 +136,7 @@
     if (mImpl == NULL) {
         return ERROR_UNSUPPORTED;
     }
+    setEntryPointToRemoteMediaExtractor();
 
     if (!mCasToken.empty()) {
         err = mImpl->setMediaCas(mCasToken);
@@ -168,6 +171,7 @@
     if (mImpl == NULL) {
         return ERROR_UNSUPPORTED;
     }
+    setEntryPointToRemoteMediaExtractor();
 
     if (!mCasToken.empty()) {
         err = mImpl->setMediaCas(mCasToken);
@@ -489,6 +493,16 @@
     }
 }
 
+void NuMediaExtractor::setEntryPointToRemoteMediaExtractor() {
+    if (mImpl == NULL) {
+        return;
+    }
+    status_t err = mImpl->setEntryPoint(mEntryPoint);
+    if (err != OK) {
+        ALOGW("Failed to set entry point with error %d.", err);
+    }
+}
+
 ssize_t NuMediaExtractor::fetchAllTrackSamples(
         int64_t seekTimeUs, MediaSource::ReadOptions::SeekMode mode) {
     TrackInfo *minInfo = NULL;
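
For context on the constructor change: callers now state where the extractor request originates, and each setDataSource() overload forwards that choice to the remote extractor once the underlying extractor exists. A minimal, hypothetical caller (the path argument and chosen entry point are for illustration only):

    #include <media/stagefright/NuMediaExtractor.h>

    // Open a clip and tag the session as coming from NDK code without a JVM.
    android::status_t openClip(const char* path) {
        android::sp<android::NuMediaExtractor> extractor =
                new android::NuMediaExtractor(
                        android::NuMediaExtractor::EntryPoint::NDK_NO_JVM);
        // setDataSource() calls setEntryPointToRemoteMediaExtractor(), which
        // records the entry point in the remote extractor's metrics item.
        return extractor->setDataSource(nullptr /* httpService */, path);
    }
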
diff --git a/media/libstagefright/RemoteMediaExtractor.cpp b/media/libstagefright/RemoteMediaExtractor.cpp
index 25e43c2..381eb1a 100644
--- a/media/libstagefright/RemoteMediaExtractor.cpp
+++ b/media/libstagefright/RemoteMediaExtractor.cpp
@@ -39,6 +39,12 @@
 static const char *kExtractorFormat = "android.media.mediaextractor.fmt";
 static const char *kExtractorMime = "android.media.mediaextractor.mime";
 static const char *kExtractorTracks = "android.media.mediaextractor.ntrk";
+static const char *kExtractorEntryPoint = "android.media.mediaextractor.entry";
+
+static const char *kEntryPointSdk = "sdk";
+static const char *kEntryPointWithJvm = "ndk-with-jvm";
+static const char *kEntryPointNoJvm = "ndk-no-jvm";
+static const char *kEntryPointOther = "other";
 
 RemoteMediaExtractor::RemoteMediaExtractor(
         MediaExtractor *extractor,
@@ -74,6 +80,9 @@
             }
             // what else is interesting and not already available?
         }
+        // By default, we set the entry point to be "other". Clients of this
+        // class will override this value by calling setEntryPoint.
+        mMetricsItem->setCString(kExtractorEntryPoint, kEntryPointOther);
     }
 }
 
@@ -143,6 +152,28 @@
     return String8(mExtractor->name());
 }
 
+status_t RemoteMediaExtractor::setEntryPoint(EntryPoint entryPoint) {
+    const char* entryPointString;
+    switch (entryPoint) {
+        case EntryPoint::SDK:
+            entryPointString = kEntryPointSdk;
+            break;
+        case EntryPoint::NDK_WITH_JVM:
+            entryPointString = kEntryPointWithJvm;
+            break;
+        case EntryPoint::NDK_NO_JVM:
+            entryPointString = kEntryPointNoJvm;
+            break;
+        case EntryPoint::OTHER:
+            entryPointString = kEntryPointOther;
+            break;
+        default:
+            return BAD_VALUE;
+    }
+    mMetricsItem->setCString(kExtractorEntryPoint, entryPointString);
+    return OK;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 // static
diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp
index 2b0494c..8698d33 100644
--- a/media/libstagefright/bqhelper/Android.bp
+++ b/media/libstagefright/bqhelper/Android.bp
@@ -101,7 +101,6 @@
         "//apex_available:platform",
     ],
     vendor_available: false,
-    min_sdk_version: "29",
     static_libs: [
         "libgui_bufferqueue_static",
     ],
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index f242b19..7752bda 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -33,7 +33,7 @@
 
 #include <media/stagefright/foundation/hexdump.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
 #include <binder/Parcel.h>
 #endif
 
@@ -646,7 +646,7 @@
     return s;
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
 // static
 sp<AMessage> AMessage::FromParcel(const Parcel &parcel, size_t maxNestingLevel) {
     int32_t what = parcel.readInt32();
@@ -813,7 +813,7 @@
         }
     }
 }
-#endif  // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif  // __ANDROID_VNDK__
 
 sp<AMessage> AMessage::changesFrom(const sp<const AMessage> &other, bool deep) const {
     if (other == NULL) {
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index b1ed077..8722e14 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -27,7 +27,7 @@
 #include "ADebug.h"
 #include "AString.h"
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
 #include <binder/Parcel.h>
 #endif
 
@@ -365,7 +365,7 @@
     return !strcasecmp(mData + mSize - suffixLen, suffix);
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
 // static
 AString AString::FromParcel(const Parcel &parcel) {
     size_t size = static_cast<size_t>(parcel.readInt32());
@@ -380,7 +380,7 @@
     }
     return err;
 }
-#endif // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif
 
 AString AStringPrintf(const char *format, ...) {
     va_list ap;
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index 39670a2..ebf1035 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -86,11 +86,6 @@
                 "-DNO_IMEMORY",
             ],
         },
-        apex: {
-            exclude_shared_libs: [
-                "libbinder",
-            ],
-        },
         darwin: {
             enabled: false,
         },
diff --git a/media/libstagefright/foundation/MediaBuffer.cpp b/media/libstagefright/foundation/MediaBuffer.cpp
index 68df21f..8e245dc 100644
--- a/media/libstagefright/foundation/MediaBuffer.cpp
+++ b/media/libstagefright/foundation/MediaBuffer.cpp
@@ -51,12 +51,12 @@
       mRangeLength(size),
       mOwnsData(true),
       mMetaData(new MetaDataBase) {
-#if !defined(NO_IMEMORY) && !defined(__ANDROID_APEX__)
+#ifndef NO_IMEMORY
     if (size < kSharedMemThreshold
             || std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
 #endif
         mData = malloc(size);
-#if !defined(NO_IMEMORY) && !defined(__ANDROID_APEX__)
+#ifndef NO_IMEMORY
     } else {
         ALOGV("creating memoryDealer");
         size_t newSize = 0;
diff --git a/media/libstagefright/foundation/MediaBufferGroup.cpp b/media/libstagefright/foundation/MediaBufferGroup.cpp
index fc98f28..3c25047 100644
--- a/media/libstagefright/foundation/MediaBufferGroup.cpp
+++ b/media/libstagefright/foundation/MediaBufferGroup.cpp
@@ -62,7 +62,7 @@
         mInternal->mGrowthLimit = buffers;
     }
 
-#if !defined(NO_IMEMORY) && !defined(__ANDROID_APEX__)
+#ifndef NO_IMEMORY
     if (buffer_size >= kSharedMemoryThreshold) {
         ALOGD("creating MemoryDealer");
         // Using a single MemoryDealer is efficient for a group of shared memory objects.
diff --git a/media/libstagefright/foundation/MetaData.cpp b/media/libstagefright/foundation/MetaData.cpp
index 7f48cfd..8174597 100644
--- a/media/libstagefright/foundation/MetaData.cpp
+++ b/media/libstagefright/foundation/MetaData.cpp
@@ -28,7 +28,7 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaData.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
 #include <binder/Parcel.h>
 #endif
 
@@ -48,7 +48,7 @@
 MetaData::~MetaData() {
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
 /* static */
 sp<MetaData> MetaData::createFromParcel(const Parcel &parcel) {
 
diff --git a/media/libstagefright/foundation/MetaDataBase.cpp b/media/libstagefright/foundation/MetaDataBase.cpp
index 3f050ea..4b439c6 100644
--- a/media/libstagefright/foundation/MetaDataBase.cpp
+++ b/media/libstagefright/foundation/MetaDataBase.cpp
@@ -28,7 +28,7 @@
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/MetaDataBase.h>
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
 #include <binder/Parcel.h>
 #endif
 
@@ -452,7 +452,7 @@
     }
 }
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
 status_t MetaDataBase::writeToParcel(Parcel &parcel) {
     status_t ret;
     size_t numItems = mInternalData->mItems.size();
@@ -532,7 +532,7 @@
     ALOGW("no metadata in parcel");
     return UNKNOWN_ERROR;
 }
-#endif // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif
 
 }  // namespace android
 
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
index 31e58ba..b5d6666 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
@@ -63,7 +63,7 @@
     AMessage();
     AMessage(uint32_t what, const sp<const AHandler> &handler);
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
     // Construct an AMessage from a parcel.
     // nestingAllowed determines how many levels AMessage can be nested inside
     // AMessage. The default value here is arbitrarily set to 255.
@@ -88,7 +88,7 @@
     // All items in the AMessage must have types that are recognized by
     // FromParcel(); otherwise, TRESPASS error will occur.
     void writeToParcel(Parcel *parcel) const;
-#endif // !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#endif
 
     void setWhat(uint32_t what);
     uint32_t what() const;
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
index 517774b..deef0d4 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
@@ -89,7 +89,7 @@
 
     void tolower();
 
-#if !defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
+#ifndef __ANDROID_VNDK__
     static AString FromParcel(const Parcel &parcel);
     status_t writeToParcel(Parcel *parcel) const;
 #endif
diff --git a/media/libstagefright/include/media/stagefright/MediaBuffer.h b/media/libstagefright/include/media/stagefright/MediaBuffer.h
index 2c03f27..9145b63 100644
--- a/media/libstagefright/include/media/stagefright/MediaBuffer.h
+++ b/media/libstagefright/include/media/stagefright/MediaBuffer.h
@@ -46,7 +46,7 @@
     explicit MediaBuffer(size_t size);
 
     explicit MediaBuffer(const sp<ABuffer> &buffer);
-#if !defined(NO_IMEMORY) && !defined(__ANDROID_APEX__)
+#ifndef NO_IMEMORY
     MediaBuffer(const sp<IMemory> &mem) :
          // TODO: Using unsecurePointer() has some associated security pitfalls
          //       (see declaration for details).
@@ -97,7 +97,7 @@
     }
 
     virtual int remoteRefcount() const {
-#if !defined(NO_IMEMORY) && !defined(__ANDROID_APEX__)
+#ifndef NO_IMEMORY
          // TODO: Using unsecurePointer() has some associated security pitfalls
          //       (see declaration for details).
          //       Either document why it is safe in this case or address the
@@ -114,7 +114,7 @@
 
     // returns old value
     int addRemoteRefcount(int32_t value) {
-#if !defined(NO_IMEMORY) && !defined(__ANDROID_APEX__)
+#ifndef NO_IMEMORY
           // TODO: Using unsecurePointer() has some associated security pitfalls
          //       (see declaration for details).
          //       Either document why it is safe in this case or address the
@@ -132,7 +132,7 @@
     }
 
     static bool isDeadObject(const sp<IMemory> &memory) {
-#if !defined(NO_IMEMORY) && !defined(__ANDROID_APEX__)
+#ifndef NO_IMEMORY
          // TODO: Using unsecurePointer() has some associated security pitfalls
          //       (see declaration for details).
          //       Either document why it is safe in this case or address the
@@ -235,7 +235,7 @@
     };
 
     inline SharedControl *getSharedControl() const {
-#if !defined(NO_IMEMORY) && !defined(__ANDROID_APEX__)
+#ifndef NO_IMEMORY
          // TODO: Using unsecurePointer() has some associated security pitfalls
          //       (see declaration for details).
          //       Either document why it is safe in this case or address the
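For context, with the `__ANDROID_APEX__` clause dropped the IMemory-backed MediaBuffer path guarded above now also builds inside the APEX whenever NO_IMEMORY is not defined. A minimal illustrative sketch of that path follows; the MemoryDealer sizes and names here are assumptions for illustration only, not part of this patch:

    #include <binder/MemoryDealer.h>
    #include <media/stagefright/MediaBuffer.h>

    // Illustrative only: allocate shared memory through a MemoryDealer and
    // wrap it in a MediaBuffer via the IMemory constructor guarded above.
    #ifndef NO_IMEMORY
    static android::MediaBuffer *makeSharedMediaBuffer() {
        using namespace android;
        sp<MemoryDealer> dealer = new MemoryDealer(64 * 1024, "example-dealer");
        sp<IMemory> mem = dealer->allocate(16 * 1024);
        if (mem == nullptr) {
            return nullptr;
        }
        return new MediaBuffer(mem);
    }
    #endif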
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index 227cead..d8f2b00 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -47,12 +47,14 @@
         SAMPLE_FLAG_ENCRYPTED   = 2,
     };
 
+    typedef IMediaExtractor::EntryPoint EntryPoint;
+
     // identical to IMediaExtractor::GetTrackMetaDataFlags
     enum GetTrackFormatFlags {
         kIncludeExtensiveMetaData = 1, // reads sample table and possibly stream headers
     };
 
-    NuMediaExtractor();
+    explicit NuMediaExtractor(EntryPoint entryPoint);
 
     status_t setDataSource(
             const sp<MediaHTTPService> &httpService,
@@ -128,6 +130,8 @@
         uint32_t mTrackFlags;  // bitmask of "TrackFlags"
     };
 
+    const EntryPoint mEntryPoint;
+
     mutable Mutex mLock;
 
     sp<DataSource> mDataSource;
@@ -139,6 +143,8 @@
     int64_t mTotalBitrate;  // in bits/sec
     int64_t mDurationUs;
 
+    void setEntryPointToRemoteMediaExtractor();
+
     ssize_t fetchAllTrackSamples(
             int64_t seekTimeUs = -1ll,
             MediaSource::ReadOptions::SeekMode mode =
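The new private helper declared above is not defined in this hunk. A hypothetical sketch of what it might do, assuming the extractor keeps its remote IMediaExtractor in a member (called mImpl here purely for illustration) and that IMediaExtractor exposes the setEntryPoint() method added to RemoteMediaExtractor in the next file:

    // Hypothetical sketch, not the actual implementation in this patch.
    void NuMediaExtractor::setEntryPointToRemoteMediaExtractor() {
        if (mImpl == nullptr) {
            return;  // no extractor created yet, nothing to tag
        }
        status_t err = mImpl->setEntryPoint(mEntryPoint);
        if (err != OK) {
            ALOGW("failed to set entry point on remote extractor: %d", err);
        }
    }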
diff --git a/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h b/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
index 2ce7bc7..25125f2 100644
--- a/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/RemoteMediaExtractor.h
@@ -42,6 +42,7 @@
     virtual uint32_t flags() const;
     virtual status_t setMediaCas(const HInterfaceToken &casToken);
     virtual String8 name();
+    virtual status_t setEntryPoint(EntryPoint entryPoint);
 
 private:
     MediaExtractor *mExtractor;
diff --git a/media/libstagefright/renderfright/include/renderengine/RenderEngine.h b/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
index 09a0f65..40fdff4 100644
--- a/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
+++ b/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
@@ -33,7 +33,7 @@
 /**
  * Allows to set RenderEngine backend to GLES (default) or Vulkan (NOT yet supported).
  */
-#define PROPERTY_DEBUG_RENDERENGINE_BACKEND "debug.renderengine.backend"
+#define PROPERTY_DEBUG_RENDERENGINE_BACKEND "debug.stagefright.renderengine.backend"
 
 struct ANativeWindowBuffer;
 
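Consumers of the renamed property would read it through the system property API. A minimal sketch, assuming the standard libcutils property_get() helper and assuming "vulkan"/"gles" as the recognized values (neither the helper nor those strings are defined by this patch):

    #include <cutils/properties.h>
    #include <string.h>

    // Sketch: pick the backend from the renamed debug property, defaulting
    // to GLES when it is unset, as the comment above describes.
    static bool useVulkanBackend() {
        char value[PROPERTY_VALUE_MAX] = "";
        property_get(PROPERTY_DEBUG_RENDERENGINE_BACKEND, value, "gles");
        return strcmp(value, "vulkan") == 0;
    }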
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index f57077c..07f9dd3 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -131,7 +131,7 @@
     unsigned start = (unsigned)((rand()* 1000LL)/RAND_MAX) + 15550;
     start &= ~1;
 
-    for (unsigned port = start; port < 65536; port += 2) {
+    for (unsigned port = start; port < 65535; port += 2) {
         struct sockaddr_in addr;
         memset(addr.sin_zero, 0, sizeof(addr.sin_zero));
         addr.sin_family = AF_INET;
@@ -149,6 +149,13 @@
                  (const struct sockaddr *)&addr, sizeof(addr)) == 0) {
             *rtpPort = port;
             return;
+        } else {
+            // bind failed; recreate the RTP socket before trying the next port,
+            // so we don't attempt another bind on a socket that already failed
+            close(*rtpSocket);
+
+            *rtpSocket = socket(AF_INET, SOCK_DGRAM, 0);
+            CHECK_GE(*rtpSocket, 0);
+            bumpSocketBufferSize(*rtpSocket);
         }
     }
 
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 0da0740..0c65e9e 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -22,6 +22,7 @@
 #include <media/NdkMediaExtractor.h>
 #include <media/NdkMediaErrorPriv.h>
 #include <media/NdkMediaFormatPriv.h>
+#include "NdkJavaVMHelperPriv.h"
 #include "NdkMediaDataSourcePriv.h"
 
 
@@ -63,7 +64,10 @@
 AMediaExtractor* AMediaExtractor_new() {
     ALOGV("ctor");
     AMediaExtractor *mData = new AMediaExtractor();
-    mData->mImpl = new NuMediaExtractor();
+    mData->mImpl = new NuMediaExtractor(
+        NdkJavaVMHelper::getJNIEnv() != nullptr
+                ? NuMediaExtractor::EntryPoint::NDK_WITH_JVM
+                : NuMediaExtractor::EntryPoint::NDK_NO_JVM );
     return mData;
 }
 
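NdkJavaVMHelper::getJNIEnv() itself is not part of this hunk. A rough sketch of the kind of check it implies, written against the standard JNI invocation API; this is an assumption for illustration, not the helper's real implementation:

    #include <jni.h>

    // Sketch: report whether a Java VM already exists in this process, which
    // is what the NDK_WITH_JVM / NDK_NO_JVM split above keys off of.
    static bool processHasJavaVM() {
        JavaVM *vm = nullptr;
        jsize numVMs = 0;
        if (JNI_GetCreatedJavaVMs(&vm, 1, &numVMs) != JNI_OK) {
            return false;
        }
        return numVMs > 0;
    }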
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index d442af6..78ad467 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -2593,7 +2593,7 @@
     sp<DeviceDescriptorBase> device = VALUE_OR_RETURN_STATUS(
             aidl2legacy_DeviceDescriptorBase(request.device));
     audio_output_flags_t flags = VALUE_OR_RETURN_STATUS(
-            aidl2legacy_audio_output_flags_mask(request.flags));
+            aidl2legacy_int32_t_audio_output_flags_t_mask(request.flags));
 
     audio_io_handle_t output;
     uint32_t latencyMs;
@@ -2643,7 +2643,8 @@
         response->output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
         response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
         response->latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(latencyMs));
-        response->flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_mask(flags));
+        response->flags = VALUE_OR_RETURN_STATUS(
+                legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
         return NO_ERROR;
     }
 
@@ -2819,7 +2820,7 @@
             device.mType,
             device.address().c_str(),
             VALUE_OR_RETURN_STATUS(aidl2legacy_AudioSourceType_audio_source_t(request.source)),
-            VALUE_OR_RETURN_STATUS(aidl2legacy_audio_input_flags_mask(request.flags)),
+            VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_input_flags_t_mask(request.flags)),
             AUDIO_DEVICE_NONE,
             String8{});
 
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index eaad6ef..3ab7737 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -60,6 +60,7 @@
 
 namespace android {
 
+using aidl_utils::statusTFromBinderStatus;
 using binder::Status;
 
 namespace {
@@ -3027,7 +3028,7 @@
                 bs = handle.second->disable(&status);
             }
             if (!bs.isOk()) {
-              status = bs.transactionError();
+              status = statusTFromBinderStatus(bs);
             }
         }
     }
@@ -3142,7 +3143,7 @@
             bs = (*handle)->disable(&status);
         }
         if (!bs.isOk()) {
-            status = bs.transactionError();
+            status = statusTFromBinderStatus(bs);
         }
     }
     return status;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 68b709f..6049f62 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -54,6 +54,7 @@
 
 namespace android {
 
+using aidl_utils::binderStatusFromStatusT;
 using binder::Status;
 using media::VolumeShaper;
 // ----------------------------------------------------------------------------
@@ -371,7 +372,7 @@
     if (*_aidl_return != OK) {
         return Status::ok();
     }
-    *timestamp = legacy2aidl_AudioTimestamp(legacy).value();
+    *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
     return Status::ok();
 }
 
@@ -2132,7 +2133,7 @@
 binder::Status AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
         int /*audio_session_t*/ triggerSession) {
     ALOGV("%s()", __func__);
-    return binder::Status::fromStatusT(
+    return binderStatusFromStatusT(
         mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
 }
 
@@ -2155,19 +2156,19 @@
     for (size_t i = 0; status == OK && i < mics.size(); ++i) {
        status = mics[i].writeToParcelable(&activeMicrophones->at(i));
     }
-    return binder::Status::fromStatusT(status);
+    return binderStatusFromStatusT(status);
 }
 
 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneDirection(
         int /*audio_microphone_direction_t*/ direction) {
     ALOGV("%s()", __func__);
-    return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
+    return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
             static_cast<audio_microphone_direction_t>(direction)));
 }
 
 binder::Status AudioFlinger::RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
     ALOGV("%s()", __func__);
-    return binder::Status::fromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
+    return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
 }
 
 // ----------------------------------------------------------------------------
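Both helpers pulled in by the using-declarations above convert between status_t and binder::Status. A small round-trip sketch, under the assumption that they live in media/AidlConversionUtil.h (the header itself is not shown in this diff):

    #include <media/AidlConversionUtil.h>

    // Sketch: wrap a legacy error for a binder reply, then recover it on the
    // receiving side with the matching helper used in Effects.cpp above.
    static android::status_t roundTrip() {
        using namespace android;
        binder::Status reply =
                aidl_utils::binderStatusFromStatusT(INVALID_OPERATION);
        return aidl_utils::statusTFromBinderStatus(reply);  // INVALID_OPERATION
    }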
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index dd128a2..90b93e2 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -57,7 +57,7 @@
     request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
     request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
-    request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_mask(flags));
+    request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
 
     status_t status = af->openOutput(request, &response);
     if (status == OK) {
@@ -134,7 +134,7 @@
     request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
     request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(deviceTypeAddr));
     request.source = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_source_t_AudioSourceType(source));
-    request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_mask(flags));
+    request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
 
     media::OpenInputResponse response;
     status_t status = af->openInput(request, &response);
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index 16814d9..4180e0c 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -71,6 +71,22 @@
         metrics_proto.set_tracks(ntrk);
     }
 
+    // android.media.mediaextractor.entry       string
+    std::string entry_point_string;
+    if (item->getString("android.media.mediaextractor.entry", &entry_point_string)) {
+        stats::mediametrics::ExtractorData::EntryPoint entry_point;
+        if (entry_point_string == "sdk") {
+            entry_point = stats::mediametrics::ExtractorData_EntryPoint_SDK;
+        } else if (entry_point_string == "ndk-with-jvm") {
+            entry_point = stats::mediametrics::ExtractorData_EntryPoint_NDK_WITH_JVM;
+        } else if (entry_point_string == "ndk-no-jvm") {
+            entry_point = stats::mediametrics::ExtractorData_EntryPoint_NDK_NO_JVM;
+        } else {
+            entry_point = stats::mediametrics::ExtractorData_EntryPoint_OTHER;
+        }
+        metrics_proto.set_entry_point(entry_point);
+    }
+
     std::string serialized;
     if (!metrics_proto.SerializeToString(&serialized)) {
         ALOGE("Failed to serialize extractor metrics");