Merge "MPEG4Extractor: use Vector<uint32_t> encryptedsizes"
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 1843ec4..ebc09d7 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -94,12 +94,12 @@
     // Do not distinguish null arrays from 0-sized arrays.
     for (int32_t i = 0; i < size; ++i) {
         // Parcel.writeParcelableArray
-        size_t len;
-        const char16_t* className = parcel->readString16Inplace(&len);
+        std::optional<std::string> className;
+        parcel->readUtf8FromUtf16(&className);
         ALOGV("%s: Read surface class = %s", __FUNCTION__,
-              className != NULL ? String8(className).string() : "<null>");
+              className.value_or("<null>").c_str());
 
-        if (className == NULL) {
+        if (className == std::nullopt) {
             continue;
         }
 
diff --git a/drm/libmediadrm/protos/Android.bp b/drm/libmediadrm/protos/Android.bp
new file mode 100644
index 0000000..b26cda4
--- /dev/null
+++ b/drm/libmediadrm/protos/Android.bp
@@ -0,0 +1,38 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is the version of the drm metrics protos configured for protobuf full on host.
+// It is used by the metrics_dump tool.
+
+cc_library_host_shared {
+    name: "libdrm_metrics_protos_full_host",
+    vendor_available: true,
+
+    include_dirs: ["external/protobuf/src"],
+
+    srcs: [
+        "metrics.proto",
+    ],
+
+    proto: {
+        export_proto_headers: true,
+        type: "full",
+    },
+
+    cflags: [
+        // Suppress unused parameter error. This error occurs
+        // when using the map type in a proto definition.
+        "-Wno-unused-parameter",
+    ],
+}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
index 2dcd00f..051a968 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
@@ -15,7 +15,7 @@
 namespace clearkey {
 
 std::string MemoryFileSystem::GetFileName(const std::string& path) {
-    size_t index = path.find_last_of("/");
+    size_t index = path.find_last_of('/');
     if (index != std::string::npos) {
         return path.substr(index+1);
     } else {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index ba19565..1654b11 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -625,21 +625,19 @@
         Mutexed<Output>::Locked output(mOutput);
         if (!output->buffers ||
                 output->buffers->hasPending() ||
-                output->buffers->numClientBuffers() >= output->numSlots) {
+                output->buffers->numActiveSlots() >= output->numSlots) {
             return;
         }
     }
-    size_t numInputSlots = mInput.lock()->numSlots;
-    for (size_t i = 0; i < numInputSlots; ++i) {
-        if (mPipelineWatcher.lock()->pipelineFull()) {
-            return;
-        }
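+    // Keep requesting new input buffers until the pipeline is full or every
+    // input slot is already active (owned by the client or the component).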
+    size_t numActiveSlots = 0;
+    while (!mPipelineWatcher.lock()->pipelineFull()) {
         sp<MediaCodecBuffer> inBuffer;
         size_t index;
         {
             Mutexed<Input>::Locked input(mInput);
-            if (input->buffers->numClientBuffers() >= input->numSlots) {
-                return;
+            numActiveSlots = input->buffers->numActiveSlots();
+            if (numActiveSlots >= input->numSlots) {
+                break;
             }
             if (!input->buffers->requestNewBuffer(&index, &inBuffer)) {
                 ALOGV("[%s] no new buffer available", mName);
@@ -649,6 +647,7 @@
         ALOGV("[%s] new input index = %zu [%p]", mName, index, inBuffer.get());
         mCallback->onInputBufferAvailable(index, inBuffer);
     }
+    ALOGV("[%s] # active slots after feedInputBufferIfAvailable = %zu", mName, numActiveSlots);
 }
 
 status_t CCodecBufferChannel::renderOutputBuffer(
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 10f7e66..689e3bb 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -489,11 +489,12 @@
     mBuffers.clear();
 }
 
-size_t FlexBuffersImpl::numClientBuffers() const {
+size_t FlexBuffersImpl::numActiveSlots() const {
     return std::count_if(
             mBuffers.begin(), mBuffers.end(),
             [](const Entry &entry) {
-                return (entry.clientBuffer != nullptr);
+                return (entry.clientBuffer != nullptr
+                        || !entry.compBuffer.expired());
             });
 }
 
@@ -639,11 +640,11 @@
     }
 }
 
-size_t BuffersArrayImpl::numClientBuffers() const {
+size_t BuffersArrayImpl::numActiveSlots() const {
     return std::count_if(
             mBuffers.begin(), mBuffers.end(),
             [](const Entry &entry) {
-                return entry.ownedByClient;
+                return entry.ownedByClient || !entry.compBuffer.expired();
             });
 }
 
@@ -693,8 +694,8 @@
     mImpl.flush();
 }
 
-size_t InputBuffersArray::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t InputBuffersArray::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 sp<Codec2Buffer> InputBuffersArray::createNewBuffer() {
@@ -731,8 +732,8 @@
     return nullptr;
 }
 
-size_t SlotInputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t SlotInputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 sp<Codec2Buffer> SlotInputBuffers::createNewBuffer() {
@@ -783,8 +784,8 @@
     return std::move(array);
 }
 
-size_t LinearInputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t LinearInputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 // static
@@ -960,8 +961,8 @@
     return std::move(array);
 }
 
-size_t GraphicMetadataInputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t GraphicMetadataInputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 sp<Codec2Buffer> GraphicMetadataInputBuffers::createNewBuffer() {
@@ -1025,8 +1026,8 @@
     return std::move(array);
 }
 
-size_t GraphicInputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t GraphicInputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 sp<Codec2Buffer> GraphicInputBuffers::createNewBuffer() {
@@ -1115,8 +1116,8 @@
     mImpl.getArray(array);
 }
 
-size_t OutputBuffersArray::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t OutputBuffersArray::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 void OutputBuffersArray::realloc(const std::shared_ptr<C2Buffer> &c2buffer) {
@@ -1226,8 +1227,8 @@
     return array;
 }
 
-size_t FlexOutputBuffers::numClientBuffers() const {
-    return mImpl.numClientBuffers();
+size_t FlexOutputBuffers::numActiveSlots() const {
+    return mImpl.numActiveSlots();
 }
 
 // LinearOutputBuffers
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index 0d4fa81..4772ab5 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -72,7 +72,7 @@
     /**
      * Return number of buffers the client owns.
      */
-    virtual size_t numClientBuffers() const = 0;
+    virtual size_t numActiveSlots() const = 0;
 
     /**
      * Examine image data from the buffer and update the format if necessary.
@@ -584,7 +584,7 @@
      * Return the number of buffers that are sent to the client but not released
      * yet.
      */
-    size_t numClientBuffers() const;
+    size_t numActiveSlots() const;
 
     /**
      * Return the number of buffers that are sent to the component but not
@@ -705,7 +705,7 @@
      * Return the number of buffers that are sent to the client but not released
      * yet.
      */
-    size_t numClientBuffers() const;
+    size_t numActiveSlots() const;
 
     /**
      * Return the size of the array.
@@ -765,7 +765,7 @@
 
     void flush() override;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() override;
@@ -796,7 +796,7 @@
 
     std::unique_ptr<InputBuffers> toArrayMode(size_t size) final;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() final;
@@ -826,7 +826,7 @@
 
     std::unique_ptr<InputBuffers> toArrayMode(size_t size) override;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() override;
@@ -894,7 +894,7 @@
 
     std::unique_ptr<InputBuffers> toArrayMode(size_t size) final;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() override;
@@ -924,7 +924,7 @@
     std::unique_ptr<InputBuffers> toArrayMode(
             size_t size) final;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
 protected:
     sp<Codec2Buffer> createNewBuffer() override;
@@ -965,7 +965,7 @@
         array->clear();
     }
 
-    size_t numClientBuffers() const final {
+    size_t numActiveSlots() const final {
         return 0u;
     }
 
@@ -1019,7 +1019,7 @@
 
     void getArray(Vector<sp<MediaCodecBuffer>> *array) const final;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
     /**
      * Reallocate the array, filled with buffers with the same size as given
@@ -1073,7 +1073,7 @@
 
     std::unique_ptr<OutputBuffersArray> toArrayMode(size_t size) override;
 
-    size_t numClientBuffers() const final;
+    size_t numActiveSlots() const final;
 
     /**
      * Return an appropriate Codec2Buffer object for the type of buffers.
diff --git a/media/libeffects/lvm/benchmarks/Android.bp b/media/libeffects/lvm/benchmarks/Android.bp
new file mode 100644
index 0000000..420e172
--- /dev/null
+++ b/media/libeffects/lvm/benchmarks/Android.bp
@@ -0,0 +1,16 @@
+cc_benchmark {
+    name: "lvm_benchmark",
+    vendor: true,
+    srcs: ["lvm_benchmark.cpp"],
+    static_libs: [
+        "libbundlewrapper",
+        "libmusicbundle",
+    ],
+    shared_libs: [
+        "libaudioutils",
+        "liblog",
+    ],
+    header_libs: [
+        "libhardware_headers",
+    ],
+}
diff --git a/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp b/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp
new file mode 100644
index 0000000..ee9da3f
--- /dev/null
+++ b/media/libeffects/lvm/benchmarks/lvm_benchmark.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <array>
+#include <climits>
+#include <cstdlib>
+#include <random>
+#include <vector>
+#include <log/log.h>
+#include <benchmark/benchmark.h>
+#include <hardware/audio_effect.h>
+#include <system/audio.h>
+
+extern audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM;
+constexpr effect_uuid_t kEffectUuids[] = {
+        // NXP SW BassBoost
+        {0x8631f300, 0x72e2, 0x11df, 0xb57e, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        // NXP SW Virtualizer
+        {0x1d4033c0, 0x8557, 0x11df, 0x9f2d, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        // NXP SW Equalizer
+        {0xce772f20, 0x847d, 0x11df, 0xbb17, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+        // NXP SW Volume
+        {0x119341a0, 0x8469, 0x11df, 0x81f9, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
+};
+
+constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
+
+constexpr size_t kFrameCount = 2048;
+
+constexpr audio_channel_mask_t kChMasks[] = {
+        AUDIO_CHANNEL_OUT_MONO,    AUDIO_CHANNEL_OUT_STEREO,  AUDIO_CHANNEL_OUT_2POINT1,
+        AUDIO_CHANNEL_OUT_QUAD,    AUDIO_CHANNEL_OUT_PENTA,   AUDIO_CHANNEL_OUT_5POINT1,
+        AUDIO_CHANNEL_OUT_6POINT1, AUDIO_CHANNEL_OUT_7POINT1,
+};
+
+constexpr size_t kNumChMasks = std::size(kChMasks);
+constexpr int kSampleRate = 44100;
+// TODO(b/131240940) Remove once effects are updated to produce mono output
+constexpr size_t kMinOutputChannelCount = 2;
+
+/*******************************************************************
+ * A test result running on Pixel 3 for comparison.
+ * The first parameter indicates the number of channels.
+ * The second parameter indicates the effect.
+ * 0: Bass Boost, 1: Virtualizer, 2: Equalizer, 3: Volume
+ * -----------------------------------------------------
+ * Benchmark           Time             CPU   Iterations
+ * -----------------------------------------------------
+ * BM_LVM/2/0     131279 ns       130855 ns         5195
+ * BM_LVM/2/1     184814 ns       184219 ns         3799
+ * BM_LVM/2/2      91935 ns        91649 ns         7647
+ * BM_LVM/2/3      26707 ns        26623 ns        26281
+ * BM_LVM/3/0     172130 ns       171562 ns         4085
+ * BM_LVM/3/1     192443 ns       191923 ns         3644
+ * BM_LVM/3/2     127444 ns       127107 ns         5483
+ * BM_LVM/3/3      26811 ns        26730 ns        26163
+ * BM_LVM/4/0     223688 ns       223076 ns         3133
+ * BM_LVM/4/1     204961 ns       204408 ns         3425
+ * BM_LVM/4/2     169162 ns       168708 ns         4143
+ * BM_LVM/4/3      37330 ns        37225 ns        18795
+ * BM_LVM/5/0     272628 ns       271668 ns         2568
+ * BM_LVM/5/1     218487 ns       217883 ns         3212
+ * BM_LVM/5/2     211049 ns       210479 ns         3324
+ * BM_LVM/5/3      46962 ns        46835 ns        15051
+ * BM_LVM/6/0     318881 ns       317734 ns         2216
+ * BM_LVM/6/1     231899 ns       231244 ns         3028
+ * BM_LVM/6/2     252655 ns       251963 ns         2771
+ * BM_LVM/6/3      54944 ns        54794 ns        12799
+ * BM_LVM/7/0     366622 ns       365262 ns         1916
+ * BM_LVM/7/1     245076 ns       244388 ns         2866
+ * BM_LVM/7/2     295105 ns       294304 ns         2379
+ * BM_LVM/7/3      63595 ns        63420 ns        11070
+ * BM_LVM/8/0     410957 ns       409387 ns         1706
+ * BM_LVM/8/1     257824 ns       257098 ns         2723
+ * BM_LVM/8/2     342546 ns       341530 ns         2059
+ * BM_LVM/8/3      72896 ns        72700 ns         9685
+ *******************************************************************/
+
+static void BM_LVM(benchmark::State& state) {
+    const size_t chMask = kChMasks[state.range(0) - 1];
+    const effect_uuid_t uuid = kEffectUuids[state.range(1)];
+    const size_t channelCount = audio_channel_count_from_out_mask(chMask);
+
+    // Initialize input buffer with deterministic pseudo-random values
+    std::minstd_rand gen(chMask);
+    std::uniform_real_distribution<> dis(-1.0f, 1.0f);
+    std::vector<float> input(kFrameCount * channelCount);
+    for (auto& in : input) {
+        in = dis(gen);
+    }
+
+    effect_handle_t effectHandle = nullptr;
+    if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(&uuid, 1, 1, &effectHandle);
+        status != 0) {
+        ALOGE("create_effect returned an error = %d\n", status);
+        return;
+    }
+
+    effect_config_t config{};
+    config.inputCfg.samplingRate = config.outputCfg.samplingRate = kSampleRate;
+    config.inputCfg.channels = config.outputCfg.channels = chMask;
+    config.inputCfg.format = config.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+
+    int reply = 0;
+    uint32_t replySize = sizeof(reply);
+    if (int status = (*effectHandle)
+                             ->command(effectHandle, EFFECT_CMD_SET_CONFIG, sizeof(effect_config_t),
+                                       &config, &replySize, &reply);
+        status != 0) {
+        ALOGE("command returned an error = %d\n", status);
+        return;
+    }
+
+    if (int status =
+                (*effectHandle)
+                        ->command(effectHandle, EFFECT_CMD_ENABLE, 0, nullptr, &replySize, &reply);
+        status != 0) {
+        ALOGE("Command enable call returned error %d\n", reply);
+        return;
+    }
+
+    // Run the test
+    for (auto _ : state) {
+        std::vector<float> output(kFrameCount * std::max(channelCount, kMinOutputChannelCount));
+
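+        // DoNotOptimize()/ClobberMemory() keep the compiler from optimizing away
+        // the buffer accesses around the process() call being measured.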
+        benchmark::DoNotOptimize(input.data());
+        benchmark::DoNotOptimize(output.data());
+
+        audio_buffer_t inBuffer = {.frameCount = kFrameCount, .f32 = input.data()};
+        audio_buffer_t outBuffer = {.frameCount = kFrameCount, .f32 = output.data()};
+        (*effectHandle)->process(effectHandle, &inBuffer, &outBuffer);
+
+        benchmark::ClobberMemory();
+    }
+
+    state.SetComplexityN(state.range(0));
+
+    if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle); status != 0) {
+        ALOGE("release_effect returned an error = %d\n", status);
+        return;
+    }
+}
+
+static void LVMArgs(benchmark::internal::Benchmark* b) {
+    // TODO(b/131240940) Test single channel once effects are updated to process mono data
+    for (int i = 2; i <= kNumChMasks; i++) {
+        for (int j = 0; j < kNumEffectUuids; ++j) {
+            b->Args({i, j});
+        }
+    }
+}
+
+BENCHMARK(BM_LVM)->Apply(LVMArgs);
+
+BENCHMARK_MAIN();
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index f08caec..be60aae 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -1,5 +1,5 @@
 // music bundle wrapper
-cc_library_shared {
+cc_library {
     name: "libbundlewrapper",
 
     arch: {
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index 16cd0ad..5217cf9 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -1,6 +1,6 @@
 // audio preprocessing wrapper
 cc_library_shared {
-    name: "libaudiopreprocessing",
+    name: "libaudiopreprocessing_legacy",
 
     vendor: true,
 
@@ -17,6 +17,7 @@
 
     cflags: [
         "-DWEBRTC_POSIX",
+        "-DWEBRTC_LEGACY",
         "-fvisibility=hidden",
         "-Wall",
         "-Werror",
@@ -27,3 +28,34 @@
         "libhardware_headers",
     ],
 }
+
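+// Non-legacy wrapper built against the newer webrtc_audio_processing module
+// (no -DWEBRTC_LEGACY, no speex resampler dependency).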
+cc_library_shared {
+    name: "libaudiopreprocessing",
+    vendor: true,
+    relative_install_path: "soundfx",
+    srcs: ["PreProcessing.cpp"],
+    local_include_dirs: [
+        ".",
+    ],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+        "-Wno-unused-parameter",
+    ],
+
+    shared_libs: [
+        "liblog",
+        "libutils",
+    ],
+
+    static_libs: [
+        "webrtc_audio_processing",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+        "libhardware_headers",
+        "libwebrtc_absl_headers",
+    ],
+}
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index c7afe2f..f2f74a5 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -23,10 +23,15 @@
 #include <hardware/audio_effect.h>
 #include <audio_effects/effect_aec.h>
 #include <audio_effects/effect_agc.h>
+#ifndef WEBRTC_LEGACY
+#include <audio_effects/effect_agc2.h>
+#endif
 #include <audio_effects/effect_ns.h>
 #include <module_common_types.h>
 #include <audio_processing.h>
+#ifdef WEBRTC_LEGACY
 #include "speex/speex_resampler.h"
+#endif
 
 // undefine to perform multi channels API functional tests
 //#define DUAL_MIC_TEST
@@ -42,6 +47,9 @@
 enum preproc_id
 {
     PREPROC_AGC,        // Automatic Gain Control
+#ifndef WEBRTC_LEGACY
+    PREPROC_AGC2,       // Automatic Gain Control 2
+#endif
     PREPROC_AEC,        // Acoustic Echo Canceler
     PREPROC_NS,         // Noise Suppressor
     PREPROC_NUM_EFFECTS
@@ -103,6 +111,10 @@
     int id;                             // audio session ID
     int io;                             // handle of input stream this session is on
     webrtc::AudioProcessing* apm;       // handle on webRTC audio processing module (APM)
+#ifndef WEBRTC_LEGACY
+    // Audio Processing module builder
+    webrtc::AudioProcessingBuilder ap_builder;
+#endif
     size_t apmFrameCount;               // buffer size for webRTC process (10 ms)
     uint32_t apmSamplingRate;           // webRTC APM sampling rate (8/16 or 32 kHz)
     size_t frameCount;                  // buffer size before input resampler ( <=> apmFrameCount)
@@ -113,25 +125,42 @@
     uint32_t enabledMsk;                // bit field containing IDs of enabled pre processors
     uint32_t processedMsk;              // bit field containing IDs of pre processors already
                                         // processed in current round
+#ifdef WEBRTC_LEGACY
     webrtc::AudioFrame *procFrame;      // audio frame passed to webRTC AMP ProcessStream()
+#else
+    // audio config structure
+    webrtc::AudioProcessing::Config config;
+    webrtc::StreamConfig inputConfig;   // input stream configuration
+    webrtc::StreamConfig outputConfig;  // output stream configuration
+#endif
     int16_t *inBuf;                     // input buffer used when resampling
     size_t inBufSize;                   // input buffer size in frames
     size_t framesIn;                    // number of frames in input buffer
+#ifdef WEBRTC_LEGACY
     SpeexResamplerState *inResampler;   // handle on input speex resampler
+#endif
     int16_t *outBuf;                    // output buffer used when resampling
     size_t outBufSize;                  // output buffer size in frames
     size_t framesOut;                   // number of frames in output buffer
+#ifdef WEBRTC_LEGACY
     SpeexResamplerState *outResampler;  // handle on output speex resampler
+#endif
     uint32_t revChannelCount;           // number of channels on reverse stream
     uint32_t revEnabledMsk;             // bit field containing IDs of enabled pre processors
                                         // with reverse channel
     uint32_t revProcessedMsk;           // bit field containing IDs of pre processors with reverse
                                         // channel already processed in current round
+#ifdef WEBRTC_LEGACY
     webrtc::AudioFrame *revFrame;       // audio frame passed to webRTC AMP AnalyzeReverseStream()
+#else
+    webrtc::StreamConfig revConfig;     // reverse stream configuration.
+#endif
     int16_t *revBuf;                    // reverse channel input buffer
     size_t revBufSize;                  // reverse channel input buffer size
     size_t framesRev;                   // number of frames in reverse channel input buffer
+#ifdef WEBRTC_LEGACY
     SpeexResamplerState *revResampler;  // handle on reverse channel input speex resampler
+#endif
 };
 
 #ifdef DUAL_MIC_TEST
@@ -188,6 +217,20 @@
         "The Android Open Source Project"
 };
 
+#ifndef WEBRTC_LEGACY
+// Automatic Gain Control 2
+static const effect_descriptor_t sAgc2Descriptor = {
+        { 0xae3c653b, 0xbe18, 0x4ab8, 0x8938, { 0x41, 0x8f, 0x0a, 0x7f, 0x06, 0xac } }, // type
+        { 0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, { 0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86 } }, // uuid
+        EFFECT_CONTROL_API_VERSION,
+        (EFFECT_FLAG_TYPE_PRE_PROC|EFFECT_FLAG_DEVICE_IND),
+        0, //FIXME indicate CPU load
+        0, //FIXME indicate memory usage
+        "Automatic Gain Control 2",
+        "The Android Open Source Project"
+};
+#endif
+
 // Acoustic Echo Cancellation
 static const effect_descriptor_t sAecDescriptor = {
         { 0x7b491460, 0x8d4d, 0x11e0, 0xbd61, { 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b } }, // type
@@ -215,6 +258,9 @@
 
 static const effect_descriptor_t *sDescriptors[PREPROC_NUM_EFFECTS] = {
         &sAgcDescriptor,
+#ifndef WEBRTC_LEGACY
+        &sAgc2Descriptor,
+#endif
         &sAecDescriptor,
         &sNsDescriptor
 };
@@ -225,6 +271,9 @@
 
 const effect_uuid_t * const sUuidToPreProcTable[PREPROC_NUM_EFFECTS] = {
         FX_IID_AGC,
+#ifndef WEBRTC_LEGACY
+        FX_IID_AGC2,
+#endif
         FX_IID_AEC,
         FX_IID_NS
 };
@@ -266,19 +315,50 @@
 static const int kAgcDefaultCompGain = 9;
 static const bool kAgcDefaultLimiter = true;
 
+#ifndef WEBRTC_LEGACY
+int  Agc2Init (preproc_effect_t *effect)
+{
+    ALOGV("Agc2Init");
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller2.fixed_digital.gain_db = 0.f;
+    effect->session->config.gain_controller2.adaptive_digital.level_estimator =
+        effect->session->config.gain_controller2.kRms;
+    effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db = 2.f;
+    effect->session->apm->ApplyConfig(effect->session->config);
+    return 0;
+}
+#endif
+
 int  AgcInit (preproc_effect_t *effect)
 {
     ALOGV("AgcInit");
+#ifdef WEBRTC_LEGACY
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
     agc->set_mode(webrtc::GainControl::kFixedDigital);
     agc->set_target_level_dbfs(kAgcDefaultTargetLevel);
     agc->set_compression_gain_db(kAgcDefaultCompGain);
     agc->enable_limiter(kAgcDefaultLimiter);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller1.target_level_dbfs = kAgcDefaultTargetLevel;
+    effect->session->config.gain_controller1.compression_gain_db = kAgcDefaultCompGain;
+    effect->session->config.gain_controller1.enable_limiter = kAgcDefaultLimiter;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
     return 0;
 }
 
+#ifndef WEBRTC_LEGACY
+int  Agc2Create(preproc_effect_t *effect)
+{
+    Agc2Init(effect);
+    return 0;
+}
+#endif
+
 int  AgcCreate(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::GainControl *agc = effect->session->apm->gain_control();
     ALOGV("AgcCreate got agc %p", agc);
     if (agc == NULL) {
@@ -286,10 +366,93 @@
         return -ENOMEM;
     }
     effect->engine = static_cast<preproc_fx_handle_t>(agc);
+#endif
     AgcInit(effect);
     return 0;
 }
 
+#ifndef WEBRTC_LEGACY
+int Agc2GetParameter(preproc_effect_t *effect,
+                    void *pParam,
+                    uint32_t *pValueSize,
+                    void *pValue)
+{
+    int status = 0;
+    uint32_t param = *(uint32_t *)pParam;
+    agc2_settings_t *pProperties = (agc2_settings_t *)pValue;
+
+    switch (param) {
+    case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+        if (*pValueSize < sizeof(float)) {
+            *pValueSize = 0;
+            return -EINVAL;
+        }
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+        if (*pValueSize < sizeof(int32_t)) {
+            *pValueSize = 0;
+            return -EINVAL;
+        }
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+        if (*pValueSize < sizeof(float)) {
+            *pValueSize = 0;
+            return -EINVAL;
+        }
+        break;
+    case AGC2_PARAM_PROPERTIES:
+        if (*pValueSize < sizeof(agc2_settings_t)) {
+            *pValueSize = 0;
+            return -EINVAL;
+        }
+        break;
+
+    default:
+        ALOGW("Agc2GetParameter() unknown param %08x", param);
+        status = -EINVAL;
+        break;
+    }
+
+    effect->session->config = effect->session->apm->GetConfig();
+    switch (param) {
+    case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+        *(float *) pValue =
+                (float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
+        ALOGV("Agc2GetParameter() target level %f dB", *(float *) pValue);
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+        *(uint32_t *) pValue =
+                (uint32_t)(effect->session->config.gain_controller2.adaptive_digital.
+                level_estimator);
+        ALOGV("Agc2GetParameter() level estimator %d",
+                *(webrtc::AudioProcessing::Config::GainController2::LevelEstimator *) pValue);
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+        *(float *) pValue =
+                (float)(effect->session->config.gain_controller2.adaptive_digital.
+                extra_saturation_margin_db);
+        ALOGV("Agc2GetParameter() extra saturation margin %f dB", *(float *) pValue);
+        break;
+    case AGC2_PARAM_PROPERTIES:
+        pProperties->fixedDigitalGain =
+                (float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
+        pProperties->level_estimator =
+                (uint32_t)(effect->session->config.gain_controller2.adaptive_digital.
+                level_estimator);
+        pProperties->extraSaturationMargin =
+                (float)(effect->session->config.gain_controller2.adaptive_digital.
+                extra_saturation_margin_db);
+        break;
+    default:
+        ALOGW("Agc2GetParameter() unknown param %d", param);
+        status = -EINVAL;
+        break;
+    }
+
+    return status;
+}
+#endif
+
 int AgcGetParameter(preproc_effect_t *effect,
                     void *pParam,
                     uint32_t *pValueSize,
@@ -298,7 +461,9 @@
     int status = 0;
     uint32_t param = *(uint32_t *)pParam;
     t_agc_settings *pProperties = (t_agc_settings *)pValue;
+#ifdef WEBRTC_LEGACY
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
+#endif
 
     switch (param) {
     case AGC_PARAM_TARGET_LEVEL:
@@ -327,6 +492,7 @@
         break;
     }
 
+#ifdef WEBRTC_LEGACY
     switch (param) {
     case AGC_PARAM_TARGET_LEVEL:
         *(int16_t *) pValue = (int16_t)(agc->target_level_dbfs() * -100);
@@ -351,12 +517,98 @@
         status = -EINVAL;
         break;
     }
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    switch (param) {
+    case AGC_PARAM_TARGET_LEVEL:
+        *(int16_t *) pValue =
+                (int16_t)(effect->session->config.gain_controller1.target_level_dbfs * -100);
+        ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t *) pValue);
+        break;
+    case AGC_PARAM_COMP_GAIN:
+        *(int16_t *) pValue =
+                (int16_t)(effect->session->config.gain_controller1.compression_gain_db * 100);
+        ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t *) pValue);
+        break;
+    case AGC_PARAM_LIMITER_ENA:
+        *(bool *) pValue =
+                (bool)(effect->session->config.gain_controller1.enable_limiter);
+        ALOGV("AgcGetParameter() limiter enabled %s",
+                (*(int16_t *) pValue != 0) ? "true" : "false");
+        break;
+    case AGC_PARAM_PROPERTIES:
+        pProperties->targetLevel =
+                (int16_t)(effect->session->config.gain_controller1.target_level_dbfs * -100);
+        pProperties->compGain =
+                (int16_t)(effect->session->config.gain_controller1.compression_gain_db * 100);
+        pProperties->limiterEnabled =
+                (bool)(effect->session->config.gain_controller1.enable_limiter);
+        break;
+    default:
+        ALOGW("AgcGetParameter() unknown param %d", param);
+        status = -EINVAL;
+        break;
+    }
+#endif
     return status;
 }
 
+#ifndef WEBRTC_LEGACY
+int Agc2SetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
+{
+    int status = 0;
+    uint32_t param = *(uint32_t *)pParam;
+    float valueFloat = 0.f;
+    agc2_settings_t *pProperties = (agc2_settings_t *)pValue;
+    effect->session->config = effect->session->apm->GetConfig();
+    switch (param) {
+    case AGC2_PARAM_FIXED_DIGITAL_GAIN:
+        valueFloat = (float)(*(int32_t *) pValue);
+        ALOGV("Agc2SetParameter() fixed digital gain %f dB", valueFloat);
+        effect->session->config.gain_controller2.fixed_digital.gain_db = valueFloat;
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
+        ALOGV("Agc2SetParameter() level estimator %d", *(webrtc::AudioProcessing::Config::
+                GainController2::LevelEstimator *) pValue);
+        effect->session->config.gain_controller2.adaptive_digital.level_estimator =
+                (*(webrtc::AudioProcessing::Config::GainController2::LevelEstimator *) pValue);
+        break;
+    case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
+        valueFloat = (float)(*(int32_t *) pValue);
+        ALOGV("Agc2SetParameter() extra saturation margin %f dB", valueFloat);
+        effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
+                valueFloat;
+        break;
+    case AGC2_PARAM_PROPERTIES:
+        ALOGV("Agc2SetParameter() properties gain %f, level %d margin %f",
+                pProperties->fixedDigitalGain,
+                pProperties->level_estimator,
+                pProperties->extraSaturationMargin);
+        effect->session->config.gain_controller2.fixed_digital.gain_db =
+                pProperties->fixedDigitalGain;
+        effect->session->config.gain_controller2.adaptive_digital.level_estimator =
+                (webrtc::AudioProcessing::Config::GainController2::LevelEstimator)pProperties->
+                level_estimator;
+        effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
+                pProperties->extraSaturationMargin;
+        break;
+    default:
+        ALOGW("Agc2SetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
+        status = -EINVAL;
+        break;
+    }
+    effect->session->apm->ApplyConfig(effect->session->config);
+
+    ALOGV("Agc2SetParameter() done status %d", status);
+
+    return status;
+}
+#endif
+
 int AgcSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
 {
     int status = 0;
+#ifdef WEBRTC_LEGACY
     uint32_t param = *(uint32_t *)pParam;
     t_agc_settings *pProperties = (t_agc_settings *)pValue;
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
@@ -390,27 +642,95 @@
         status = -EINVAL;
         break;
     }
+#else
+    uint32_t param = *(uint32_t *)pParam;
+    t_agc_settings *pProperties = (t_agc_settings *)pValue;
+    effect->session->config = effect->session->apm->GetConfig();
+    switch (param) {
+    case AGC_PARAM_TARGET_LEVEL:
+        ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t *)pValue);
+        effect->session->config.gain_controller1.target_level_dbfs =
+             (-(*(int16_t *)pValue / 100));
+        break;
+    case AGC_PARAM_COMP_GAIN:
+        ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t *)pValue);
+        effect->session->config.gain_controller1.compression_gain_db =
+             (*(int16_t *)pValue / 100);
+        break;
+    case AGC_PARAM_LIMITER_ENA:
+        ALOGV("AgcSetParameter() limiter enabled %s", *(bool *)pValue ? "true" : "false");
+        effect->session->config.gain_controller1.enable_limiter =
+             (*(bool *)pValue);
+        break;
+    case AGC_PARAM_PROPERTIES:
+        ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
+              pProperties->targetLevel,
+              pProperties->compGain,
+              pProperties->limiterEnabled);
+        effect->session->config.gain_controller1.target_level_dbfs =
+              -(pProperties->targetLevel / 100);
+        effect->session->config.gain_controller1.compression_gain_db =
+              pProperties->compGain / 100;
+        effect->session->config.gain_controller1.enable_limiter =
+              pProperties->limiterEnabled;
+        break;
+    default:
+        ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
+        status = -EINVAL;
+        break;
+    }
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 
     ALOGV("AgcSetParameter() done status %d", status);
 
     return status;
 }
 
+#ifndef WEBRTC_LEGACY
+void Agc2Enable(preproc_effect_t *effect)
+{
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller2.enabled = true;
+    effect->session->apm->ApplyConfig(effect->session->config);
+}
+#endif
+
 void AgcEnable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
     ALOGV("AgcEnable agc %p", agc);
     agc->Enable(true);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller1.enabled = true;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
+#ifndef WEBRTC_LEGACY
+void Agc2Disable(preproc_effect_t *effect)
+{
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller2.enabled = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+}
+#endif
+
 void AgcDisable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     ALOGV("AgcDisable");
     webrtc::GainControl *agc = static_cast<webrtc::GainControl *>(effect->engine);
     agc->Enable(false);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.gain_controller1.enabled = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
-
 static const preproc_ops_t sAgcOps = {
         AgcCreate,
         AgcInit,
@@ -422,26 +742,48 @@
         NULL
 };
 
+#ifndef WEBRTC_LEGACY
+static const preproc_ops_t sAgc2Ops = {
+        Agc2Create,
+        Agc2Init,
+        NULL,
+        Agc2Enable,
+        Agc2Disable,
+        Agc2SetParameter,
+        Agc2GetParameter,
+        NULL
+};
+#endif
 
 //------------------------------------------------------------------------------
 // Acoustic Echo Canceler (AEC)
 //------------------------------------------------------------------------------
 
+#ifdef WEBRTC_LEGACY
 static const webrtc::EchoControlMobile::RoutingMode kAecDefaultMode =
         webrtc::EchoControlMobile::kEarpiece;
 static const bool kAecDefaultComfortNoise = true;
+#endif
 
 int  AecInit (preproc_effect_t *effect)
 {
     ALOGV("AecInit");
+#ifdef WEBRTC_LEGACY
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     aec->set_routing_mode(kAecDefaultMode);
     aec->enable_comfort_noise(kAecDefaultComfortNoise);
+#else
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    effect->session->config.echo_canceller.mobile_mode = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
     return 0;
 }
 
 int  AecCreate(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::EchoControlMobile *aec = effect->session->apm->echo_control_mobile();
     ALOGV("AecCreate got aec %p", aec);
     if (aec == NULL) {
@@ -449,6 +791,7 @@
         return -ENOMEM;
     }
     effect->engine = static_cast<preproc_fx_handle_t>(aec);
+#endif
     AecInit (effect);
     return 0;
 }
@@ -470,6 +813,14 @@
         *(uint32_t *)pValue = 1000 * effect->session->apm->stream_delay_ms();
         ALOGV("AecGetParameter() echo delay %d us", *(uint32_t *)pValue);
         break;
+#ifndef WEBRTC_LEGACY
+    case AEC_PARAM_MOBILE_MODE:
+        effect->session->config =
+            effect->session->apm->GetConfig();
+        *(uint32_t *)pValue = effect->session->config.echo_canceller.mobile_mode;
+        ALOGV("AecGetParameter() mobile mode %d us", *(uint32_t *)pValue);
+        break;
+#endif
     default:
         ALOGW("AecGetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
         status = -EINVAL;
@@ -490,6 +841,15 @@
         status = effect->session->apm->set_stream_delay_ms(value/1000);
         ALOGV("AecSetParameter() echo delay %d us, status %d", value, status);
         break;
+#ifndef WEBRTC_LEGACY
+    case AEC_PARAM_MOBILE_MODE:
+        effect->session->config =
+            effect->session->apm->GetConfig();
+        effect->session->config.echo_canceller.mobile_mode = value;
+        ALOGV("AecSetParameter() mobile mode %d us", value);
+        effect->session->apm->ApplyConfig(effect->session->config);
+        break;
+#endif
     default:
         ALOGW("AecSetParameter() unknown param %08x value %08x", param, *(uint32_t *)pValue);
         status = -EINVAL;
@@ -500,28 +860,43 @@
 
 void AecEnable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     ALOGV("AecEnable aec %p", aec);
     aec->Enable(true);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.echo_canceller.enabled = true;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
 void AecDisable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     ALOGV("AecDisable");
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     aec->Enable(false);
+#else
+    effect->session->config = effect->session->apm->GetConfig();
+    effect->session->config.echo_canceller.enabled = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
 int AecSetDevice(preproc_effect_t *effect, uint32_t device)
 {
     ALOGV("AecSetDevice %08x", device);
+#ifdef WEBRTC_LEGACY
     webrtc::EchoControlMobile *aec = static_cast<webrtc::EchoControlMobile *>(effect->engine);
     webrtc::EchoControlMobile::RoutingMode mode = webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
+#endif
 
     if (audio_is_input_device(device)) {
         return 0;
     }
 
+#ifdef WEBRTC_LEGACY
     switch(device) {
     case AUDIO_DEVICE_OUT_EARPIECE:
         mode = webrtc::EchoControlMobile::kEarpiece;
@@ -536,6 +911,7 @@
         break;
     }
     aec->set_routing_mode(mode);
+#endif
     return 0;
 }
 
@@ -554,11 +930,17 @@
 // Noise Suppression (NS)
 //------------------------------------------------------------------------------
 
+#ifdef WEBRTC_LEGACY
 static const webrtc::NoiseSuppression::Level kNsDefaultLevel = webrtc::NoiseSuppression::kModerate;
+#else
+static const webrtc::AudioProcessing::Config::NoiseSuppression::Level kNsDefaultLevel =
+                webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
+#endif
 
 int  NsInit (preproc_effect_t *effect)
 {
     ALOGV("NsInit");
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ns->set_level(kNsDefaultLevel);
     webrtc::Config config;
@@ -575,12 +957,20 @@
     config.Set<webrtc::Beamforming>(
             new webrtc::Beamforming(false, geometry));
     effect->session->apm->SetExtraOptions(config);
+#else
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    effect->session->config.noise_suppression.level =
+        kNsDefaultLevel;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
     effect->type = NS_TYPE_SINGLE_CHANNEL;
     return 0;
 }
 
 int  NsCreate(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = effect->session->apm->noise_suppression();
     ALOGV("NsCreate got ns %p", ns);
     if (ns == NULL) {
@@ -588,6 +978,7 @@
         return -ENOMEM;
     }
     effect->engine = static_cast<preproc_fx_handle_t>(ns);
+#endif
     NsInit (effect);
     return 0;
 }
@@ -604,6 +995,7 @@
 int NsSetParameter (preproc_effect_t *effect, void *pParam, void *pValue)
 {
     int status = 0;
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     uint32_t param = *(uint32_t *)pParam;
     uint32_t value = *(uint32_t *)pValue;
@@ -629,12 +1021,30 @@
             ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
             status = -EINVAL;
     }
+#else
+    uint32_t param = *(uint32_t *)pParam;
+    uint32_t value = *(uint32_t *)pValue;
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    switch (param) {
+        case NS_PARAM_LEVEL:
+            effect->session->config.noise_suppression.level =
+               (webrtc::AudioProcessing::Config::NoiseSuppression::Level)value;
+            ALOGV("NsSetParameter() level %d", value);
+            break;
+        default:
+            ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
+            status = -EINVAL;
+    }
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 
     return status;
 }
 
 void NsEnable(preproc_effect_t *effect)
 {
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ALOGV("NsEnable ns %p", ns);
     ns->Enable(true);
@@ -644,17 +1054,30 @@
         config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
         effect->session->apm->SetExtraOptions(config);
     }
+#else
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    effect->session->config.noise_suppression.enabled = true;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
 void NsDisable(preproc_effect_t *effect)
 {
     ALOGV("NsDisable");
+#ifdef WEBRTC_LEGACY
     webrtc::NoiseSuppression *ns = static_cast<webrtc::NoiseSuppression *>(effect->engine);
     ns->Enable(false);
     webrtc::Config config;
     std::vector<webrtc::Point> geometry;
     config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
     effect->session->apm->SetExtraOptions(config);
+#else
+    effect->session->config =
+        effect->session->apm->GetConfig();
+    effect->session->config.noise_suppression.enabled = false;
+    effect->session->apm->ApplyConfig(effect->session->config);
+#endif
 }
 
 static const preproc_ops_t sNsOps = {
@@ -669,8 +1092,12 @@
 };
 
 
+
 static const preproc_ops_t *sPreProcOps[PREPROC_NUM_EFFECTS] = {
         &sAgcOps,
+#ifndef WEBRTC_LEGACY
+        &sAgc2Ops,
+#endif
         &sAecOps,
         &sNsOps
 };
@@ -812,7 +1239,9 @@
     session->id = 0;
     session->io = 0;
     session->createdMsk = 0;
+#ifdef WEBRTC_LEGACY
     session->apm = NULL;
+#endif
     for (i = 0; i < PREPROC_NUM_EFFECTS && status == 0; i++) {
         status = Effect_Init(&session->effects[i], i);
     }
@@ -829,6 +1258,7 @@
     ALOGV("Session_CreateEffect procId %d, createdMsk %08x", procId, session->createdMsk);
 
     if (session->createdMsk == 0) {
+#ifdef WEBRTC_LEGACY
         session->apm = webrtc::AudioProcessing::Create();
         if (session->apm == NULL) {
             ALOGW("Session_CreateEffect could not get apm engine");
@@ -850,28 +1280,53 @@
             ALOGW("Session_CreateEffect could not allocate reverse audio frame");
             goto error;
         }
+#else
+        session->apm = session->ap_builder.Create();
+        if (session->apm == NULL) {
+            ALOGW("Session_CreateEffect could not get apm engine");
+            goto error;
+        }
+#endif
         session->apmSamplingRate = kPreprocDefaultSr;
         session->apmFrameCount = (kPreprocDefaultSr) / 100;
         session->frameCount = session->apmFrameCount;
         session->samplingRate = kPreprocDefaultSr;
         session->inChannelCount = kPreProcDefaultCnl;
         session->outChannelCount = kPreProcDefaultCnl;
+#ifdef WEBRTC_LEGACY
         session->procFrame->sample_rate_hz_ = kPreprocDefaultSr;
         session->procFrame->num_channels_ = kPreProcDefaultCnl;
+#else
+        session->inputConfig.set_sample_rate_hz(kPreprocDefaultSr);
+        session->inputConfig.set_num_channels(kPreProcDefaultCnl);
+        session->outputConfig.set_sample_rate_hz(kPreprocDefaultSr);
+        session->outputConfig.set_num_channels(kPreProcDefaultCnl);
+#endif
         session->revChannelCount = kPreProcDefaultCnl;
+#ifdef WEBRTC_LEGACY
         session->revFrame->sample_rate_hz_ = kPreprocDefaultSr;
         session->revFrame->num_channels_ = kPreProcDefaultCnl;
+#else
+        session->revConfig.set_sample_rate_hz(kPreprocDefaultSr);
+        session->revConfig.set_num_channels(kPreProcDefaultCnl);
+#endif
         session->enabledMsk = 0;
         session->processedMsk = 0;
         session->revEnabledMsk = 0;
         session->revProcessedMsk = 0;
+#ifdef WEBRTC_LEGACY
         session->inResampler = NULL;
+#endif
         session->inBuf = NULL;
         session->inBufSize = 0;
+#ifdef WEBRTC_LEGACY
         session->outResampler = NULL;
+#endif
         session->outBuf = NULL;
         session->outBufSize = 0;
+#ifdef WEBRTC_LEGACY
         session->revResampler = NULL;
+#endif
         session->revBuf = NULL;
         session->revBufSize = 0;
     }
@@ -885,12 +1340,17 @@
 
 error:
     if (session->createdMsk == 0) {
+#ifdef WEBRTC_LEGACY
         delete session->revFrame;
         session->revFrame = NULL;
         delete session->procFrame;
         session->procFrame = NULL;
         delete session->apm;
         session->apm = NULL; // NOLINT(clang-analyzer-cplusplus.NewDelete)
+#else
+        delete session->apm;
+        session->apm = NULL;
+#endif
     }
     return status;
 }
@@ -901,6 +1361,7 @@
     ALOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
     session->createdMsk &= ~(1<<fx->procId);
     if (session->createdMsk == 0) {
+#ifdef WEBRTC_LEGACY
         delete session->apm;
         session->apm = NULL;
         delete session->procFrame;
@@ -919,6 +1380,10 @@
             speex_resampler_destroy(session->revResampler);
             session->revResampler = NULL;
         }
+#else
+        delete session->apm;
+        session->apm = NULL;
+#endif
         delete session->inBuf;
         session->inBuf = NULL;
         delete session->outBuf;
@@ -946,7 +1411,9 @@
 
     ALOGV("Session_SetConfig sr %d cnl %08x",
          config->inputCfg.samplingRate, config->inputCfg.channels);
+#ifdef WEBRTC_LEGACY
     int status;
+#endif
 
     // AEC implementation is limited to 16kHz
     if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
@@ -958,6 +1425,7 @@
         session->apmSamplingRate = 8000;
     }
 
+#ifdef WEBRTC_LEGACY
     const webrtc::ProcessingConfig processing_config = {
       {{static_cast<int>(session->apmSamplingRate), inCnl},
        {static_cast<int>(session->apmSamplingRate), outCnl},
@@ -967,23 +1435,41 @@
     if (status < 0) {
         return -EINVAL;
     }
+#endif
 
     session->samplingRate = config->inputCfg.samplingRate;
     session->apmFrameCount = session->apmSamplingRate / 100;
     if (session->samplingRate == session->apmSamplingRate) {
         session->frameCount = session->apmFrameCount;
     } else {
+#ifdef WEBRTC_LEGACY
         session->frameCount = (session->apmFrameCount * session->samplingRate) /
                 session->apmSamplingRate  + 1;
+#else
+        session->frameCount = (session->apmFrameCount * session->samplingRate) /
+                session->apmSamplingRate;
+#endif
     }
     session->inChannelCount = inCnl;
     session->outChannelCount = outCnl;
+#ifdef WEBRTC_LEGACY
     session->procFrame->num_channels_ = inCnl;
     session->procFrame->sample_rate_hz_ = session->apmSamplingRate;
+#else
+    session->inputConfig.set_sample_rate_hz(session->samplingRate);
+    session->inputConfig.set_num_channels(inCnl);
+    session->outputConfig.set_sample_rate_hz(session->samplingRate);
+    session->outputConfig.set_num_channels(inCnl);
+#endif
 
     session->revChannelCount = inCnl;
+#ifdef WEBRTC_LEGACY
     session->revFrame->num_channels_ = inCnl;
     session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
+#else
+    session->revConfig.set_sample_rate_hz(session->samplingRate);
+    session->revConfig.set_num_channels(inCnl);
+#endif
 
     // force process buffer reallocation
     session->inBufSize = 0;
@@ -992,6 +1478,7 @@
     session->framesOut = 0;
 
 
+#ifdef WEBRTC_LEGACY
     if (session->inResampler != NULL) {
         speex_resampler_destroy(session->inResampler);
         session->inResampler = NULL;
@@ -1043,6 +1530,7 @@
             return -EINVAL;
         }
     }
+#endif
 
     session->state = PREPROC_SESSION_STATE_CONFIG;
     return 0;
@@ -1079,6 +1567,7 @@
         return -EINVAL;
     }
     uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
+#ifdef WEBRTC_LEGACY
     const webrtc::ProcessingConfig processing_config = {
        {{static_cast<int>(session->apmSamplingRate), session->inChannelCount},
         {static_cast<int>(session->apmSamplingRate), session->outChannelCount},
@@ -1088,9 +1577,12 @@
     if (status < 0) {
         return -EINVAL;
     }
+#endif
     session->revChannelCount = inCnl;
+#ifdef WEBRTC_LEGACY
     session->revFrame->num_channels_ = inCnl;
     session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
+#endif
     // force process buffer reallocation
     session->revBufSize = 0;
     session->framesRev = 0;
@@ -1114,6 +1606,7 @@
     if (enabled) {
         if(session->enabledMsk == 0) {
             session->framesIn = 0;
+#ifdef WEBRTC_LEGACY
             if (session->inResampler != NULL) {
                 speex_resampler_reset_mem(session->inResampler);
             }
@@ -1121,13 +1614,16 @@
             if (session->outResampler != NULL) {
                 speex_resampler_reset_mem(session->outResampler);
             }
+#endif
         }
         session->enabledMsk |= (1 << procId);
         if (HasReverseStream(procId)) {
             session->framesRev = 0;
+#ifdef WEBRTC_LEGACY
             if (session->revResampler != NULL) {
                 speex_resampler_reset_mem(session->revResampler);
             }
+#endif
             session->revEnabledMsk |= (1 << procId);
         }
     } else {
@@ -1252,6 +1748,7 @@
             return 0;
         }
 
+#ifdef WEBRTC_LEGACY
         if (session->inResampler != NULL) {
             size_t fr = session->frameCount - session->framesIn;
             if (inBuffer->frameCount < fr) {
@@ -1335,6 +1832,28 @@
         session->procFrame->samples_per_channel_ = session->apmFrameCount;
 
         effect->session->apm->ProcessStream(session->procFrame);
+#else
+        size_t fr = session->frameCount - session->framesIn;
+        if (inBuffer->frameCount < fr) {
+            fr = inBuffer->frameCount;
+        }
+        session->framesIn += fr;
+        inBuffer->frameCount = fr;
+        if (session->framesIn < session->frameCount) {
+            return 0;
+        }
+        session->framesIn = 0;
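+        // Non-legacy path: pass the accumulated block of interleaved int16 samples
+        // directly to the new APM ProcessStream() overload, described by the
+        // input/output StreamConfig (sample rate and channel count).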
+        if (int status = effect->session->apm->ProcessStream(
+                                    (const int16_t* const)inBuffer->s16,
+                                    (const webrtc::StreamConfig)effect->session->inputConfig,
+                                    (const webrtc::StreamConfig)effect->session->outputConfig,
+                                    (int16_t* const)outBuffer->s16);
+             status != 0) {
+            ALOGE("Process Stream failed with error %d\n", status);
+            return status;
+        }
+        outBuffer->frameCount = inBuffer->frameCount;
+#endif
 
         if (session->outBufSize < session->framesOut + session->frameCount) {
             int16_t *buf;
@@ -1350,6 +1869,7 @@
             session->outBuf = buf;
         }
 
+#ifdef WEBRTC_LEGACY
         if (session->outResampler != NULL) {
             spx_uint32_t frIn = session->apmFrameCount;
             spx_uint32_t frOut = session->frameCount;
@@ -1375,6 +1895,9 @@
             session->framesOut += session->frameCount;
         }
         size_t fr = session->framesOut;
+#else
+        fr = session->framesOut;
+#endif
         if (framesRq - framesWr < fr) {
             fr = framesRq - framesWr;
         }
@@ -1794,6 +2317,7 @@
 
     if ((session->revProcessedMsk & session->revEnabledMsk) == session->revEnabledMsk) {
         effect->session->revProcessedMsk = 0;
+#ifdef WEBRTC_LEGACY
         if (session->revResampler != NULL) {
             size_t fr = session->frameCount - session->framesRev;
             if (inBuffer->frameCount < fr) {
@@ -1858,6 +2382,27 @@
         }
         session->revFrame->samples_per_channel_ = session->apmFrameCount;
         effect->session->apm->AnalyzeReverseStream(session->revFrame);
+#else
+        size_t fr = session->frameCount - session->framesRev;
+        if (inBuffer->frameCount < fr) {
+            fr = inBuffer->frameCount;
+        }
+        session->framesRev += fr;
+        inBuffer->frameCount = fr;
+        if (session->framesRev < session->frameCount) {
+            return 0;
+        }
+        session->framesRev = 0;
+        if (int status = effect->session->apm->ProcessReverseStream(
+                        (const int16_t* const)inBuffer->s16,
+                        (const webrtc::StreamConfig)effect->session->revConfig,
+                        (const webrtc::StreamConfig)effect->session->revConfig,
+                        (int16_t* const)outBuffer->s16);
+             status != 0) {
+            ALOGE("Process Reverse Stream failed with error %d\n", status);
+            return status;
+        }
+#endif
         return 0;
     } else {
         return -ENODATA;
diff --git a/media/libeffects/preprocessing/tests/Android.bp b/media/libeffects/preprocessing/tests/Android.bp
index 71f6e8f..045b0d3 100644
--- a/media/libeffects/preprocessing/tests/Android.bp
+++ b/media/libeffects/preprocessing/tests/Android.bp
@@ -1,5 +1,37 @@
 // audio preprocessing unit test
 cc_test {
+    name: "AudioPreProcessingLegacyTest",
+
+    vendor: true,
+
+    relative_install_path: "soundfx",
+
+    srcs: ["PreProcessingTest.cpp"],
+
+    shared_libs: [
+        "libaudiopreprocessing_legacy",
+        "libaudioutils",
+        "liblog",
+        "libutils",
+        "libwebrtc_audio_preprocessing",
+    ],
+
+    cflags: [
+        "-DWEBRTC_POSIX",
+        "-DWEBRTC_LEGACY",
+        "-fvisibility=default",
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+
+    header_libs: [
+        "libaudioeffects",
+        "libhardware_headers",
+    ],
+}
+
+cc_test {
     name: "AudioPreProcessingTest",
 
     vendor: true,
@@ -13,16 +45,7 @@
         "libaudioutils",
         "liblog",
         "libutils",
-        "libwebrtc_audio_preprocessing",
     ],
-
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-    ],
-
     header_libs: [
         "libaudioeffects",
         "libhardware_headers",
diff --git a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
index 5c81d78..3244c1f 100644
--- a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
@@ -14,23 +14,19 @@
  * limitations under the License.
  */
 
+#include <getopt.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/stat.h>
+#include <vector>
+
 #include <audio_effects/effect_aec.h>
 #include <audio_effects/effect_agc.h>
+#ifndef WEBRTC_LEGACY
+#include <audio_effects/effect_agc2.h>
+#endif
 #include <audio_effects/effect_ns.h>
-#include <audio_processing.h>
-#include <getopt.h>
-#include <hardware/audio_effect.h>
-#include <module_common_types.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <utils/Log.h>
-#include <utils/Timers.h>
-
-#include <audio_utils/channels.h>
-#include <audio_utils/primitives.h>
 #include <log/log.h>
-#include <system/audio.h>
 
 // This is the only symbol that needs to be imported
 extern audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM;
@@ -42,6 +38,9 @@
 // types of pre processing modules
 enum PreProcId {
   PREPROC_AGC,  // Automatic Gain Control
+#ifndef WEBRTC_LEGACY
+  PREPROC_AGC2,  // Automatic Gain Control 2
+#endif
   PREPROC_AEC,  // Acoustic Echo Canceler
   PREPROC_NS,   // Noise Suppressor
   PREPROC_NUM_EFFECTS
@@ -58,6 +57,12 @@
   ARG_AGC_COMP_LVL,
   ARG_AEC_DELAY,
   ARG_NS_LVL,
+#ifndef WEBRTC_LEGACY
+  ARG_AEC_MOBILE,
+  ARG_AGC2_GAIN,
+  ARG_AGC2_LVL,
+  ARG_AGC2_SAT_MGN
+#endif
 };
 
 struct preProcConfigParams_t {
@@ -66,11 +71,19 @@
   int nsLevel = 0;         // a value between 0-3
   int agcTargetLevel = 3;  // in dB
   int agcCompLevel = 9;    // in dB
+#ifndef WEBRTC_LEGACY
+  float agc2Gain = 0.f;             // in dB
+  float agc2SaturationMargin = 2.f; // in dB
+  int agc2Level = 0;                // either kRms(0) or kPeak(1)
+#endif
   int aecDelay = 0;        // in ms
 };
 
 const effect_uuid_t kPreProcUuids[PREPROC_NUM_EFFECTS] = {
     {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // agc uuid
+#ifndef WEBRTC_LEGACY
+    {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},  // agc2 uuid
+#endif
     {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // aec uuid
     {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // ns  uuid
 };
@@ -126,14 +139,30 @@
   printf("\n           Enable Noise Suppression, default disabled");
   printf("\n     --agc");
   printf("\n           Enable Gain Control, default disabled");
+#ifndef WEBRTC_LEGACY
+  printf("\n     --agc2");
+  printf("\n           Enable Gain Controller 2, default disabled");
+#endif
   printf("\n     --ns_lvl <ns_level>");
   printf("\n           Noise Suppression level in dB, default value 0dB");
   printf("\n     --agc_tgt_lvl <target_level>");
   printf("\n           AGC Target Level in dB, default value 3dB");
   printf("\n     --agc_comp_lvl <comp_level>");
   printf("\n           AGC Comp Level in dB, default value 9dB");
+#ifndef WEBRTC_LEGACY
+  printf("\n     --agc2_gain <fixed_digital_gain>");
+  printf("\n           AGC Fixed Digital Gain in dB, default value 0dB");
+  printf("\n     --agc2_lvl <level_estimator>");
+  printf("\n           AGC Adaptive Digital Level Estimator, default value kRms");
+  printf("\n     --agc2_sat_mgn <saturation_margin>");
+  printf("\n           AGC Adaptive Digital Saturation Margin in dB, default value 2dB");
+#endif
   printf("\n     --aec_delay <delay>");
   printf("\n           AEC delay value in ms, default value 0ms");
+#ifndef WEBRTC_LEGACY
+  printf("\n     --aec_mobile");
+  printf("\n           Enable mobile mode of echo canceller, default disabled");
+#endif
   printf("\n");
 }
 
@@ -184,6 +213,9 @@
   const char *outputFile = nullptr;
   const char *farFile = nullptr;
   int effectEn[PREPROC_NUM_EFFECTS] = {0};
+#ifndef WEBRTC_LEGACY
+  int aecMobileMode = 0;
+#endif
 
   const option long_opts[] = {
       {"help", no_argument, nullptr, ARG_HELP},
@@ -194,11 +226,22 @@
       {"ch_mask", required_argument, nullptr, ARG_CH_MASK},
       {"agc_tgt_lvl", required_argument, nullptr, ARG_AGC_TGT_LVL},
       {"agc_comp_lvl", required_argument, nullptr, ARG_AGC_COMP_LVL},
+#ifndef WEBRTC_LEGACY
+      {"agc2_gain", required_argument, nullptr, ARG_AGC2_GAIN},
+      {"agc2_lvl", required_argument, nullptr, ARG_AGC2_LVL},
+      {"agc2_sat_mgn", required_argument, nullptr, ARG_AGC2_SAT_MGN},
+#endif
       {"aec_delay", required_argument, nullptr, ARG_AEC_DELAY},
       {"ns_lvl", required_argument, nullptr, ARG_NS_LVL},
       {"aec", no_argument, &effectEn[PREPROC_AEC], 1},
       {"agc", no_argument, &effectEn[PREPROC_AGC], 1},
+#ifndef WEBRTC_LEGACY
+      {"agc2", no_argument, &effectEn[PREPROC_AGC2], 1},
+#endif
       {"ns", no_argument, &effectEn[PREPROC_NS], 1},
+#ifndef WEBRTC_LEGACY
+      {"aec_mobile", no_argument, &aecMobileMode, 1},
+#endif
       {nullptr, 0, nullptr, 0},
   };
   struct preProcConfigParams_t preProcCfgParams {};
@@ -246,6 +289,20 @@
         preProcCfgParams.agcCompLevel = atoi(optarg);
         break;
       }
+#ifndef WEBRTC_LEGACY
+      case ARG_AGC2_GAIN: {
+        preProcCfgParams.agc2Gain = atof(optarg);
+        break;
+      }
+      case ARG_AGC2_LVL: {
+        preProcCfgParams.agc2Level = atoi(optarg);
+        break;
+      }
+      case ARG_AGC2_SAT_MGN: {
+        preProcCfgParams.agc2SaturationMargin = atof(optarg);
+        break;
+      }
+#endif
       case ARG_AEC_DELAY: {
         preProcCfgParams.aecDelay = atoi(optarg);
         break;
@@ -342,6 +399,31 @@
       return EXIT_FAILURE;
     }
   }
+#ifndef WEBRTC_LEGACY
+  if (effectEn[PREPROC_AGC2]) {
+    if (int status = preProcSetConfigParam(AGC2_PARAM_FIXED_DIGITAL_GAIN,
+                                           (float)preProcCfgParams.agc2Gain,
+                                           effectHandle[PREPROC_AGC2]);
+        status != 0) {
+      ALOGE("Invalid AGC2 Fixed Digital Gain. Error %d\n", status);
+      return EXIT_FAILURE;
+    }
+    if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR,
+                                           (uint32_t)preProcCfgParams.agc2Level,
+                                           effectHandle[PREPROC_AGC2]);
+        status != 0) {
+      ALOGE("Invalid AGC2 Level Estimator. Error %d\n", status);
+      return EXIT_FAILURE;
+    }
+    if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN,
+                                           (float)preProcCfgParams.agc2SaturationMargin,
+                                           effectHandle[PREPROC_AGC2]);
+        status != 0) {
+      ALOGE("Invalid AGC2 Saturation Margin. Error %d\n", status);
+      return EXIT_FAILURE;
+    }
+  }
+#endif
   if (effectEn[PREPROC_NS]) {
     if (int status = preProcSetConfigParam(NS_PARAM_LEVEL, (uint32_t)preProcCfgParams.nsLevel,
                                            effectHandle[PREPROC_NS]);
@@ -350,6 +432,16 @@
       return EXIT_FAILURE;
     }
   }
+#ifndef WEBRTC_LEGACY
+  if (effectEn[PREPROC_AEC]) {
+    if (int status = preProcSetConfigParam(AEC_PARAM_MOBILE_MODE, (uint32_t)aecMobileMode,
+                                           effectHandle[PREPROC_AEC]);
+        status != 0) {
+      ALOGE("Invalid AEC mobile mode value %d\n", status);
+      return EXIT_FAILURE;
+    }
+  }
+#endif
 
   // Process Call
   const int frameLength = (int)(preProcCfgParams.samplingFreq * kTenMilliSecVal);
diff --git a/media/libmedia/tests/codeclist/Android.bp b/media/libmedia/tests/codeclist/Android.bp
new file mode 100644
index 0000000..b9c1bdb
--- /dev/null
+++ b/media/libmedia/tests/codeclist/Android.bp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_test {
+    name: "CodecListTest",
+    gtest: true,
+
+    srcs: [
+        "CodecListTest.cpp",
+    ],
+
+    shared_libs: [
+        "libbinder",
+        "liblog",
+        "libmedia_codeclist",
+        "libstagefright",
+        "libstagefright_foundation",
+        "libstagefright_xmlparser",
+        "libutils",
+    ],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+
+    sanitize: {
+        cfi: true,
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+    },
+}
diff --git a/media/libmedia/tests/codeclist/CodecListTest.cpp b/media/libmedia/tests/codeclist/CodecListTest.cpp
new file mode 100644
index 0000000..bd2adf7
--- /dev/null
+++ b/media/libmedia/tests/codeclist/CodecListTest.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CodecListTest"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include <binder/Parcel.h>
+#include <media/stagefright/MediaCodecConstants.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
+
+#define kSwCodecXmlPath "/apex/com.android.media.swcodec/etc/"
+
+using namespace android;
+
+struct CddReq {
+    CddReq(const char *type, bool encoder) {
+        mediaType = type;
+        isEncoder = encoder;
+    }
+
+    const char *mediaType;
+    bool isEncoder;
+};
+
+TEST(CodecListTest, CodecListSanityTest) {
+    sp<IMediaCodecList> list = MediaCodecList::getInstance();
+    ASSERT_NE(list, nullptr) << "Unable to get MediaCodecList instance.";
+    EXPECT_GT(list->countCodecs(), 0) << "No codecs in CodecList";
+    for (size_t i = 0; i < list->countCodecs(); ++i) {
+        sp<MediaCodecInfo> info = list->getCodecInfo(i);
+        ASSERT_NE(info, nullptr) << "CodecInfo is null";
+        ssize_t index = list->findCodecByName(info->getCodecName());
+        EXPECT_GE(index, 0) << "Wasn't able to find existing codec: " << info->getCodecName();
+    }
+}
+
+TEST(CodecListTest, CodecListByTypeTest) {
+    sp<IMediaCodecList> list = MediaCodecList::getInstance();
+    ASSERT_NE(list, nullptr) << "Unable to get MediaCodecList instance.";
+
+    std::vector<CddReq> cddReq{
+            // media type, isEncoder
+            CddReq(MIMETYPE_AUDIO_AAC, false),
+            CddReq(MIMETYPE_AUDIO_AAC, true),
+
+            CddReq(MIMETYPE_VIDEO_AVC, false),
+            CddReq(MIMETYPE_VIDEO_HEVC, false),
+            CddReq(MIMETYPE_VIDEO_MPEG4, false),
+            CddReq(MIMETYPE_VIDEO_VP8, false),
+            CddReq(MIMETYPE_VIDEO_VP9, false),
+
+            CddReq(MIMETYPE_VIDEO_AVC, true),
+            CddReq(MIMETYPE_VIDEO_VP8, true),
+    };
+
+    for (CddReq codecReq : cddReq) {
+        ssize_t index = list->findCodecByType(codecReq.mediaType, codecReq.isEncoder);
+        EXPECT_GE(index, 0) << "Wasn't able to find codec for media type: " << codecReq.mediaType
+                            << (codecReq.isEncoder ? " encoder" : " decoder");
+    }
+}
+
+TEST(CodecInfoTest, ListInfoTest) {
+    ALOGV("Compare CodecInfo with info in XML");
+    MediaCodecsXmlParser parser;
+    status_t status = parser.parseXmlFilesInSearchDirs();
+    ASSERT_EQ(status, OK) << "XML Parsing failed for default paths";
+
+    const std::vector<std::string> &xmlFiles = MediaCodecsXmlParser::getDefaultXmlNames();
+    const std::vector<std::string> &searchDirsApex{std::string(kSwCodecXmlPath)};
+    status = parser.parseXmlFilesInSearchDirs(xmlFiles, searchDirsApex);
+    ASSERT_EQ(status, OK) << "XML Parsing of " << kSwCodecXmlPath << " failed";
+
+    MediaCodecsXmlParser::CodecMap codecMap = parser.getCodecMap();
+
+    sp<IMediaCodecList> list = MediaCodecList::getInstance();
+    ASSERT_NE(list, nullptr) << "Unable to get MediaCodecList instance";
+
+    // Compare CodecMap from XML to CodecList
+    for (auto mapIter : codecMap) {
+        ssize_t index = list->findCodecByName(mapIter.first.c_str());
+        if (index < 0) {
+            std::cout << "[   WARN   ] " << mapIter.first << " not found in CodecList \n";
+            continue;
+        }
+
+        sp<MediaCodecInfo> info = list->getCodecInfo(index);
+        ASSERT_NE(info, nullptr) << "CodecInfo is null";
+
+        MediaCodecsXmlParser::CodecProperties codecProperties = mapIter.second;
+        ASSERT_EQ(codecProperties.isEncoder, info->isEncoder()) << "Encoder property mismatch";
+
+        ALOGV("codec name: %s", info->getCodecName());
+        ALOGV("codec rank: %d", info->getRank());
+        ALOGV("codec ownername: %s", info->getOwnerName());
+        ALOGV("codec isEncoder: %d", info->isEncoder());
+
+        ALOGV("attributeFlags: kFlagIsHardwareAccelerated, kFlagIsSoftwareOnly, kFlagIsVendor, "
+              "kFlagIsEncoder");
+        std::bitset<4> attr(info->getAttributes());
+        ALOGV("codec attributes: %s", attr.to_string().c_str());
+
+        Vector<AString> mediaTypes;
+        info->getSupportedMediaTypes(&mediaTypes);
+        ALOGV("supported media types count: %zu", mediaTypes.size());
+        ASSERT_FALSE(mediaTypes.isEmpty())
+                << "no media type supported by codec: " << info->getCodecName();
+
+        MediaCodecsXmlParser::TypeMap typeMap = codecProperties.typeMap;
+        for (auto mediaType : mediaTypes) {
+            ALOGV("codec mediaTypes: %s", mediaType.c_str());
+            auto searchTypeMap = typeMap.find(mediaType.c_str());
+            ASSERT_NE(searchTypeMap, typeMap.end())
+                    << "CodecList doesn't contain codec media type: " << mediaType.c_str();
+            MediaCodecsXmlParser::AttributeMap attributeMap = searchTypeMap->second;
+
+            const sp<MediaCodecInfo::Capabilities> &capabilities =
+                    info->getCapabilitiesFor(mediaType.c_str());
+
+            Vector<uint32_t> colorFormats;
+            capabilities->getSupportedColorFormats(&colorFormats);
+            for (auto colorFormat : colorFormats) {
+                ALOGV("supported color formats: %d", colorFormat);
+            }
+
+            Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+            capabilities->getSupportedProfileLevels(&profileLevels);
+            if (!profileLevels.empty()) {
+                ALOGV("supported profilelevel for media type: %s", mediaType.c_str());
+            }
+            for (auto profileLevel : profileLevels) {
+                ALOGV("profile: %d, level: %d", profileLevel.mProfile, profileLevel.mLevel);
+            }
+
+            sp<AMessage> details = capabilities->getDetails();
+            ASSERT_NE(details, nullptr) << "Details in codec capabilities is null";
+            ALOGV("no. of entries in details: %zu", details->countEntries());
+
+            for (size_t idxDetail = 0; idxDetail < details->countEntries(); idxDetail++) {
+                AMessage::Type type;
+                const char *name = details->getEntryNameAt(idxDetail, &type);
+                ALOGV("details entry name: %s", name);
+                AMessage::ItemData itemData = details->getEntryAt(idxDetail);
+                switch (type) {
+                    case AMessage::kTypeInt32:
+                        int32_t val32;
+                        if (itemData.find(&val32)) {
+                            ALOGV("entry int val: %d", val32);
+                            auto searchAttr = attributeMap.find(name);
+                            if (searchAttr == attributeMap.end()) {
+                                ALOGW("Parser doesn't have key: %s", name);
+                            } else if (stoi(searchAttr->second) != val32) {
+                                ALOGW("Values didn't match for key: %s", name);
+                                ALOGV("Values act/exp: %d / %d", val32, stoi(searchAttr->second));
+                            }
+                        }
+                        break;
+                    case AMessage::kTypeString:
+                        if (AString valStr; itemData.find(&valStr)) {
+                            ALOGV("entry str val: %s", valStr.c_str());
+                            auto searchAttr = attributeMap.find(name);
+                            if (searchAttr == attributeMap.end()) {
+                                ALOGW("Parser doesn't have key: %s", name);
+                            } else if (searchAttr->second != valStr.c_str()) {
+                                ALOGW("Values didn't match for key: %s", name);
+                                ALOGV("Values act/exp: %s / %s", valStr.c_str(),
+                                      searchAttr->second.c_str());
+                            }
+                        }
+                        break;
+                    default:
+                        ALOGV("data type: %d shouldn't be present in details", type);
+                        break;
+                }
+            }
+        }
+
+        Parcel *codecInfoParcel = new Parcel();
+        ASSERT_NE(codecInfoParcel, nullptr) << "Unable to create parcel";
+
+        status_t status = info->writeToParcel(codecInfoParcel);
+        ASSERT_EQ(status, OK) << "Writing to parcel failed";
+
+        codecInfoParcel->setDataPosition(0);
+        sp<MediaCodecInfo> parcelCodecInfo = info->FromParcel(*codecInfoParcel);
+        ASSERT_NE(parcelCodecInfo, nullptr) << "CodecInfo from parcel is null";
+        delete codecInfoParcel;
+
+        EXPECT_STREQ(info->getCodecName(), parcelCodecInfo->getCodecName())
+                << "Returned codec name in info doesn't match";
+        EXPECT_EQ(info->getRank(), parcelCodecInfo->getRank())
+                << "Returned component rank in info doesn't match";
+    }
+}
+
+TEST(CodecListTest, CodecListGlobalSettingsTest) {
+    sp<IMediaCodecList> list = MediaCodecList::getInstance();
+    ASSERT_NE(list, nullptr) << "Unable to get MediaCodecList instance";
+
+    sp<AMessage> globalSettings = list->getGlobalSettings();
+    ASSERT_NE(globalSettings, nullptr) << "GlobalSettings AMessage is null";
+    ALOGV("global settings: %s", globalSettings->debugString(0).c_str());
+}
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 83da092..9533ae5 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -146,7 +146,9 @@
     }
 
     // Close socket before posting message to RTSPSource message handler.
-    close(mHandler->getARTSPConnection()->getSocket());
+    if (mHandler != NULL) {
+        close(mHandler->getARTSPConnection()->getSocket());
+    }
 
     sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
 
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d677744..ffe3052 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -5804,17 +5804,19 @@
 
         case ACodec::kWhatSetSurface:
         {
-            sp<AReplyToken> replyID;
-            CHECK(msg->senderAwaitsResponse(&replyID));
-
             sp<RefBase> obj;
             CHECK(msg->findObject("surface", &obj));
 
             status_t err = mCodec->handleSetSurface(static_cast<Surface *>(obj.get()));
 
-            sp<AMessage> response = new AMessage;
-            response->setInt32("err", err);
-            response->postReply(replyID);
+            sp<AReplyToken> replyID;
+            if (msg->senderAwaitsResponse(&replyID)) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", err);
+                response->postReply(replyID);
+            } else if (err != OK) {
+                mCodec->signalError(OMX_ErrorUndefined, err);
+            }
             break;
         }
 
@@ -8310,6 +8312,23 @@
             break;
         }
 
+        case kWhatSetSurface:
+        {
+            ALOGV("[%s] Deferring setSurface", mCodec->mComponentName.c_str());
+
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            mCodec->deferMessage(msg);
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", OK);
+            response->postReply(replyID);
+
+            handled = true;
+            break;
+        }
+
         case kWhatCheckIfStuck:
         {
             int32_t generation = 0;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c38de64..69084bf 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -637,6 +637,9 @@
       mDequeueInputReplyID(0),
       mDequeueOutputTimeoutGeneration(0),
       mDequeueOutputReplyID(0),
+      mTunneledInputWidth(0),
+      mTunneledInputHeight(0),
+      mTunneled(false),
       mHaveInputSurface(false),
       mHavePendingInputBuffers(false),
       mCpuBoostRequested(false),
@@ -2951,6 +2954,14 @@
 
             extractCSD(format);
 
+            int32_t tunneled;
+            if (format->findInt32("feature-tunneled-playback", &tunneled) && tunneled != 0) {
+                ALOGI("Configuring TUNNELED video playback.");
+                mTunneled = true;
+            } else {
+                mTunneled = false;
+            }
+
             mCodec->initiateConfigureComponent(format);
             break;
         }
@@ -3930,7 +3941,18 @@
     if (hasCryptoOrDescrambler() && !c2Buffer && !memory) {
         AString *errorDetailMsg;
         CHECK(msg->findPointer("errorDetailMsg", (void **)&errorDetailMsg));
-
+        // Notify mCrypto of video resolution changes
+        if (mTunneled && mCrypto != NULL) {
+            int32_t width, height;
+            if (mInputFormat->findInt32("width", &width) &&
+                mInputFormat->findInt32("height", &height) && width > 0 && height > 0) {
+                if (width != mTunneledInputWidth || height != mTunneledInputHeight) {
+                    mTunneledInputWidth = width;
+                    mTunneledInputHeight = height;
+                    mCrypto->notifyResolution(width, height);
+                }
+            }
+        }
         err = mBufferChannel->queueSecureInputBuffer(
                 buffer,
                 (mFlags & kFlagIsSecure),
diff --git a/media/libstagefright/codecs/amrnb/enc/fuzzer/Android.bp b/media/libstagefright/codecs/amrnb/enc/fuzzer/Android.bp
new file mode 100644
index 0000000..e88e5eb
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/fuzzer/Android.bp
@@ -0,0 +1,41 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+
+cc_fuzz {
+    name: "amrnb_enc_fuzzer",
+    host_supported: true,
+
+    srcs: [
+        "amrnb_enc_fuzzer.cpp",
+    ],
+
+    static_libs: [
+        "liblog",
+        "libstagefright_amrnbenc",
+        "libstagefright_amrnb_common",
+    ],
+
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/media/libstagefright/codecs/amrnb/enc/fuzzer/README.md b/media/libstagefright/codecs/amrnb/enc/fuzzer/README.md
new file mode 100644
index 0000000..239b4a8
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/fuzzer/README.md
@@ -0,0 +1,60 @@
+# Fuzzer for libstagefright_amrnbenc encoder
+
+## Plugin Design Considerations
+The fuzzer plugin for AMR-NB is designed based on the understanding of the
+codec and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+AMR-NB supports the following parameters:
+1. Output Format (parameter name: `outputFormat`)
+2. Mode (parameter name: `mode`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `outputFormat` | 0. `AMR_TX_WMF` 1. `AMR_TX_IF2` 2. `AMR_TX_ETS` | Bits 0, 1 and 2 of 1st byte of data. |
+| `mode`   | 0. `MR475` 1. `MR515` 2. `MR59` 3. `MR67`  4. `MR74 ` 5. `MR795` 6. `MR102` 7. `MR122` 8. `MRDTX` | Bits 3, 4, 5 and 6 of 1st byte of data. |
+
+This also ensures that the plugin is always deterministic for any given input.
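+
+For reference, the sketch below shows how the first byte of input data is
+decoded (mirroring the decoding in `amrnb_enc_fuzzer.cpp`); the helper names
+`pickMode`/`pickOutputFormat` are illustrative only and are not encoder APIs.
+
+```
+// Sketch: how the first input byte selects mode and output format,
+// mirroring amrnb_enc_fuzzer.cpp. Helper names are illustrative only.
+#include <stdint.h>
+#include "gsmamr_enc.h"  // Mode, MR475..MRDTX, AMR_TX_*, Word16
+
+static Mode pickMode(uint8_t startByte) {
+    static const Mode kModes[9] = {MR475, MR515, MR59,  MR67, MR74,
+                                   MR795, MR102, MR122, MRDTX};
+    return kModes[(startByte >> 3) % 9];  // e.g. 0x2B -> index 5 -> MR795
+}
+
+static Word16 pickOutputFormat(uint8_t startByte) {
+    static const Word16 kFormats[3] = {AMR_TX_WMF, AMR_TX_IF2, AMR_TX_ETS};
+    return kFormats[startByte % 3];       // e.g. 0x2B -> index 1 -> AMR_TX_IF2
+}
+```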
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the codec using a loop.
+If the encode operation was successful, the input is advanced by the frame size.
+If the encode operation was unsuccessful, the input is still advanced by the
+frame size so that the fuzzer can proceed to feed the next frame.
+
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the amrnb_enc_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) amrnb_enc_fuzzer
+```
+
+#### Steps to run
+Create a directory CORPUS_DIR, copy some PCM files into it, and push the
+directory to the device.
+
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/amrnb_enc_fuzzer/amrnb_enc_fuzzer CORPUS_DIR
+```
+To run on host
+```
+  $ $ANDROID_HOST_OUT/fuzz/x86_64/amrnb_enc_fuzzer/amrnb_enc_fuzzer CORPUS_DIR
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/media/libstagefright/codecs/amrnb/enc/fuzzer/amrnb_enc_fuzzer.cpp b/media/libstagefright/codecs/amrnb/enc/fuzzer/amrnb_enc_fuzzer.cpp
new file mode 100644
index 0000000..2fcbf24
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/enc/fuzzer/amrnb_enc_fuzzer.cpp
@@ -0,0 +1,105 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+#include <string.h>
+#include <utils/Log.h>
+#include <algorithm>
+#include "gsmamr_enc.h"
+
+// Constants for AMR-NB
+const int32_t kNumInputSamples = L_FRAME;  // 160 samples
+const int32_t kOutputBufferSize = 2 * kNumInputSamples * sizeof(Word16);
+const Mode kModes[9] = {MR475, /* 4.75 kbps */
+                        MR515, /* 5.15 kbps */
+                        MR59,  /* 5.90 kbps */
+                        MR67,  /* 6.70 kbps */
+                        MR74,  /* 7.40 kbps */
+                        MR795, /* 7.95 kbps */
+                        MR102, /* 10.2 kbps */
+                        MR122, /* 12.2 kbps */
+                        MRDTX, /* DTX       */};
+const Word16 kOutputFormat[3] = {AMR_TX_WMF, AMR_TX_IF2, AMR_TX_ETS};
+
+class Codec {
+   public:
+    Codec() = default;
+    ~Codec() { deInitEncoder(); }
+    Word16 initEncoder(const uint8_t *data);
+    void deInitEncoder();
+    void encodeFrames(const uint8_t *data, size_t size);
+
+   private:
+    void *mEncState = nullptr;
+    void *mSidState = nullptr;
+};
+
+Word16 Codec::initEncoder(const uint8_t *data) {
+    return AMREncodeInit(&mEncState, &mSidState, (*data >> 1) & 0x01 /* dtx_enable flag */);
+}
+
+void Codec::deInitEncoder() {
+    if (mEncState) {
+        AMREncodeExit(&mEncState, &mSidState);
+        mEncState = nullptr;
+        mSidState = nullptr;
+    }
+}
+
+void Codec::encodeFrames(const uint8_t *data, size_t size) {
+    AMREncodeReset(mEncState, mSidState);
+    uint8_t startByte = *data;
+    int modeIndex = ((startByte >> 3) % 9);
+    int outputFormatIndex = (startByte % 3);
+    Mode mode = kModes[modeIndex];
+    Word16 outputFormat = kOutputFormat[outputFormatIndex];
+
+    // Consume startByte
+    data++;
+    size--;
+
+    while (size > 0) {
+        Frame_Type_3GPP frameType = (Frame_Type_3GPP)mode;
+
+        Word16 inputBuf[kNumInputSamples] = {};
+        int32_t minSize = std::min(size, sizeof(inputBuf));
+
+        uint8_t outputBuf[kOutputBufferSize] = {};
+        memcpy(inputBuf, data, minSize);
+
+        AMREncode(mEncState, mSidState, mode, inputBuf, outputBuf, &frameType, outputFormat);
+
+        data += minSize;
+        size -= minSize;
+    }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    if (size < 1) {
+        return 0;
+    }
+    Codec *codec = new Codec();
+    if (!codec) {
+        return 0;
+    }
+    if (codec->initEncoder(data) == 0) {
+        codec->encodeFrames(data, size);
+    }
+    delete codec;
+    return 0;
+}
diff --git a/media/libstagefright/codecs/amrwbenc/fuzzer/Android.bp b/media/libstagefright/codecs/amrwbenc/fuzzer/Android.bp
new file mode 100644
index 0000000..e3473d6
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/fuzzer/Android.bp
@@ -0,0 +1,41 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+
+cc_fuzz {
+    name: "amrwb_enc_fuzzer",
+    host_supported: true,
+
+    srcs: [
+        "amrwb_enc_fuzzer.cpp",
+    ],
+
+    static_libs: [
+        "liblog",
+        "libstagefright_amrwbenc",
+        "libstagefright_enc_common",
+    ],
+
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/media/libstagefright/codecs/amrwbenc/fuzzer/README.md b/media/libstagefright/codecs/amrwbenc/fuzzer/README.md
new file mode 100644
index 0000000..447fbfa
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/fuzzer/README.md
@@ -0,0 +1,60 @@
+# Fuzzer for libstagefright_amrwbenc encoder
+
+## Plugin Design Considerations
+The fuzzer plugin for AMR-WB is designed based on the understanding of the
+codec and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+AMR-WB supports the following parameters:
+1. Frame Type (parameter name: `frameType`)
+2. Mode (parameter name: `mode`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `frameType` | 0. `VOAMRWB_DEFAULT` 1. `VOAMRWB_ITU` 2. `VOAMRWB_RFC3267` | Bits 0, 1 and 2 of 1st byte of data. |
+| `mode`   | 0. `VOAMRWB_MD66` 1. `VOAMRWB_MD885` 2. `VOAMRWB_MD1265` 3. `VOAMRWB_MD1425`  4. `VOAMRWB_MD1585 ` 5. `VOAMRWB_MD1825` 6. `VOAMRWB_MD1985` 7. `VOAMRWB_MD2305` 8. `VOAMRWB_MD2385` 9. `VOAMRWB_N_MODES` | Bits 4, 5, 6 and 7 of 1st byte of data. |
+
+This also ensures that the plugin is always deterministic for any given input.
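+
+For reference, the sketch below shows how the first byte of input data is
+decoded (mirroring the decoding in `amrwb_enc_fuzzer.cpp`); the helper names
+`pickMode`/`pickFrameType` are illustrative only and are not encoder APIs.
+
+```
+// Sketch: how the first input byte selects mode and frame type,
+// mirroring amrwb_enc_fuzzer.cpp. Helper names are illustrative only.
+#include <stdint.h>
+#include "voAMRWB.h"  // VOAMRWB_MD66..VOAMRWB_N_MODES, VOAMRWBFRAMETYPE
+
+static int32_t pickMode(uint8_t startByte) {
+    static const int32_t kModes[10] = {
+            VOAMRWB_MD66,   VOAMRWB_MD885,  VOAMRWB_MD1265, VOAMRWB_MD1425,
+            VOAMRWB_MD1585, VOAMRWB_MD1825, VOAMRWB_MD1985, VOAMRWB_MD2305,
+            VOAMRWB_MD2385, VOAMRWB_N_MODES};
+    return kModes[(startByte >> 4) % 10];  // e.g. 0x5A -> index 5 -> VOAMRWB_MD1825
+}
+
+static VOAMRWBFRAMETYPE pickFrameType(uint8_t startByte) {
+    static const VOAMRWBFRAMETYPE kTypes[3] = {VOAMRWB_DEFAULT, VOAMRWB_ITU,
+                                               VOAMRWB_RFC3267};
+    return kTypes[startByte % 3];          // e.g. 0x5A -> index 0 -> VOAMRWB_DEFAULT
+}
+```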
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the codec using a loop.
+If the encode operation was successful, the input is advanced by the frame size.
+If the encode operation was unsuccessful, the input is still advanced by the
+frame size so that the fuzzer can proceed to feed the next frame.
+
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the amrwb_enc_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) amrwb_enc_fuzzer
+```
+
+#### Steps to run
+Create a directory CORPUS_DIR, copy some PCM files into it, and push the
+directory to the device.
+
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/amrwb_enc_fuzzer/amrwb_enc_fuzzer CORPUS_DIR
+```
+To run on host
+```
+  $ $ANDROID_HOST_OUT/fuzz/x86_64/amrwb_enc_fuzzer/amrwb_enc_fuzzer CORPUS_DIR
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/media/libstagefright/codecs/amrwbenc/fuzzer/amrwb_enc_fuzzer.cpp b/media/libstagefright/codecs/amrwbenc/fuzzer/amrwb_enc_fuzzer.cpp
new file mode 100644
index 0000000..4773a1f
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/fuzzer/amrwb_enc_fuzzer.cpp
@@ -0,0 +1,142 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+#include <string.h>
+#include <utils/Log.h>
+#include <algorithm>
+#include "cmnMemory.h"
+#include "voAMRWB.h"
+#include "cnst.h"
+
+typedef int(VO_API *VOGETAUDIOENCAPI)(VO_AUDIO_CODECAPI *pEncHandle);
+const int32_t kInputBufferSize = L_FRAME16k * sizeof(int16_t) * 2;
+const int32_t kOutputBufferSize = 2 * kInputBufferSize;
+const int32_t kModes[] = {VOAMRWB_MD66 /* 6.60kbps */,    VOAMRWB_MD885 /* 8.85kbps */,
+                          VOAMRWB_MD1265 /* 12.65kbps */, VOAMRWB_MD1425 /* 14.25kbps */,
+                          VOAMRWB_MD1585 /* 15.85kbps */, VOAMRWB_MD1825 /* 18.25kbps */,
+                          VOAMRWB_MD1985 /* 19.85kbps */, VOAMRWB_MD2305 /* 23.05kbps */,
+                          VOAMRWB_MD2385 /* 23.85kbps */, VOAMRWB_N_MODES /* Invalid Mode */};
+const VOAMRWBFRAMETYPE kFrameTypes[] = {VOAMRWB_DEFAULT, VOAMRWB_ITU, VOAMRWB_RFC3267};
+
+class Codec {
+   public:
+    Codec() = default;
+    ~Codec() { deInitEncoder(); }
+    bool initEncoder(const uint8_t *data);
+    void deInitEncoder();
+    void encodeFrames(const uint8_t *data, size_t size);
+
+   private:
+    VO_AUDIO_CODECAPI *mApiHandle = nullptr;
+    VO_MEM_OPERATOR *mMemOperator = nullptr;
+    VO_HANDLE mEncoderHandle = nullptr;
+};
+
+bool Codec::initEncoder(const uint8_t *data) {
+    uint8_t startByte = *data;
+    int32_t mode = kModes[(startByte >> 4) % 10];
+    VOAMRWBFRAMETYPE frameType = kFrameTypes[startByte % 3];
+    mMemOperator = new VO_MEM_OPERATOR;
+    if (!mMemOperator) {
+        return false;
+    }
+
+    mMemOperator->Alloc = cmnMemAlloc;
+    mMemOperator->Copy = cmnMemCopy;
+    mMemOperator->Free = cmnMemFree;
+    mMemOperator->Set = cmnMemSet;
+    mMemOperator->Check = cmnMemCheck;
+
+    VO_CODEC_INIT_USERDATA userData;
+    memset(&userData, 0, sizeof(userData));
+    userData.memflag = VO_IMF_USERMEMOPERATOR;
+    userData.memData = (VO_PTR)mMemOperator;
+
+    mApiHandle = new VO_AUDIO_CODECAPI;
+    if (!mApiHandle) {
+        return false;
+    }
+    if (VO_ERR_NONE != voGetAMRWBEncAPI(mApiHandle)) {
+        // Failed to get api handle
+        return false;
+    }
+    if (VO_ERR_NONE != mApiHandle->Init(&mEncoderHandle, VO_AUDIO_CodingAMRWB, &userData)) {
+        // Failed to init AMRWB encoder
+        return false;
+    }
+    if (VO_ERR_NONE != mApiHandle->SetParam(mEncoderHandle, VO_PID_AMRWB_FRAMETYPE, &frameType)) {
+        // Failed to set AMRWB encoder frame type
+        return false;
+    }
+    if (VO_ERR_NONE != mApiHandle->SetParam(mEncoderHandle, VO_PID_AMRWB_MODE, &mode)) {
+        // Failed to set AMRWB encoder mode
+        return false;
+    }
+    return true;
+}
+
+void Codec::deInitEncoder() {
+    if (mEncoderHandle) {
+        mApiHandle->Uninit(mEncoderHandle);
+        mEncoderHandle = nullptr;
+    }
+    if (mApiHandle) {
+        delete mApiHandle;
+        mApiHandle = nullptr;
+    }
+    if (mMemOperator) {
+        delete mMemOperator;
+        mMemOperator = nullptr;
+    }
+}
+
+void Codec::encodeFrames(const uint8_t *data, size_t size) {
+    do {
+        int32_t minSize = std::min((int32_t)size, kInputBufferSize);
+        uint8_t outputBuf[kOutputBufferSize] = {};
+        VO_CODECBUFFER inData;
+        VO_CODECBUFFER outData;
+        VO_AUDIO_OUTPUTINFO outFormat;
+        inData.Buffer = (unsigned char *)data;
+        inData.Length = minSize;
+        outData.Buffer = outputBuf;
+        mApiHandle->SetInputData(mEncoderHandle, &inData);
+        mApiHandle->GetOutputData(mEncoderHandle, &outData, &outFormat);
+        data += minSize;
+        size -= minSize;
+    } while (size > 0);
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    if (size < 1) {
+        return 0;
+    }
+    Codec *codec = new Codec();
+    if (!codec) {
+        return 0;
+    }
+    if (codec->initEncoder(data)) {
+        // Consume first byte
+        ++data;
+        --size;
+        codec->encodeFrames(data, size);
+    }
+    delete codec;
+    return 0;
+}
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.bp b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
index b8bc24e..13d310d 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.bp
@@ -6,6 +6,12 @@
         "com.android.media.swcodec",
     ],
     min_sdk_version: "29",
+    host_supported: true,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
 
     srcs: [
         "src/bitstream_io.cpp",
diff --git a/media/libstagefright/codecs/m4v_h263/fuzzer/Android.bp b/media/libstagefright/codecs/m4v_h263/fuzzer/Android.bp
index 56fc782..778dafb 100644
--- a/media/libstagefright/codecs/m4v_h263/fuzzer/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/fuzzer/Android.bp
@@ -18,25 +18,24 @@
  * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
  */
 
-cc_fuzz {
-    name: "mpeg4_dec_fuzzer",
+cc_defaults {
+    name: "mpeg4_h263_dec_fuzz_defaults",
+
     host_supported: true,
+
     srcs: [
         "mpeg4_h263_dec_fuzzer.cpp",
     ],
+
     static_libs: [
         "libstagefright_m4vh263dec",
         "liblog",
     ],
+
     cflags: [
         "-DOSCL_IMPORT_REF=",
-        "-DMPEG4",
     ],
-    target: {
-        darwin: {
-            enabled: false,
-        },
-    },
+
     fuzz_config: {
         cc: [
             "android-media-fuzzing-reports@google.com",
@@ -46,23 +45,45 @@
 }
 
 cc_fuzz {
-    name: "h263_dec_fuzzer",
-    host_supported: true,
-    srcs: [
-        "mpeg4_h263_dec_fuzzer.cpp",
+    name: "mpeg4_dec_fuzzer",
+
+    defaults: [
+        "mpeg4_h263_dec_fuzz_defaults",
     ],
-    static_libs: [
-        "libstagefright_m4vh263dec",
+
+    cflags: [
+        "-DMPEG4",
+    ],
+}
+
+cc_fuzz {
+    name: "h263_dec_fuzzer",
+
+    defaults: [
+        "mpeg4_h263_dec_fuzz_defaults",
+    ],
+}
+
+cc_defaults {
+    name: "mpeg4_h263_enc_fuzz_defaults",
+
+    host_supported: true,
+
+    srcs: ["mpeg4_h263_enc_fuzzer.cpp"],
+
+    shared_libs: [
+        "libutils",
         "liblog",
     ],
-    cflags: [
-        "-DOSCL_IMPORT_REF=",
+
+    static_libs: [
+        "libstagefright_m4vh263enc",
     ],
-    target: {
-        darwin: {
-            enabled: false,
-        },
-    },
+
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
     fuzz_config: {
         cc: [
             "android-media-fuzzing-reports@google.com",
@@ -70,3 +91,21 @@
         componentid: 155276,
     },
 }
+
+cc_fuzz {
+    name: "mpeg4_enc_fuzzer",
+
+    defaults: [
+        "mpeg4_h263_enc_fuzz_defaults",
+    ],
+
+    cflags: ["-DMPEG4"],
+}
+
+cc_fuzz {
+    name: "h263_enc_fuzzer",
+
+    defaults: [
+        "mpeg4_h263_enc_fuzz_defaults",
+    ],
+}
diff --git a/media/libstagefright/codecs/m4v_h263/fuzzer/README.md b/media/libstagefright/codecs/m4v_h263/fuzzer/README.md
index c2a4f69..ad4ff97 100644
--- a/media/libstagefright/codecs/m4v_h263/fuzzer/README.md
+++ b/media/libstagefright/codecs/m4v_h263/fuzzer/README.md
@@ -52,6 +52,107 @@
   $ $ANDROID_HOST_OUT/fuzz/x86_64/h263_dec_fuzzer/h263_dec_fuzzer CORPUS_DIR
 ```
 
+# Fuzzer for libstagefright_m4vh263enc encoder
+
+## Plugin Design Considerations
+The fuzzer plugin for MPEG4/H263 is designed based on the understanding of the
+codec and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+MPEG4/H263 supports the following parameters:
+1. Frame Width (parameter name: `encWidth`)
+2. Frame Height (parameter name: `encHeight`)
+3. Rate control mode (parameter name: `rcType`)
+4. Number of bytes per packet (parameter name: `packetSize`)
+5. Qp for I-Vop (parameter name: `iQuant`)
+6. Qp for P-Vop (parameter name: `pQuant`)
+7. Enable RVLC mode (parameter name: `rvlcEnable`)
+8. Quantization mode (parameter name: `quantType`)
+9. Disable frame skipping (parameter name: `noFrameSkipped`)
+10. Enable scene change detection (parameter name: `sceneDetect`)
+11. Number of intra MBs in P-frame (parameter name: `numIntraMB`)
+12. Search range of ME (parameter name: `searchRange`)
+13. Enable 8x8 ME and MC (parameter name: `mv8x8Enable`)
+14. Enable AC prediction (parameter name: `useACPred`)
+15. Threshold for intra DC VLC (parameter name: `intraDCVlcTh`)
+16. Encoding Mode (parameter name: `encMode`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `rcType` | 0. `CONSTANT_Q` 1. `CBR_1` 2. `VBR_1` 3. `CBR_2` 4. `VBR_2` 5. `CBR_LOWDELAY` | All the bits of 6th byte of data modulus 6 |
+| `packetSize` | In the range `0 to 255` | All the bits of 7th byte of data |
+| `iQuant` | In the range `1 to 31` | All the bits of 8th byte of data |
+| `pQuant` | In the range `1 to 31` | All the bits of 9th byte of data |
+| `rvlcEnable` | 0. `PV_OFF` 1. `PV_ON` | bit 0 of 10th byte of data |
+| `quantType` | 0. `0` 1. `1` | bit 0 of 11th byte of data |
+| `noFrameSkipped` | 0. `PV_OFF` 1. `PV_ON` | bit 0 of 12th byte of data |
+| `sceneDetect` | 0. `PV_OFF` 1. `PV_ON` | bit 0 of 13th byte of data |
+| `numIntraMB` | In the range `0 to 7` | bit 0, 1 and 2 of 14th byte of data |
+| `searchRange` | In the range `0 to 31` | bit 0, 1, 2, 3 and 4 of 15th byte of data |
+| `mv8x8Enable` | 0. `PV_OFF` 1. `PV_ON` | bit 0 of 16th byte of data |
+| `useACPred` | 0. `PV_OFF` 1. `PV_ON` | bit 0 of 17th byte of data |
+| `intraDCVlcTh` | In the range `0 to 7` | bit 0, 1 and 2 of 18th byte of data |
+
+Following parameters are only for mpeg4_enc_fuzzer
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `encWidth` | In the range `0 to 10239` | All the bits of 1st and 2nd byte of data |
+| `encHeight` | In the range `0 to 10239` | All the bits of 3rd and 4th byte of data |
+| `encMode` | 0. `H263_MODE` 1. `H263_MODE_WITH_ERR_RES` 2. `DATA_PARTITIONING_MODE` 3. `COMBINE_MODE_NO_ERR_RES` 4. `COMBINE_MODE_WITH_ERR_RES` | All the bits of 19th byte of data modulus 5 |
+
+Following parameters are only for h263_enc_fuzzer
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `encWidth` | 0. `128` 1. `176` 2. `352` 3. `704` 4. `1408` | All the bits of 1st byte of data modulus 5|
+| `encHeight` | 0. `96` 1. `144` 2. `288` 3. `576` 4. `1152 ` | All the bits of 3rd byte of data modulus 5|
+| `encMode` | 0. `SHORT_HEADER` 1. `SHORT_HEADER_WITH_ERR_RES` | All the bits of 19th byte of data modulus 2 |
+
+This also ensures that the plugin is always deterministic for any given input.
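+
+For reference, the sketch below shows how the frame dimensions are derived
+from the input data (mirroring the derivation in `mpeg4_h263_enc_fuzzer.cpp`);
+the helper name `pickFrameSize` is illustrative only and is not an encoder API.
+
+```
+// Sketch: how the leading input bytes select encWidth/encHeight, mirroring
+// mpeg4_h263_enc_fuzzer.cpp. The helper name is illustrative only; data is
+// the fuzzer input buffer.
+#include <stdint.h>
+
+static void pickFrameSize(const uint8_t *data, int32_t *width, int32_t *height) {
+#ifdef MPEG4
+    *width = ((data[0] << 8) | data[1]) % 10240;   // encWidth in 0..10239
+    *height = ((data[2] << 8) | data[3]) % 10240;  // encHeight in 0..10239
+#else
+    static const int kWidth[] = {128, 176, 352, 704, 1408};
+    static const int kHeight[] = {96, 144, 288, 576, 1152};
+    *width = kWidth[data[0] % 5];    // e.g. data[0] == 0x07 -> 352
+    *height = kHeight[data[2] % 5];  // e.g. data[2] == 0x07 -> 288
+#endif
+}
+```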
+
+##### Maximize utilization of input data
+The plugin feeds the entire input data to the codec using a loop.
+If the encode operation was successful, the input is advanced by the frame size.
+If the encode operation was unsuccessful, the input is still advanced by the
+frame size so that the fuzzer can proceed to feed the next frame.
+
+This ensures that the plugin tolerates any kind of input (empty, huge,
+malformed, etc.) and doesn't `exit()` on any input, thereby increasing the
+chance of identifying vulnerabilities.
+
+## Build
+
+This describes the steps to build the mpeg4_enc_fuzzer and h263_enc_fuzzer binaries.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) mpeg4_enc_fuzzer
+  $ mm -j$(nproc) h263_enc_fuzzer
+```
+
+#### Steps to run
+Create a directory CORPUS_DIR, copy some YUV files into it, and push the
+directory to the device.
+
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/mpeg4_enc_fuzzer/mpeg4_enc_fuzzer CORPUS_DIR
+  $ adb shell /data/fuzz/arm64/h263_enc_fuzzer/h263_enc_fuzzer CORPUS_DIR
+```
+To run on host
+```
+  $ $ANDROID_HOST_OUT/fuzz/x86_64/mpeg4_enc_fuzzer/mpeg4_enc_fuzzer CORPUS_DIR
+  $ $ANDROID_HOST_OUT/fuzz/x86_64/h263_enc_fuzzer/h263_enc_fuzzer CORPUS_DIR
+```
+
 ## References:
  * http://llvm.org/docs/LibFuzzer.html
  * https://github.com/google/oss-fuzz
diff --git a/media/libstagefright/codecs/m4v_h263/fuzzer/mpeg4_h263_enc_fuzzer.cpp b/media/libstagefright/codecs/m4v_h263/fuzzer/mpeg4_h263_enc_fuzzer.cpp
new file mode 100644
index 0000000..f154706
--- /dev/null
+++ b/media/libstagefright/codecs/m4v_h263/fuzzer/mpeg4_h263_enc_fuzzer.cpp
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <algorithm>
+#include "mp4enc_api.h"
+
+constexpr int8_t kIDRFrameRefreshIntervalInSec = 1;
+constexpr MP4RateControlType krcType[] = {CONSTANT_Q, CBR_1, VBR_1, CBR_2, VBR_2, CBR_LOWDELAY};
+#ifdef MPEG4
+constexpr MP4EncodingMode kEncodingMode[] = {SHORT_HEADER, SHORT_HEADER_WITH_ERR_RES,
+                                             DATA_PARTITIONING_MODE, COMBINE_MODE_NO_ERR_RES,
+                                             COMBINE_MODE_WITH_ERR_RES};
+constexpr size_t kMaxWidth = 10240;
+constexpr size_t kMaxHeight = 10240;
+#else
+constexpr MP4EncodingMode kEncodingMode[] = {H263_MODE, H263_MODE_WITH_ERR_RES};
+constexpr int kWidth[] = {128, 176, 352, 704, 1408};
+constexpr int kHeight[] = {96, 144, 288, 576, 1152};
+constexpr size_t kWidthNum = std::size(kWidth);
+constexpr size_t kHeightNum = std::size(kHeight);
+#endif
+
+constexpr size_t krcTypeNum = std::size(krcType);
+constexpr size_t kEncodingModeNum = std::size(kEncodingMode);
+constexpr size_t kMaxQP = 51;
+
+enum {
+    IDX_WD_BYTE_1,
+    IDX_WD_BYTE_2,
+    IDX_HT_BYTE_1,
+    IDX_HT_BYTE_2,
+    IDX_FRAME_RATE,
+    IDX_RC_TYPE,
+    IDX_PACKET_SIZE,
+    IDX_I_FRAME_QP,
+    IDX_P_FRAME_QP,
+    IDX_ENABLE_RVLC,
+    IDX_QUANT_TYPE,
+    IDX_NO_FRAME_SKIPPED_FLAG,
+    IDX_ENABLE_SCENE_DETECT,
+    IDX_NUM_INTRA_MB,
+    IDX_SEARCH_RANGE,
+    IDX_ENABLE_MV_8x8,
+    IDX_USE_AC_PRED,
+    IDX_INTRA_DC_VLC_THRESHOLD,
+    IDX_ENC_MODE,
+    IDX_LAST
+};
+
+class Codec {
+   public:
+    Codec() = default;
+    ~Codec() { deInitEncoder(); }
+    bool initEncoder(const uint8_t *data);
+    void encodeFrames(const uint8_t *data, size_t size);
+    void deInitEncoder();
+
+   private:
+    int32_t mFrameWidth = 352;
+    int32_t mFrameHeight = 288;
+    float mFrameRate = 25.0f;
+    VideoEncOptions *mEncodeHandle = nullptr;
+    VideoEncControls *mEncodeControl = nullptr;
+};
+
+bool Codec::initEncoder(const uint8_t *data) {
+    mEncodeHandle = new VideoEncOptions;
+    if (!mEncodeHandle) {
+        return false;
+    }
+    memset(mEncodeHandle, 0, sizeof(VideoEncOptions));
+    mEncodeControl = new VideoEncControls;
+    if (!mEncodeControl) {
+        return false;
+    }
+    memset(mEncodeControl, 0, sizeof(VideoEncControls));
+    PVGetDefaultEncOption(mEncodeHandle, 0);
+
+#ifdef MPEG4
+    mFrameWidth = ((data[IDX_WD_BYTE_1] << 8) | data[IDX_WD_BYTE_2]) % kMaxWidth;
+    mFrameHeight = ((data[IDX_HT_BYTE_1] << 8) | data[IDX_HT_BYTE_2]) % kMaxHeight;
+#else
+    mFrameWidth = kWidth[data[IDX_WD_BYTE_1] % kWidthNum];
+    mFrameHeight = kHeight[data[IDX_HT_BYTE_1] % kHeightNum];
+#endif
+    mFrameRate = data[IDX_FRAME_RATE];
+    mEncodeHandle->rcType = krcType[data[IDX_RC_TYPE] % krcTypeNum];
+    mEncodeHandle->profile_level = CORE_PROFILE_LEVEL2;
+    mEncodeHandle->packetSize = data[IDX_PACKET_SIZE];
+    mEncodeHandle->iQuant[0] = (data[IDX_I_FRAME_QP] % kMaxQP) + 1;
+    mEncodeHandle->pQuant[0] = (data[IDX_P_FRAME_QP] % kMaxQP) + 1;
+    mEncodeHandle->rvlcEnable = (data[IDX_ENABLE_RVLC] & 0x01) ? PV_OFF : PV_ON;
+    mEncodeHandle->quantType[0] = (data[IDX_QUANT_TYPE] & 0x01) ? 0 : 1;
+    mEncodeHandle->noFrameSkipped = (data[IDX_NO_FRAME_SKIPPED_FLAG] & 0x01) ? PV_OFF : PV_ON;
+    mEncodeHandle->sceneDetect = (data[IDX_ENABLE_SCENE_DETECT] & 0x01) ? PV_OFF : PV_ON;
+    mEncodeHandle->numIntraMB = data[IDX_NUM_INTRA_MB] & 0x07;
+    mEncodeHandle->searchRange = data[IDX_SEARCH_RANGE] & 0x1F;
+    mEncodeHandle->mv8x8Enable = (data[IDX_ENABLE_MV_8x8] & 0x01) ? PV_OFF : PV_ON;
+    mEncodeHandle->useACPred = (data[IDX_USE_AC_PRED] & 0x01) ? PV_OFF : PV_ON;
+    mEncodeHandle->intraDCVlcTh = data[IDX_INTRA_DC_VLC_THRESHOLD] & 0x07;
+    mEncodeHandle->encMode = kEncodingMode[data[IDX_ENC_MODE] % kEncodingModeNum];
+    mEncodeHandle->encWidth[0] = mFrameWidth;
+    mEncodeHandle->encHeight[0] = mFrameHeight;
+    mEncodeHandle->encFrameRate[0] = mFrameRate;
+    mEncodeHandle->tickPerSrc = mEncodeHandle->timeIncRes / mFrameRate;
+    mEncodeHandle->intraPeriod = (kIDRFrameRefreshIntervalInSec * mFrameRate);
+    if (!PVInitVideoEncoder(mEncodeControl, mEncodeHandle)) {
+        return false;
+    }
+    return true;
+}
+
+void Codec::deInitEncoder() {
+    if (mEncodeControl) {
+        PVCleanUpVideoEncoder(mEncodeControl);
+        delete mEncodeControl;
+        mEncodeControl = nullptr;
+    }
+    if (mEncodeHandle) {
+        delete mEncodeHandle;
+        mEncodeHandle = nullptr;
+    }
+}
+
+void Codec::encodeFrames(const uint8_t *data, size_t size) {
+    size_t inputBufferSize = (mFrameWidth * mFrameHeight * 3) / 2;
+    size_t outputBufferSize = inputBufferSize * 2;
+    uint8_t outputBuffer[outputBufferSize];
+
+    // Get VOL header.
+    int32_t sizeOutputBuffer = outputBufferSize;
+    PVGetVolHeader(mEncodeControl, outputBuffer, &sizeOutputBuffer, 0);
+
+    size_t numFrame = 0;
+    while (size > 0) {
+        size_t bytesConsumed = std::min(size, inputBufferSize);
+        uint8_t inputBuffer[inputBufferSize];
+        memcpy(inputBuffer, data, bytesConsumed);
+        if (bytesConsumed < sizeof(inputBuffer)) {
+            memset(inputBuffer + bytesConsumed, data[0], sizeof(inputBuffer) - bytesConsumed);
+        }
+        VideoEncFrameIO videoIn{}, videoOut{};
+        videoIn.height = mFrameHeight;
+        videoIn.pitch = mFrameWidth;
+        videoIn.timestamp = (numFrame * 1000) / mFrameRate;
+        videoIn.yChan = inputBuffer;
+        videoIn.uChan = videoIn.yChan + videoIn.height * videoIn.pitch;
+        videoIn.vChan = videoIn.uChan + ((videoIn.height * videoIn.pitch) >> 2);
+        uint32_t modTimeMs = 0;
+        int32_t dataLength = outputBufferSize;
+        int32_t nLayer = 0;
+        PVEncodeVideoFrame(mEncodeControl, &videoIn, &videoOut, &modTimeMs, outputBuffer,
+                           &dataLength, &nLayer);
+        MP4HintTrack hintTrack;
+        PVGetHintTrack(mEncodeControl, &hintTrack);
+        PVGetOverrunBuffer(mEncodeControl);
+        ++numFrame;
+        data += bytesConsumed;
+        size -= bytesConsumed;
+    }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+    if (size < IDX_LAST) {
+        return 0;
+    }
+    Codec *codec = new Codec();
+    if (!codec) {
+        return 0;
+    }
+    if (codec->initEncoder(data)) {
+        data += IDX_LAST;
+        size -= IDX_LAST;
+        codec->encodeFrames(data, size);
+    }
+    delete codec;
+    return 0;
+}
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index a4836cd..4705e4a 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -418,6 +418,10 @@
 
     sp<ICrypto> mCrypto;
 
+    int32_t mTunneledInputWidth;
+    int32_t mTunneledInputHeight;
+    bool mTunneled;
+
     sp<IDescrambler> mDescrambler;
 
     List<sp<ABuffer> > mCSD;
diff --git a/media/libstagefright/tests/fuzzers/Android.bp b/media/libstagefright/tests/fuzzers/Android.bp
new file mode 100644
index 0000000..49ff69a
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/Android.bp
@@ -0,0 +1,53 @@
+cc_defaults {
+    name: "libstagefright_fuzzer_defaults",
+    cflags: [
+        "-Wno-multichar",
+        "-Werror",
+        "-Wno-error=deprecated-declarations",
+        "-Wall",
+    ],
+    shared_libs: [
+        "libstagefright",
+        "libstagefright_codecbase",
+        "libutils",
+        "libstagefright_foundation",
+        "libmedia",
+        "libaudioclient",
+        "libmedia_omx",
+        "libgui",
+        "libbinder",
+        "libcutils",
+    ],
+}
+
+cc_fuzz {
+    name: "libstagefright_mediaclock_fuzzer",
+    srcs: [
+        "MediaClockFuzzer.cpp",
+    ],
+    defaults: ["libstagefright_fuzzer_defaults"],
+}
+
+cc_fuzz {
+    name: "libstagefright_mediascanner_fuzzer",
+    srcs: [
+        "StagefrightMediaScannerFuzzer.cpp",
+    ],
+    defaults: ["libstagefright_fuzzer_defaults"],
+}
+
+cc_fuzz {
+    name: "libstagefright_skipcutbuffer_fuzzer",
+    srcs: [
+        "SkipCutBufferFuzzer.cpp",
+    ],
+    defaults: ["libstagefright_fuzzer_defaults"],
+}
+
+cc_fuzz {
+    name: "libstagefright_mediamuxer_fuzzer",
+    srcs: [
+        "MediaMuxerFuzzer.cpp",
+    ],
+    defaults: ["libstagefright_fuzzer_defaults"],
+}
diff --git a/media/libstagefright/tests/fuzzers/MediaClockFuzzer.cpp b/media/libstagefright/tests/fuzzers/MediaClockFuzzer.cpp
new file mode 100644
index 0000000..e473541
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/MediaClockFuzzer.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Authors: corbin.souffrant@leviathansecurity.com
+//          dylan.katz@leviathansecurity.com
+
+#include <fuzzer/FuzzedDataProvider.h>
+#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+  sp<MediaClock> mClock(new MediaClock);
+
+  bool registered = false;
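+  // Each loop iteration exercises one MediaClock API chosen from the fuzzed input.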
+  while (fdp.remaining_bytes() > 0) {
+    switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 5)) {
+    case 0: {
+      if (registered == false) {
+        mClock->init();
+        registered = true;
+      }
+      break;
+    }
+    case 1: {
+      int64_t startingTimeMediaUs = fdp.ConsumeIntegral<int64_t>();
+      mClock->setStartingTimeMedia(startingTimeMediaUs);
+      break;
+    }
+    case 2: {
+      mClock->clearAnchor();
+      break;
+    }
+    case 3: {
+      int64_t anchorTimeRealUs = fdp.ConsumeIntegral<int64_t>();
+      int64_t anchorTimeMediaUs = fdp.ConsumeIntegral<int64_t>();
+      int64_t maxTimeMediaUs = fdp.ConsumeIntegral<int64_t>();
+      mClock->updateAnchor(anchorTimeMediaUs, anchorTimeRealUs, maxTimeMediaUs);
+      break;
+    }
+    case 4: {
+      int64_t maxTimeMediaUs = fdp.ConsumeIntegral<int64_t>();
+      mClock->updateMaxTimeMedia(maxTimeMediaUs);
+      break;
+    }
+    case 5: {
+      wp<AMessage> msg(new AMessage);
+      mClock->setNotificationMessage(msg.promote());
+    }
+    }
+  }
+
+  return 0;
+}
+} // namespace android
diff --git a/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.cpp b/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.cpp
new file mode 100644
index 0000000..5df3267
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Authors: corbin.souffrant@leviathansecurity.com
+//          dylan.katz@leviathansecurity.com
+
+#include <MediaMuxerFuzzer.h>
+#include <cutils/ashmem.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <media/stagefright/MediaMuxer.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+// setBuffer and setString could not be made to work here: they consistently
+// segfault on a null pointer read or leak memory, so that functionality is omitted.
+void createMessage(AMessage *msg, FuzzedDataProvider *fdp) {
+  size_t count = fdp->ConsumeIntegralInRange<size_t>(0, 32);
+  while (fdp->remaining_bytes() > 0 && count > 0) {
+    uint8_t function_id =
+        fdp->ConsumeIntegralInRange<uint8_t>(0, amessage_setvals.size() - 1);
+    amessage_setvals[function_id](msg, fdp);
+    count--;
+  }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+
+  size_t data_size = fdp.ConsumeIntegralInRange<size_t>(0, size);
+  int fd = ashmem_create_region("mediamuxer_fuzz_region", data_size);
+  if (fd < 0)
+    return 0;
+
+  uint8_t *sh_data = static_cast<uint8_t *>(
+      mmap(NULL, data_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
+  if (sh_data == MAP_FAILED) {
+    close(fd);
+    return 0;
+  }
+
+  MediaMuxer::OutputFormat format =
+      (MediaMuxer::OutputFormat)fdp.ConsumeIntegralInRange<int32_t>(0, 4);
+  sp<MediaMuxer> mMuxer(new MediaMuxer(fd, format));
+
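+  // Drive the muxer with a sequence of API calls chosen from the fuzzed input.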
+  while (fdp.remaining_bytes() > 1) {
+    switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 4)) {
+    case 0: {
+      // For some reason it only likes mp4s here...
+      if (format == 1 || format == 4)
+        break;
+
+      sp<AMessage> a_format(new AMessage);
+      createMessage(a_format.get(), &fdp);
+      mMuxer->addTrack(a_format);
+      break;
+    }
+    case 1: {
+      mMuxer->start();
+      break;
+    }
+    case 2: {
+      int degrees = fdp.ConsumeIntegral<int>();
+      mMuxer->setOrientationHint(degrees);
+      break;
+    }
+    case 3: {
+      int latitude = fdp.ConsumeIntegral<int>();
+      int longitude = fdp.ConsumeIntegral<int>();
+      mMuxer->setLocation(latitude, longitude);
+      break;
+    }
+    case 4: {
+      size_t buf_size = fdp.ConsumeIntegralInRange<size_t>(0, data_size);
+      sp<ABuffer> a_buffer(new ABuffer(buf_size));
+
+      size_t trackIndex = fdp.ConsumeIntegral<size_t>();
+      int64_t timeUs = fdp.ConsumeIntegral<int64_t>();
+      uint32_t flags = fdp.ConsumeIntegral<uint32_t>();
+      mMuxer->writeSampleData(a_buffer, trackIndex, timeUs, flags);
+    }
+    }
+  }
+
+  if (fdp.ConsumeBool())
+    mMuxer->stop();
+
+  munmap(sh_data, data_size);
+  close(fd);
+  return 0;
+}
+} // namespace android
diff --git a/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.h b/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.h
new file mode 100644
index 0000000..7d4421d
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/MediaMuxerFuzzer.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Authors: corbin.souffrant@leviathansecurity.com
+//          dylan.katz@leviathansecurity.com
+
+#pragma once
+
+#include <fuzzer/FuzzedDataProvider.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+// The mapping vectors list the attributes that the MediaMuxer
+// class looks for in the message.
+static std::vector<const char *> floatMappings{
+    "capture-rate",
+    "time-lapse-fps",
+    "frame-rate",
+};
+
+static std::vector<const char *> int64Mappings{
+    "exif-offset",    "exif-size", "target-time",
+    "thumbnail-time", "timeUs",    "durationUs",
+};
+
+static std::vector<const char *> int32Mappings{"loop",
+                                               "time-scale",
+                                               "crypto-mode",
+                                               "crypto-default-iv-size",
+                                               "crypto-encrypted-byte-block",
+                                               "crypto-skip-byte-block",
+                                               "frame-count",
+                                               "max-bitrate",
+                                               "pcm-big-endian",
+                                               "temporal-layer-count",
+                                               "temporal-layer-id",
+                                               "thumbnail-width",
+                                               "thumbnail-height",
+                                               "track-id",
+                                               "valid-samples",
+                                               "color-format",
+                                               "ca-system-id",
+                                               "is-sync-frame",
+                                               "bitrate",
+                                               "max-bitrate",
+                                               "width",
+                                               "height",
+                                               "sar-width",
+                                               "sar-height",
+                                               "display-width",
+                                               "display-height",
+                                               "is-default",
+                                               "tile-width",
+                                               "tile-height",
+                                               "grid-rows",
+                                               "grid-cols",
+                                               "rotation-degrees",
+                                               "channel-count",
+                                               "sample-rate",
+                                               "bits-per-sample",
+                                               "channel-mask",
+                                               "encoder-delay",
+                                               "encoder-padding",
+                                               "is-adts",
+                                               "frame-rate",
+                                               "max-height",
+                                               "max-width",
+                                               "max-input-size",
+                                               "haptic-channel-count",
+                                               "pcm-encoding",
+                                               "aac-profile"};
+
+static const std::vector<std::function<void(AMessage *, FuzzedDataProvider *)>>
+    amessage_setvals = {
+        [](AMessage *msg, FuzzedDataProvider *fdp) -> void {
+          msg->setRect("crop", fdp->ConsumeIntegral<int32_t>(),
+                       fdp->ConsumeIntegral<int32_t>(),
+                       fdp->ConsumeIntegral<int32_t>(),
+                       fdp->ConsumeIntegral<int32_t>());
+        },
+        [](AMessage *msg, FuzzedDataProvider *fdp) -> void {
+          msg->setFloat(floatMappings[fdp->ConsumeIntegralInRange<size_t>(
+                            0, floatMappings.size() - 1)],
+                        fdp->ConsumeFloatingPoint<float>());
+        },
+        [](AMessage *msg, FuzzedDataProvider *fdp) -> void {
+          msg->setInt64(int64Mappings[fdp->ConsumeIntegralInRange<size_t>(
+                            0, int64Mappings.size() - 1)],
+                        fdp->ConsumeIntegral<int64_t>());
+        },
+        [](AMessage *msg, FuzzedDataProvider *fdp) -> void {
+          msg->setInt32(int32Mappings[fdp->ConsumeIntegralInRange<size_t>(
+                            0, int32Mappings.size() - 1)],
+                        fdp->ConsumeIntegral<int32_t>());
+        }};
+} // namespace android
diff --git a/media/libstagefright/tests/fuzzers/SkipCutBufferFuzzer.cpp b/media/libstagefright/tests/fuzzers/SkipCutBufferFuzzer.cpp
new file mode 100644
index 0000000..1f78e6d
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/SkipCutBufferFuzzer.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Authors: corbin.souffrant@leviathansecurity.com
+//          dylan.katz@leviathansecurity.com
+
+#include <fuzzer/FuzzedDataProvider.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/SkipCutBuffer.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+  size_t skip = fdp.ConsumeIntegral<size_t>();
+  size_t cut = fdp.ConsumeIntegral<size_t>();
+  size_t num16Channels = fdp.ConsumeIntegral<size_t>();
+  sp<SkipCutBuffer> sBuffer(new SkipCutBuffer(skip, cut, num16Channels));
+
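+  // Exercise SkipCutBuffer with fuzzer-sized buffers through its different submit() overloads.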
+  while (fdp.remaining_bytes() > 0) {
+    // Cap size to 1024 to limit max amount allocated.
+    size_t buf_size = fdp.ConsumeIntegralInRange<size_t>(0, 1024);
+    size_t range = fdp.ConsumeIntegralInRange<size_t>(0, buf_size);
+    size_t length = fdp.ConsumeIntegralInRange<size_t>(0, buf_size - range);
+
+    switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 4)) {
+    case 0: {
+      sp<ABuffer> a_buffer(new ABuffer(buf_size));
+      sp<AMessage> format(new AMessage);
+      sp<MediaCodecBuffer> s_buffer(new MediaCodecBuffer(format, a_buffer));
+      s_buffer->setRange(range, length);
+      sBuffer->submit(s_buffer);
+      break;
+    }
+    case 1: {
+      std::unique_ptr<MediaBufferBase> m_buffer(new MediaBuffer(buf_size));
+      m_buffer->set_range(range, length);
+      sBuffer->submit(reinterpret_cast<MediaBuffer *>(m_buffer.get()));
+      break;
+    }
+    case 2: {
+      sp<ABuffer> a_buffer(new ABuffer(buf_size));
+      sp<AMessage> format(new AMessage);
+      sp<MediaCodecBuffer> s_buffer(new MediaCodecBuffer(format, a_buffer));
+      a_buffer->setRange(range, length);
+      sBuffer->submit(a_buffer);
+      break;
+    }
+    case 3: {
+      sBuffer->clear();
+      break;
+    }
+    case 4: {
+      sBuffer->size();
+    }
+    }
+  }
+  return 0;
+}
+} // namespace android
diff --git a/media/libstagefright/tests/fuzzers/StagefrightMediaScannerFuzzer.cpp b/media/libstagefright/tests/fuzzers/StagefrightMediaScannerFuzzer.cpp
new file mode 100644
index 0000000..a072b7c
--- /dev/null
+++ b/media/libstagefright/tests/fuzzers/StagefrightMediaScannerFuzzer.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Authors: corbin.souffrant@leviathansecurity.com
+//          dylan.katz@leviathansecurity.com
+
+#include <cutils/ashmem.h>
+#include <fuzzer/FuzzedDataProvider.h>
+#include <media/stagefright/StagefrightMediaScanner.h>
+
+#include <cstdio>
+
+namespace android {
+class FuzzMediaScannerClient : public MediaScannerClient {
+public:
+  virtual status_t scanFile(const char *, long long, long long, bool, bool) {
+    return 0;
+  }
+
+  virtual status_t handleStringTag(const char *, const char *) { return 0; }
+
+  virtual status_t setMimeType(const char *) { return 0; }
+};
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
+  FuzzedDataProvider fdp = FuzzedDataProvider(data, size);
+  StagefrightMediaScanner mScanner = StagefrightMediaScanner();
+  // Without this, the fuzzer crashes for some reason.
+  mScanner.setLocale("");
+
+  size_t data_size = fdp.ConsumeIntegralInRange<size_t>(0, size);
+  int fd =
+      ashmem_create_region("stagefrightmediascanner_fuzz_region", data_size);
+  if (fd < 0)
+    return 0;
+
+  uint8_t *sh_data = static_cast<uint8_t *>(
+      mmap(NULL, data_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0));
+  if (sh_data == MAP_FAILED) {
+    close(fd);
+    return 0;
+  }
+
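+  // Alternate between processFile() on fuzzed paths and extractAlbumArt() on the ashmem fd.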
+  while (fdp.remaining_bytes() > 8) {
+    switch (fdp.ConsumeIntegralInRange<uint8_t>(0, 1)) {
+    case 0: {
+      std::string path = fdp.ConsumeRandomLengthString(fdp.remaining_bytes());
+      std::string mimeType =
+          fdp.ConsumeRandomLengthString(fdp.remaining_bytes());
+      std::shared_ptr<MediaScannerClient> client(new FuzzMediaScannerClient());
+      mScanner.processFile(path.c_str(), mimeType.c_str(), *client);
+      break;
+    }
+    case 1: {
+      size_t to_copy = fdp.ConsumeIntegralInRange<size_t>(1, data_size);
+      std::vector<uint8_t> rand_buf = fdp.ConsumeBytes<uint8_t>(to_copy);
+
+      // If fdp doesn't have enough bytes left it will just make a shorter
+      // vector.
+      to_copy = std::min(rand_buf.size(), data_size);
+
+      // Copy the fuzzed bytes into the ashmem region backing fd before scanning it.
+      std::copy(rand_buf.begin(), rand_buf.begin() + to_copy, sh_data);
+      mScanner.extractAlbumArt(fd);
+    }
+    }
+  }
+
+  munmap(sh_data, data_size);
+  close(fd);
+  return 0;
+}
+} // namespace android
diff --git a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
index 3be5e74..dbdb43c 100644
--- a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
+++ b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
@@ -493,7 +493,7 @@
       mPath(path),
       mStatus(NO_INIT) {
     // determine href_base
-    std::string::size_type end = path.rfind("/");
+    std::string::size_type end = path.rfind('/');
     if (end != std::string::npos) {
         mHrefBase = path.substr(0, end + 1);
     }
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1935cde..5f0b575 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -3141,7 +3141,8 @@
 // dumpToThreadLog_l() must be called with AudioFlinger::mLock held
 void AudioFlinger::dumpToThreadLog_l(const sp<ThreadBase> &thread)
 {
-    audio_utils::FdToString fdToString;
+    constexpr int THREAD_DUMP_TIMEOUT_MS = 2;
+    audio_utils::FdToString fdToString("- ", THREAD_DUMP_TIMEOUT_MS);
     const int fd = fdToString.fd();
     if (fd >= 0) {
         thread->dump(fd, {} /* args */);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 384ddb5..b143388 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -7429,7 +7429,7 @@
                         (framesRead - part1) * mFrameSize);
             }
         }
-        rear = mRsmpInRear += framesRead;
+        mRsmpInRear = audio_utils::safe_add_overflow(mRsmpInRear, (int32_t)framesRead);
 
         size = activeTracks.size();
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 0f9bcc1..016aaa5 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -65,6 +65,9 @@
 
     bool supportsFormat(audio_format_t format);
 
+    void setDynamic() { mIsDynamic = true; }
+    bool isDynamic() const { return mIsDynamic; }
+
     // PolicyAudioPortConfig
     virtual sp<PolicyAudioPort> getPolicyAudioPort() const {
         return static_cast<PolicyAudioPort*>(const_cast<DeviceDescriptor*>(this));
@@ -97,6 +100,8 @@
     std::string mTagName; // Unique human readable identifier for a device port found in conf file.
     FormatVector        mEncodedFormats;
     audio_format_t      mCurrentEncodedFormat;
+    bool                mIsDynamic = false;
+    const std::string   mDeclaredAddress; // Original device address
 };
 
 class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index 23f0c9a..b5b10f3 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -131,8 +131,17 @@
 public:
     sp<HwModule> getModuleFromName(const char *name) const;
 
+    /**
+     * @brief getModuleForDeviceType try to get a device from type / format on all modules
+     * @param device type to consider
+     * @param encodedFormat to consider
+     * @param[out] tagName if not null, if a matching device is found, will return the tagName
+     * of the original device from the XML file so that audio route matching rules work.
+     * @return valid module if considered device found, nullptr otherwise.
+     */
     sp<HwModule> getModuleForDeviceType(audio_devices_t device,
-                                        audio_format_t encodedFormat) const;
+                                        audio_format_t encodedFormat,
+                                        std::string *tagName = nullptr) const;
 
     sp<HwModule> getModuleForDevice(const sp<DeviceDescriptor> &device,
                                     audio_format_t encodedFormat) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 5f551d5..11d3a99 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -112,6 +112,19 @@
     }
 
     /**
+     * @brief getTag
+     * @param deviceTypes to be considered
+     * @return tagName of first matching device for the considered types, empty string otherwise.
+     */
+    std::string getTag(const DeviceTypeSet& deviceTypes) const
+    {
+        if (supportsDeviceTypes(deviceTypes)) {
+            return mSupportedDevices.getDevicesFromTypes(deviceTypes).itemAt(0)->getTagName();
+        }
+        return {};
+    }
+
+    /**
      * @brief supportsDevice
      * @param device to be checked against
      *        forceCheckOnAddress if true, check on type and address whatever the type, otherwise
@@ -150,6 +163,12 @@
     }
     void removeSupportedDevice(const sp<DeviceDescriptor> &device)
     {
+        ssize_t ret = mSupportedDevices.indexOf(device);
+        if (ret >= 0 && !mSupportedDevices.itemAt(ret)->isDynamic()) {
+            // devices equality checks only type, address, name and format
+            // Prevents from removing non dynamically added devices
+            return;
+        }
         mSupportedDevices.remove(device);
     }
     void setSupportedDevices(const DeviceVector &devices)
diff --git a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
index d2f6297..e6eef24 100644
--- a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
@@ -42,6 +42,11 @@
 
     virtual const std::string getTagName() const = 0;
 
+    bool equals(const sp<PolicyAudioPort> &right) const
+    {
+        return getTagName() == right->getTagName();
+    }
+
     virtual sp<AudioPort> asAudioPort() const = 0;
 
     virtual void setFlags(uint32_t flags)
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 2a18f19..c8e4e76 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -39,12 +39,12 @@
 bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
                                const sp<PolicyAudioPort> &dstPort) const
 {
-    if (mSink == 0 || dstPort == 0 || dstPort != mSink) {
+    if (mSink == 0 || dstPort == 0 || !dstPort->equals(mSink)) {
         return false;
     }
     ALOGV("%s: sinks %s matching", __FUNCTION__, mSink->getTagName().c_str());
     for (const auto &sourcePort : mSources) {
-        if (sourcePort == srcPort) {
+        if (sourcePort->equals(srcPort)) {
             ALOGV("%s: sources %s matching", __FUNCTION__, sourcePort->getTagName().c_str());
             return true;
         }
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 68a32a2..5120aeb 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -52,7 +52,8 @@
 DeviceDescriptor::DeviceDescriptor(const AudioDeviceTypeAddr &deviceTypeAddr,
                                    const std::string &tagName,
                                    const FormatVector &encodedFormats) :
-        DeviceDescriptorBase(deviceTypeAddr), mTagName(tagName), mEncodedFormats(encodedFormats)
+        DeviceDescriptorBase(deviceTypeAddr), mTagName(tagName), mEncodedFormats(encodedFormats),
+        mDeclaredAddress(deviceTypeAddr.getAddress())
 {
     mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
     /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
@@ -75,6 +76,10 @@
 void DeviceDescriptor::detach() {
     mId = AUDIO_PORT_HANDLE_NONE;
     PolicyAudioPort::detach();
+    // The device address may have been overwritten on device connection
+    setAddress(mDeclaredAddress);
+    // Device Port does not have a name unless provided by setDeviceConnectionState
+    setName("");
 }
 
 template<typename T>
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index d31e443..2967014 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -271,8 +271,9 @@
     return nullptr;
 }
 
-sp <HwModule> HwModuleCollection::getModuleForDeviceType(audio_devices_t type,
-                                                         audio_format_t encodedFormat) const
+sp<HwModule> HwModuleCollection::getModuleForDeviceType(audio_devices_t type,
+                                                        audio_format_t encodedFormat,
+                                                        std::string *tagName) const
 {
     for (const auto& module : *this) {
         const auto& profiles = audio_is_output_device(type) ?
@@ -284,9 +285,15 @@
                     sp <DeviceDescriptor> deviceDesc =
                             declaredDevices.getDevice(type, String8(), encodedFormat);
                     if (deviceDesc) {
+                        if (tagName != nullptr) {
+                            *tagName = deviceDesc->getTagName();
+                        }
                         return module;
                     }
                 } else {
+                    if (tagName != nullptr) {
+                        *tagName = profile->getTag({type});
+                    }
                     return module;
                 }
             }
@@ -325,15 +332,32 @@
     }
 
     for (const auto& hwModule : *this) {
+        if (!allowToCreate) {
+            auto dynamicDevices = hwModule->getDynamicDevices();
+            auto dynamicDevice = dynamicDevices.getDevice(deviceType, devAddress, encodedFormat);
+            if (dynamicDevice) {
+                return dynamicDevice;
+            }
+        }
         DeviceVector moduleDevices = hwModule->getAllDevices();
         auto moduleDevice = moduleDevices.getDevice(deviceType, devAddress, encodedFormat);
+
+        // Prevent overwriting the moduleDevice address if the connected device does not have the
+        // same address (since getDevice with an empty address ignores the match on address);
+        // use the dynamic device instead.
+        if (moduleDevice && allowToCreate &&
+                (!moduleDevice->address().empty() &&
+                 (moduleDevice->address().compare(devAddress.c_str()) != 0))) {
+            break;
+        }
         if (moduleDevice) {
             if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
                 moduleDevice->setEncodedFormat(encodedFormat);
             }
             if (allowToCreate) {
                 moduleDevice->attach(hwModule);
+                // Address may be overwritten, restored on detach.
                 moduleDevice->setAddress(devAddress.string());
+                // Name may be overwritten, restored on detach.
                 moduleDevice->setName(name);
             }
             return moduleDevice;
@@ -352,18 +376,19 @@
                                                       const char *name,
                                                       const audio_format_t encodedFormat) const
 {
-    sp<HwModule> hwModule = getModuleForDeviceType(type, encodedFormat);
+    std::string tagName = {};
+    sp<HwModule> hwModule = getModuleForDeviceType(type, encodedFormat, &tagName);
     if (hwModule == 0) {
         ALOGE("%s: could not find HW module for device %04x address %s", __FUNCTION__, type,
               address);
         return nullptr;
     }
 
-    sp<DeviceDescriptor> device = new DeviceDescriptor(type, name, address);
+    sp<DeviceDescriptor> device = new DeviceDescriptor(type, tagName, address);
     device->setName(name);
     device->setEncodedFormat(encodedFormat);
-
-  // Add the device to the list of dynamic devices
+    device->setDynamic();
+    // Add the device to the list of dynamic devices
     hwModule->addDynamicDevice(device);
     // Reciprocally attach the device to the module
     device->attach(hwModule);
@@ -375,7 +400,7 @@
     for (const auto &profile : profiles) {
         // Add the device as supported to all profile supporting "weakly" or not the device
         // according to its type
-        if (profile->supportsDevice(device, false /*matchAdress*/)) {
+        if (profile->supportsDevice(device, false /*matchAddress*/)) {
 
             // @todo quid of audio profile? import the profile from device of the same type?
             const auto &isoTypeDeviceForProfile =
@@ -406,10 +431,9 @@
 
         device->detach();
         // Only remove from dynamic list, not from declared list!!!
-        if (!hwModule->getDynamicDevices().contains(device)) {
+        if (!hwModule->removeDynamicDevice(device)) {
             return;
         }
-        hwModule->removeDynamicDevice(device);
         ALOGV("%s: removed dynamic device %s from module %s", __FUNCTION__,
               device->toString().c_str(), hwModule->getName());
 
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index 74b63d5..3a27a43 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -1,4 +1,4 @@
-cc_library_shared {
+cc_library {
     name: "libmedialogservice",
 
     srcs: [
diff --git a/services/medialog/fuzzer/Android.bp b/services/medialog/fuzzer/Android.bp
new file mode 100644
index 0000000..2afaaae
--- /dev/null
+++ b/services/medialog/fuzzer/Android.bp
@@ -0,0 +1,33 @@
+cc_fuzz {
+    name: "media_log_fuzzer",
+    static_libs: [
+        "libmedialogservice",
+    ],
+    srcs: [
+        "media_log_fuzzer.cpp",
+    ],
+    header_libs: [
+        "libmedia_headers",
+    ],
+    shared_libs: [
+        "libaudioutils",
+        "libbinder",
+        "liblog",
+        "libmediautils",
+        "libnblog",
+        "libutils",
+    ],
+    include_dirs: [
+        "frameworks/av/services/medialog",
+    ],
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+    fuzz_config: {
+        cc: [
+            "android-media-fuzzing-reports@google.com",
+        ],
+        componentid: 155276,
+    },
+}
diff --git a/services/medialog/fuzzer/README.md b/services/medialog/fuzzer/README.md
new file mode 100644
index 0000000..b79e5c8
--- /dev/null
+++ b/services/medialog/fuzzer/README.md
@@ -0,0 +1,50 @@
+# Fuzzer for libmedialogservice
+
+## Plugin Design Considerations
+The fuzzer plugin for libmedialogservice is designed based on the understanding of the
+service and tries to achieve the following:
+
+##### Maximize code coverage
+The configuration parameters are not hardcoded, but instead selected based on
+incoming data. This ensures more code paths are reached by the fuzzer.
+
+medialogservice supports the following parameters:
+1. Writer name (parameter name: `writerNameIdx`)
+2. Log size (parameter name: `logSize`)
+3. Enable dump before unregister API (parameter name: `shouldDumpBeforeUnregister`)
+4. Number of lines for the log dump (parameter name: `numberOfLines`)
+
+| Parameter| Valid Values| Configured Value|
+|------------- |-------------| ----- |
+| `writerNameIdx` | 0. `0` 1. `1` | Value obtained from FuzzedDataProvider |
+| `logSize` | In the range `256 to 65536` | Value obtained from FuzzedDataProvider |
+| `shouldDumpBeforeUnregister` | 0. `0` 1. `1` | Value obtained from FuzzedDataProvider |
+| `numberOfLines` | In the range `0 to 65535` | Value obtained from FuzzedDataProvider |
+
+This also ensures that the plugin is always deterministic for any given input.
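+
+For reference, a minimal sketch of how these values are drawn from the input
+(ranges as listed above; the actual plugin code may differ in detail):
+```
+  FuzzedDataProvider fdp(data, size);
+  size_t writerNameIdx = fdp.ConsumeIntegralInRange<size_t>(0, 1);
+  bool shouldDumpBeforeUnregister = fdp.ConsumeBool();
+  size_t logSize = fdp.ConsumeIntegralInRange<size_t>(0x100, 0x10000);
+```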
+
+## Build
+
+This describes steps to build media_log_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+  $ mm -j$(nproc) media_log_fuzzer
+```
+
+#### Steps to run
+Create a directory CORPUS_DIR and copy some files into it.
+Push this directory to the device.
+
+To run on device
+```
+  $ adb sync data
+  $ adb shell /data/fuzz/arm64/media_log_fuzzer/media_log_fuzzer CORPUS_DIR
+```
+
+## References:
+ * http://llvm.org/docs/LibFuzzer.html
+ * https://github.com/google/oss-fuzz
diff --git a/services/medialog/fuzzer/media_log_fuzzer.cpp b/services/medialog/fuzzer/media_log_fuzzer.cpp
new file mode 100644
index 0000000..bd50d0f
--- /dev/null
+++ b/services/medialog/fuzzer/media_log_fuzzer.cpp
@@ -0,0 +1,76 @@
+/**
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <binder/IMemory.h>
+#include <binder/MemoryDealer.h>
+#include <private/android_filesystem_config.h>
+#include "MediaLogService.h"
+#include "fuzzer/FuzzedDataProvider.h"
+
+constexpr const char* kWriterNames[2] = {"FastMixer", "FastCapture"};
+constexpr size_t kMinSize = 0x100;
+constexpr size_t kMaxSize = 0x10000;
+constexpr size_t kLogMemorySize = 400 * 1024;
+constexpr size_t kMaxNumLines = USHRT_MAX;
+
+using namespace android;
+
+class MediaLogFuzzer {
+   public:
+    void init();
+    void process(const uint8_t* data, size_t size);
+
+   private:
+    sp<MemoryDealer> mMemoryDealer = nullptr;
+    sp<MediaLogService> mService = nullptr;
+};
+
+void MediaLogFuzzer::init() {
+    setuid(AID_MEDIA);
+    mService = new MediaLogService();
+    mMemoryDealer = new MemoryDealer(kLogMemorySize, "MediaLogFuzzer", MemoryHeapBase::READ_ONLY);
+}
+
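+// Register a writer with fuzzed parameters, then dump and unregister in a fuzzed order.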
+void MediaLogFuzzer::process(const uint8_t* data, size_t size) {
+    FuzzedDataProvider fuzzedDataProvider(data, size);
+    size_t writerNameIdx =
+        fuzzedDataProvider.ConsumeIntegralInRange<size_t>(0, std::size(kWriterNames) - 1);
+    bool shouldDumpBeforeUnregister = fuzzedDataProvider.ConsumeBool();
+    size_t logSize = fuzzedDataProvider.ConsumeIntegralInRange<size_t>(kMinSize, kMaxSize);
+    sp<IMemory> logBuffer = mMemoryDealer->allocate(NBLog::Timeline::sharedSize(logSize));
+    Vector<String16> args;
+    size_t numberOfLines = fuzzedDataProvider.ConsumeIntegralInRange<size_t>(0, kMaxNumLines);
+    for (size_t lineIdx = 0; lineIdx < numberOfLines; ++lineIdx) {
+        args.add(static_cast<String16>(fuzzedDataProvider.ConsumeRandomLengthString().c_str()));
+    }
+    const char* fileName = "logDumpFile";
+    int fd = memfd_create(fileName, MFD_ALLOW_SEALING);
+    fuzzedDataProvider.ConsumeData(logBuffer->unsecurePointer(), logBuffer->size());
+    mService->registerWriter(logBuffer, logSize, kWriterNames[writerNameIdx]);
+    if (shouldDumpBeforeUnregister) {
+        mService->dump(fd, args);
+        mService->unregisterWriter(logBuffer);
+    } else {
+        mService->unregisterWriter(logBuffer);
+        mService->dump(fd, args);
+    }
+    close(fd);
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+    MediaLogFuzzer mediaLogFuzzer = MediaLogFuzzer();
+    mediaLogFuzzer.init();
+    mediaLogFuzzer.process(data, size);
+    return 0;
+}