Merge "Create libstagefright_mp3dec_headers"
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 12ed725..b520c17 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -734,7 +734,7 @@
             }
             if (timestampMax < timestamp) timestampMax = timestamp;
         }
-        timestampOffset = timestampMax;
+        timestampOffset = timestampMax + 33333;
         eleInfo.close();
 
         // Reset Total frames before second decode loop
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 692da58..566a18f 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -96,6 +96,9 @@
                 int32_t vstride = int32_t(offsetDelta / stride);
                 newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
                 ALOGD("[%s] updating vstride = %d", mName, vstride);
+                buffer->setRange(
+                        img->mPlane[0].mOffset,
+                        buffer->size() - img->mPlane[0].mOffset);
             }
         }
         setFormat(newFormat);
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 25e7da9..19414a0 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -276,20 +276,22 @@
                             int32_t planeSize = 0;
                             for (uint32_t i = 0; i < layout.numPlanes; ++i) {
                                 const C2PlaneInfo &plane = layout.planes[i];
-                                ssize_t minOffset = plane.minOffset(mWidth, mHeight);
-                                ssize_t maxOffset = plane.maxOffset(mWidth, mHeight);
+                                int64_t planeStride = std::abs(plane.rowInc / plane.colInc);
+                                ssize_t minOffset = plane.minOffset(
+                                        mWidth / plane.colSampling, mHeight / plane.rowSampling);
+                                ssize_t maxOffset = plane.maxOffset(
+                                        mWidth / plane.colSampling, mHeight / plane.rowSampling);
                                 if (minPtr > mView.data()[i] + minOffset) {
                                     minPtr = mView.data()[i] + minOffset;
                                 }
                                 if (maxPtr < mView.data()[i] + maxOffset) {
                                     maxPtr = mView.data()[i] + maxOffset;
                                 }
-                                planeSize += std::abs(plane.rowInc) * align(mHeight, 64)
-                                        / plane.rowSampling / plane.colSampling
-                                        * divUp(mAllocatedDepth, 8u);
+                                planeSize += planeStride * divUp(mAllocatedDepth, 8u)
+                                        * align(mHeight, 64) / plane.rowSampling;
                             }
 
-                            if ((maxPtr - minPtr + 1) <= planeSize) {
+                            if (minPtr == mView.data()[0] && (maxPtr - minPtr + 1) <= planeSize) {
                                 // FIXME: this is risky as reading/writing data out of bound results
                                 //        in an undefined behavior, but gralloc does assume a
                                 //        contiguous mapping
diff --git a/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp b/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
index 5bee605..ad8f6e5 100644
--- a/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
+++ b/media/codec2/sfplugin/tests/CCodecBuffers_test.cpp
@@ -18,22 +18,31 @@
 
 #include <gtest/gtest.h>
 
+#include <media/stagefright/foundation/AString.h>
 #include <media/stagefright/MediaCodecConstants.h>
 
+#include <C2BlockInternal.h>
 #include <C2PlatformSupport.h>
 
 namespace android {
 
+static std::shared_ptr<RawGraphicOutputBuffers> GetRawGraphicOutputBuffers(
+        int32_t width, int32_t height) {
+    std::shared_ptr<RawGraphicOutputBuffers> buffers =
+        std::make_shared<RawGraphicOutputBuffers>("test");
+    sp<AMessage> format{new AMessage};
+    format->setInt32(KEY_WIDTH, width);
+    format->setInt32(KEY_HEIGHT, height);
+    buffers->setFormat(format);
+    return buffers;
+}
+
 TEST(RawGraphicOutputBuffersTest, ChangeNumSlots) {
     constexpr int32_t kWidth = 3840;
     constexpr int32_t kHeight = 2160;
 
     std::shared_ptr<RawGraphicOutputBuffers> buffers =
-        std::make_shared<RawGraphicOutputBuffers>("test");
-    sp<AMessage> format{new AMessage};
-    format->setInt32("width", kWidth);
-    format->setInt32("height", kHeight);
-    buffers->setFormat(format);
+        GetRawGraphicOutputBuffers(kWidth, kHeight);
 
     std::shared_ptr<C2BlockPool> pool;
     ASSERT_EQ(OK, GetCodec2BlockPool(C2BlockPool::BASIC_GRAPHIC, nullptr, &pool));
@@ -96,4 +105,435 @@
     }
 }
 
+class TestGraphicAllocation : public C2GraphicAllocation {
+public:
+    TestGraphicAllocation(
+            uint32_t width,
+            uint32_t height,
+            const C2PlanarLayout &layout,
+            size_t capacity,
+            std::vector<size_t> offsets)
+        : C2GraphicAllocation(width, height),
+          mLayout(layout),
+          mMemory(capacity, 0xAA),
+          mOffsets(offsets) {
+    }
+
+    c2_status_t map(
+            C2Rect rect, C2MemoryUsage usage, C2Fence *fence,
+            C2PlanarLayout *layout, uint8_t **addr) override {
+        (void)rect;
+        (void)usage;
+        (void)fence;
+        *layout = mLayout;
+        for (size_t i = 0; i < mLayout.numPlanes; ++i) {
+            addr[i] = mMemory.data() + mOffsets[i];
+        }
+        return C2_OK;
+    }
+
+    c2_status_t unmap(uint8_t **, C2Rect, C2Fence *) override { return C2_OK; }
+
+    C2Allocator::id_t getAllocatorId() const override { return -1; }
+
+    const C2Handle *handle() const override { return nullptr; }
+
+    bool equals(const std::shared_ptr<const C2GraphicAllocation> &other) const override {
+        return other.get() == this;
+    }
+
+private:
+    C2PlanarLayout mLayout;
+    std::vector<uint8_t> mMemory;
+    std::vector<uint8_t *> mAddr;
+    std::vector<size_t> mOffsets;
+};
+
+class LayoutTest : public ::testing::TestWithParam<std::tuple<bool, std::string, bool, int32_t>> {
+private:
+    static C2PlanarLayout YUVPlanarLayout(int32_t stride) {
+        C2PlanarLayout layout = {
+            C2PlanarLayout::TYPE_YUV,
+            3,  /* numPlanes */
+            3,  /* rootPlanes */
+            {},  /* planes --- to be filled below */
+        };
+        layout.planes[C2PlanarLayout::PLANE_Y] = {
+            C2PlaneInfo::CHANNEL_Y,
+            1,  /* colInc */
+            stride,  /* rowInc */
+            1,  /* colSampling */
+            1,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_Y,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_U] = {
+            C2PlaneInfo::CHANNEL_CB,
+            1,  /* colInc */
+            stride / 2,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_U,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_V] = {
+            C2PlaneInfo::CHANNEL_CR,
+            1,  /* colInc */
+            stride / 2,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_V,  /* rootIx */
+            0,  /* offset */
+        };
+        return layout;
+    }
+
+    static C2PlanarLayout YUVSemiPlanarLayout(int32_t stride) {
+        C2PlanarLayout layout = {
+            C2PlanarLayout::TYPE_YUV,
+            3,  /* numPlanes */
+            2,  /* rootPlanes */
+            {},  /* planes --- to be filled below */
+        };
+        layout.planes[C2PlanarLayout::PLANE_Y] = {
+            C2PlaneInfo::CHANNEL_Y,
+            1,  /* colInc */
+            stride,  /* rowInc */
+            1,  /* colSampling */
+            1,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_Y,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_U] = {
+            C2PlaneInfo::CHANNEL_CB,
+            2,  /* colInc */
+            stride,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_U,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_V] = {
+            C2PlaneInfo::CHANNEL_CR,
+            2,  /* colInc */
+            stride,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_U,  /* rootIx */
+            1,  /* offset */
+        };
+        return layout;
+    }
+
+    static C2PlanarLayout YVUSemiPlanarLayout(int32_t stride) {
+        C2PlanarLayout layout = {
+            C2PlanarLayout::TYPE_YUV,
+            3,  /* numPlanes */
+            2,  /* rootPlanes */
+            {},  /* planes --- to be filled below */
+        };
+        layout.planes[C2PlanarLayout::PLANE_Y] = {
+            C2PlaneInfo::CHANNEL_Y,
+            1,  /* colInc */
+            stride,  /* rowInc */
+            1,  /* colSampling */
+            1,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_Y,  /* rootIx */
+            0,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_U] = {
+            C2PlaneInfo::CHANNEL_CB,
+            2,  /* colInc */
+            stride,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_V,  /* rootIx */
+            1,  /* offset */
+        };
+        layout.planes[C2PlanarLayout::PLANE_V] = {
+            C2PlaneInfo::CHANNEL_CR,
+            2,  /* colInc */
+            stride,  /* rowInc */
+            2,  /* colSampling */
+            2,  /* rowSampling */
+            8,  /* allocatedDepth */
+            8,  /* bitDepth */
+            0,  /* rightShift */
+            C2PlaneInfo::NATIVE,
+            C2PlanarLayout::PLANE_V,  /* rootIx */
+            0,  /* offset */
+        };
+        return layout;
+    }
+
+    static std::shared_ptr<C2GraphicBlock> CreateGraphicBlock(
+            uint32_t width,
+            uint32_t height,
+            const C2PlanarLayout &layout,
+            size_t capacity,
+            std::vector<size_t> offsets) {
+        std::shared_ptr<C2GraphicAllocation> alloc = std::make_shared<TestGraphicAllocation>(
+                width,
+                height,
+                layout,
+                capacity,
+                offsets);
+
+        return _C2BlockFactory::CreateGraphicBlock(alloc);
+    }
+
+    static constexpr uint8_t GetPixelValue(uint8_t value, uint32_t row, uint32_t col) {
+        return (uint32_t(value) * row + col) & 0xFF;
+    }
+
+    static void FillPlane(C2GraphicView &view, size_t index, uint8_t value) {
+        C2PlanarLayout layout = view.layout();
+
+        uint8_t *rowPtr = view.data()[index];
+        C2PlaneInfo plane = layout.planes[index];
+        for (uint32_t row = 0; row < view.height() / plane.rowSampling; ++row) {
+            uint8_t *colPtr = rowPtr;
+            for (uint32_t col = 0; col < view.width() / plane.colSampling; ++col) {
+                *colPtr = GetPixelValue(value, row, col);
+                colPtr += plane.colInc;
+            }
+            rowPtr += plane.rowInc;
+        }
+    }
+
+    static void FillBlock(const std::shared_ptr<C2GraphicBlock> &block) {
+        C2GraphicView view = block->map().get();
+
+        FillPlane(view, C2PlanarLayout::PLANE_Y, 'Y');
+        FillPlane(view, C2PlanarLayout::PLANE_U, 'U');
+        FillPlane(view, C2PlanarLayout::PLANE_V, 'V');
+    }
+
+    static bool VerifyPlane(
+            const MediaImage2 *mediaImage,
+            const uint8_t *base,
+            uint32_t index,
+            uint8_t value,
+            std::string *errorMsg) {
+        *errorMsg = "";
+        MediaImage2::PlaneInfo plane = mediaImage->mPlane[index];
+        const uint8_t *rowPtr = base + plane.mOffset;
+        for (uint32_t row = 0; row < mediaImage->mHeight / plane.mVertSubsampling; ++row) {
+            const uint8_t *colPtr = rowPtr;
+            for (uint32_t col = 0; col < mediaImage->mWidth / plane.mHorizSubsampling; ++col) {
+                if (GetPixelValue(value, row, col) != *colPtr) {
+                    *errorMsg = AStringPrintf("row=%u col=%u expected=%02x actual=%02x",
+                            row, col, GetPixelValue(value, row, col), *colPtr).c_str();
+                    return false;
+                }
+                colPtr += plane.mColInc;
+            }
+            rowPtr += plane.mRowInc;
+        }
+        return true;
+    }
+
+public:
+    static constexpr int32_t kWidth = 320;
+    static constexpr int32_t kHeight = 240;
+    static constexpr int32_t kGapLength = kWidth * kHeight * 10;
+
+    static std::shared_ptr<C2Buffer> CreateAndFillBufferFromParam(const ParamType &param) {
+        bool contiguous = std::get<0>(param);
+        std::string planeOrderStr = std::get<1>(param);
+        bool planar = std::get<2>(param);
+        int32_t stride = std::get<3>(param);
+
+        C2PlanarLayout::plane_index_t planeOrder[3];
+        C2PlanarLayout layout;
+
+        if (planeOrderStr.size() != 3) {
+            return nullptr;
+        }
+        for (size_t i = 0; i < 3; ++i) {
+            C2PlanarLayout::plane_index_t planeIndex;
+            switch (planeOrderStr[i]) {
+                case 'Y': planeIndex = C2PlanarLayout::PLANE_Y; break;
+                case 'U': planeIndex = C2PlanarLayout::PLANE_U; break;
+                case 'V': planeIndex = C2PlanarLayout::PLANE_V; break;
+                default:  return nullptr;
+            }
+            planeOrder[i] = planeIndex;
+        }
+
+        if (planar) {
+            layout = YUVPlanarLayout(stride);
+        } else {  // semi-planar
+            for (size_t i = 0; i < 3; ++i) {
+                if (planeOrder[i] == C2PlanarLayout::PLANE_U) {
+                    layout = YUVSemiPlanarLayout(stride);
+                    break;
+                }
+                if (planeOrder[i] == C2PlanarLayout::PLANE_V) {
+                    layout = YVUSemiPlanarLayout(stride);
+                    break;
+                }
+            }
+        }
+
+        size_t yPlaneSize = stride * kHeight;
+        size_t uvPlaneSize = stride * kHeight / 4;
+        size_t capacity = yPlaneSize + uvPlaneSize * 2;
+        std::vector<size_t> offsets(3);
+
+        if (!contiguous) {
+            if (planar) {
+                capacity += kGapLength * 2;
+            } else {  // semi-planar
+                capacity += kGapLength;
+            }
+        }
+
+        offsets[planeOrder[0]] = 0;
+        size_t planeSize = (planeOrder[0] == C2PlanarLayout::PLANE_Y) ? yPlaneSize : uvPlaneSize;
+        for (size_t i = 1; i < 3; ++i) {
+            offsets[planeOrder[i]] = offsets[planeOrder[i - 1]] + planeSize;
+            if (!contiguous) {
+                offsets[planeOrder[i]] += kGapLength;
+            }
+            planeSize = (planeOrder[i] == C2PlanarLayout::PLANE_Y) ? yPlaneSize : uvPlaneSize;
+            if (!planar  // semi-planar
+                    && planeOrder[i - 1] != C2PlanarLayout::PLANE_Y
+                    && planeOrder[i] != C2PlanarLayout::PLANE_Y) {
+                offsets[planeOrder[i]] = offsets[planeOrder[i - 1]] + 1;
+                planeSize = uvPlaneSize * 2 - 1;
+            }
+        }
+
+        std::shared_ptr<C2GraphicBlock> block = CreateGraphicBlock(
+                kWidth,
+                kHeight,
+                layout,
+                capacity,
+                offsets);
+        FillBlock(block);
+        return C2Buffer::CreateGraphicBuffer(
+                block->share(block->crop(), C2Fence()));
+    }
+
+    static bool VerifyClientBuffer(
+            const sp<MediaCodecBuffer> &buffer, std::string *errorMsg) {
+        *errorMsg = "";
+        sp<ABuffer> imageData;
+        if (!buffer->format()->findBuffer("image-data", &imageData)) {
+            *errorMsg = "Missing image data";
+            return false;
+        }
+        MediaImage2 *mediaImage = (MediaImage2 *)imageData->data();
+        if (mediaImage->mType != MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
+            *errorMsg = AStringPrintf("Unexpected type: %d", mediaImage->mType).c_str();
+            return false;
+        }
+        std::string planeErrorMsg;
+        if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::Y, 'Y', &planeErrorMsg)) {
+            *errorMsg = "Y plane does not match: " + planeErrorMsg;
+            return false;
+        }
+        if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::U, 'U', &planeErrorMsg)) {
+            *errorMsg = "U plane does not match: " + planeErrorMsg;
+            return false;
+        }
+        if (!VerifyPlane(mediaImage, buffer->base(), MediaImage2::V, 'V', &planeErrorMsg)) {
+            *errorMsg = "V plane does not match: " + planeErrorMsg;
+            return false;
+        }
+
+        int32_t width, height, stride;
+        buffer->format()->findInt32(KEY_WIDTH, &width);
+        buffer->format()->findInt32(KEY_HEIGHT, &height);
+        buffer->format()->findInt32(KEY_STRIDE, &stride);
+
+        MediaImage2 legacyYLayout = {
+            MediaImage2::MEDIA_IMAGE_TYPE_Y,
+            1,  // mNumPlanes
+            uint32_t(width),
+            uint32_t(height),
+            8,
+            8,
+            {},  // mPlane
+        };
+        legacyYLayout.mPlane[MediaImage2::Y] = {
+            0,  // mOffset
+            1,  // mColInc
+            stride,  // mRowInc
+            1,  // mHorizSubsampling
+            1,  // mVertSubsampling
+        };
+        if (!VerifyPlane(&legacyYLayout, buffer->data(), MediaImage2::Y, 'Y', &planeErrorMsg)) {
+            *errorMsg = "Y plane by legacy layout does not match: " + planeErrorMsg;
+            return false;
+        }
+        return true;
+    }
+
+};
+
+TEST_P(LayoutTest, VerifyLayout) {
+    std::shared_ptr<RawGraphicOutputBuffers> buffers =
+        GetRawGraphicOutputBuffers(kWidth, kHeight);
+
+    std::shared_ptr<C2Buffer> c2Buffer = CreateAndFillBufferFromParam(GetParam());
+    ASSERT_NE(nullptr, c2Buffer);
+    sp<MediaCodecBuffer> clientBuffer;
+    size_t index;
+    ASSERT_EQ(OK, buffers->registerBuffer(c2Buffer, &index, &clientBuffer));
+    ASSERT_NE(nullptr, clientBuffer);
+    std::string errorMsg;
+    ASSERT_TRUE(VerifyClientBuffer(clientBuffer, &errorMsg)) << errorMsg;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+        RawGraphicOutputBuffersTest,
+        LayoutTest,
+        ::testing::Combine(
+            ::testing::Bool(),  /* contiguous */
+            ::testing::Values("YUV", "YVU", "UVY", "VUY"),
+            ::testing::Bool(),  /* planar */
+            ::testing::Values(320, 512)),
+        [](const ::testing::TestParamInfo<LayoutTest::ParamType> &info) {
+            std::string contiguous = std::get<0>(info.param) ? "Contiguous" : "Noncontiguous";
+            std::string planar = std::get<2>(info.param) ? "Planar" : "SemiPlanar";
+            return contiguous
+                    + std::get<1>(info.param)
+                    + planar
+                    + std::to_string(std::get<3>(info.param));
+        });
+
 } // namespace android
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index caf575c..506feb8 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -25,9 +25,9 @@
 
 #include "EffectBufferHalHidl.h"
 #include "EffectHalHidl.h"
-#include "HidlUtils.h"
+#include "UuidUtils.h"
 
-using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::implementation::UuidUtils;
 using ::android::hardware::audio::common::utils::EnumBitfield;
 using ::android::hardware::hidl_vec;
 using ::android::hardware::MQDescriptorSync;
@@ -58,8 +58,8 @@
 // static
 void EffectHalHidl::effectDescriptorToHal(
         const EffectDescriptor& descriptor, effect_descriptor_t* halDescriptor) {
-    HidlUtils::uuidToHal(descriptor.type, &halDescriptor->type);
-    HidlUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
+    UuidUtils::uuidToHal(descriptor.type, &halDescriptor->type);
+    UuidUtils::uuidToHal(descriptor.uuid, &halDescriptor->uuid);
     halDescriptor->flags = static_cast<uint32_t>(descriptor.flags);
     halDescriptor->cpuLoad = descriptor.cpuLoad;
     halDescriptor->memoryUsage = descriptor.memoryUsage;
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index 9192a31..b48acaa 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -23,9 +23,9 @@
 #include "EffectBufferHalHidl.h"
 #include "EffectHalHidl.h"
 #include "EffectsFactoryHalHidl.h"
-#include "HidlUtils.h"
+#include "UuidUtils.h"
 
-using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::implementation::UuidUtils;
 using ::android::hardware::Return;
 
 namespace android {
@@ -85,7 +85,7 @@
     // TODO: check for nullptr
     if (mEffectsFactory == 0) return NO_INIT;
     Uuid hidlUuid;
-    HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+    UuidUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
     Result retval = Result::NOT_INITIALIZED;
     Return<void> ret = mEffectsFactory->getDescriptor(hidlUuid,
             [&](Result r, const EffectDescriptor& result) {
@@ -107,7 +107,7 @@
         int32_t deviceId __unused, sp<EffectHalInterface> *effect) {
     if (mEffectsFactory == 0) return NO_INIT;
     Uuid hidlUuid;
-    HidlUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
+    UuidUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
     Result retval = Result::NOT_INITIALIZED;
     Return<void> ret;
 #if MAJOR_VERSION >= 6
diff --git a/media/libaudioprocessing/include/media/AudioResamplerPublic.h b/media/libaudioprocessing/include/media/AudioResamplerPublic.h
index 1b39067..200a4c8 100644
--- a/media/libaudioprocessing/include/media/AudioResamplerPublic.h
+++ b/media/libaudioprocessing/include/media/AudioResamplerPublic.h
@@ -59,7 +59,7 @@
 
 static inline bool isAudioPlaybackRateValid(const AudioPlaybackRate &playbackRate) {
     if (playbackRate.mFallbackMode == AUDIO_TIMESTRETCH_FALLBACK_FAIL &&
-            (playbackRate.mStretchMode == AUDIO_TIMESTRETCH_STRETCH_SPEECH ||
+            (playbackRate.mStretchMode == AUDIO_TIMESTRETCH_STRETCH_VOICE ||
                     playbackRate.mStretchMode == AUDIO_TIMESTRETCH_STRETCH_DEFAULT)) {
         //test sonic specific constraints
         return playbackRate.mSpeed >= TIMESTRETCH_SONIC_SPEED_MIN &&
diff --git a/media/libeffects/lvm/lib/Android.bp b/media/libeffects/lvm/lib/Android.bp
index 8f2f016..dbe0d62 100644
--- a/media/libeffects/lvm/lib/Android.bp
+++ b/media/libeffects/lvm/lib/Android.bp
@@ -131,12 +131,15 @@
     shared_libs: [
         "liblog",
     ],
+    static_libs: [
+        "libaudioutils",
+    ],
     header_libs: [
         "libhardware_headers",
     ],
     cppflags: [
+        "-DBIQUAD_OPT",
         "-fvisibility=hidden",
-
         "-Wall",
         "-Werror",
     ],
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
index 5b47aa6..1f0b459 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Control.cpp
@@ -21,6 +21,9 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 #include "LVDBE.h"
 #include "LVDBE_Private.h"
 #include "VectorArithmetic.h"
@@ -107,12 +110,20 @@
     /*
      * Setup the high pass filter
      */
+#ifdef BIQUAD_OPT
+    std::array<LVM_FLOAT, android::audio_utils::kBiquadNumCoefs> coefs = {
+            LVDBE_HPF_Table[Offset].A0, LVDBE_HPF_Table[Offset].A1, LVDBE_HPF_Table[Offset].A2,
+            -(LVDBE_HPF_Table[Offset].B1), -(LVDBE_HPF_Table[Offset].B2)};
+    pInstance->pBqInstance
+            ->setCoefficients<std::array<LVM_FLOAT, android::audio_utils::kBiquadNumCoefs>>(coefs);
+#else
     LoadConst_Float(0,                                      /* Clear the history, value 0 */
                     (LVM_FLOAT*)&pInstance->pData->HPFTaps, /* Destination */
                     sizeof(pInstance->pData->HPFTaps) / sizeof(LVM_FLOAT)); /* Number of words */
     BQ_2I_D32F32Cll_TRC_WRA_01_Init(&pInstance->pCoef->HPFInstance, /* Initialise the filter */
                                     &pInstance->pData->HPFTaps,
                                     (BQ_FLOAT_Coefs_t*)&LVDBE_HPF_Table[Offset]);
+#endif
 
     /*
      * Setup the band pass filter
@@ -275,6 +286,15 @@
     LVDBE_Instance_t* pInstance = (LVDBE_Instance_t*)hInstance;
     LVMixer3_2St_FLOAT_st* pBypassMixer_Instance = &pInstance->pData->BypassMixer;
 
+#ifdef BIQUAD_OPT
+    /*
+     * Create biquad instance
+     */
+    pInstance->pBqInstance.reset(
+            new android::audio_utils::BiquadFilter<LVM_FLOAT>(pParams->NrChannels));
+    pInstance->pBqInstance->clear();
+#endif
+
     /*
      * Update the filters
      */
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
index 12af162..611b762 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Init.cpp
@@ -94,6 +94,14 @@
         return LVDBE_NULLADDRESS;
     }
 
+#ifdef BIQUAD_OPT
+    /*
+     * Create biquad instance
+     */
+    pInstance->pBqInstance.reset(
+            new android::audio_utils::BiquadFilter<LVM_FLOAT>(LVM_MAX_CHANNELS));
+#endif
+
     /*
      * Initialise the filters
      */
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
index 4fef1ef..fa85638 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Private.h
@@ -33,6 +33,9 @@
 /*                                                                                      */
 /****************************************************************************************/
 
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 #include "LVDBE.h" /* Calling or Application layer definitions */
 #include "BIQUAD.h"
 #include "LVC_Mixer.h"
@@ -63,7 +66,9 @@
     AGC_MIX_VOL_2St1Mon_FLOAT_t AGCInstance; /* AGC instance parameters */
 
     /* Process variables */
+#ifndef BIQUAD_OPT
     Biquad_2I_Order2_FLOAT_Taps_t HPFTaps; /* High pass filter taps */
+#endif
     Biquad_1I_Order2_FLOAT_Taps_t BPFTaps; /* Band pass filter taps */
     LVMixer3_1St_FLOAT_st BypassVolume;    /* Bypass volume scaler */
     LVMixer3_2St_FLOAT_st BypassMixer;     /* Bypass Mixer for Click Removal */
@@ -73,7 +78,9 @@
 /* Coefs structure */
 typedef struct {
     /* Process variables */
+#ifndef BIQUAD_OPT
     Biquad_FLOAT_Instance_t HPFInstance; /* High pass filter instance */
+#endif
     Biquad_FLOAT_Instance_t BPFInstance; /* Band pass filter instance */
 } LVDBE_Coef_FLOAT_t;
 /* Instance structure */
@@ -86,6 +93,10 @@
     LVDBE_Data_FLOAT_t* pData; /* Instance data */
     LVDBE_Coef_FLOAT_t* pCoef; /* Instance coefficients */
     void* pScratch;            /* scratch pointer */
+#ifdef BIQUAD_OPT
+    std::unique_ptr<android::audio_utils::BiquadFilter<LVM_FLOAT>>
+            pBqInstance; /* Biquad filter instance */
+#endif
 } LVDBE_Instance_t;
 
 /****************************************************************************************/
diff --git a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
index f4a4d6f..bd04a02 100644
--- a/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
+++ b/media/libeffects/lvm/lib/Bass/src/LVDBE_Process.cpp
@@ -20,6 +20,9 @@
 /*    Includes                                                                          */
 /*                                                                                      */
 /****************************************************************************************/
+#ifdef BIQUAD_OPT
+#include <audio_utils/BiquadFilter.h>
+#endif
 
 #include <string.h>  // memset
 #include "LVDBE.h"
@@ -125,10 +128,14 @@
          * Apply the high pass filter if selected
          */
         if (pInstance->Params.HPFSelect == LVDBE_HPF_ON) {
+#ifdef BIQUAD_OPT
+            pInstance->pBqInstance->process(pScratch, pScratch, NrFrames);
+#else
             BQ_MC_D32F32C30_TRC_WRA_01(&pInstance->pCoef->HPFInstance, /* Filter instance      */
                                        pScratch,                       /* Source               */
                                        pScratch,                       /* Destination          */
                                        (LVM_INT16)NrFrames, (LVM_INT16)NrChannels);
+#endif
         }
 
         /*
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index 5217cf9..681e247 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -1,35 +1,5 @@
 // audio preprocessing wrapper
 cc_library_shared {
-    name: "libaudiopreprocessing_legacy",
-
-    vendor: true,
-
-    relative_install_path: "soundfx",
-
-    srcs: ["PreProcessing.cpp"],
-
-    shared_libs: [
-        "libwebrtc_audio_preprocessing",
-        "libspeexresampler",
-        "libutils",
-        "liblog",
-    ],
-
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=hidden",
-        "-Wall",
-        "-Werror",
-    ],
-
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-    ],
-}
-
-cc_library_shared {
     name: "libaudiopreprocessing",
     vendor: true,
     relative_install_path: "soundfx",
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 1a5547b..03ccc34 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -23,15 +23,10 @@
 #include <hardware/audio_effect.h>
 #include <utils/Log.h>
 #include <utils/Timers.h>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <audio_processing.h>
 #include <module_common_types.h>
-#ifdef WEBRTC_LEGACY
-#include "speex/speex_resampler.h"
-#endif
 
 // undefine to perform multi channels API functional tests
 //#define DUAL_MIC_TEST
@@ -46,9 +41,7 @@
 // types of pre processing modules
 enum preproc_id {
     PREPROC_AGC,  // Automatic Gain Control
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
     PREPROC_NUM_EFFECTS
@@ -110,10 +103,8 @@
     int id;                        // audio session ID
     int io;                        // handle of input stream this session is on
     webrtc::AudioProcessing* apm;  // handle on webRTC audio processing module (APM)
-#ifndef WEBRTC_LEGACY
     // Audio Processing module builder
     webrtc::AudioProcessingBuilder ap_builder;
-#endif
     size_t apmFrameCount;      // buffer size for webRTC process (10 ms)
     uint32_t apmSamplingRate;  // webRTC APM sampling rate (8/16 or 32 kHz)
     size_t frameCount;         // buffer size before input resampler ( <=> apmFrameCount)
@@ -124,42 +115,25 @@
     uint32_t enabledMsk;       // bit field containing IDs of enabled pre processors
     uint32_t processedMsk;     // bit field containing IDs of pre processors already
                                // processed in current round
-#ifdef WEBRTC_LEGACY
-    webrtc::AudioFrame* procFrame;  // audio frame passed to webRTC AMP ProcessStream()
-#else
     // audio config structure
     webrtc::AudioProcessing::Config config;
     webrtc::StreamConfig inputConfig;   // input stream configuration
     webrtc::StreamConfig outputConfig;  // output stream configuration
-#endif
     int16_t* inBuf;    // input buffer used when resampling
     size_t inBufSize;  // input buffer size in frames
     size_t framesIn;   // number of frames in input buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* inResampler;  // handle on input speex resampler
-#endif
     int16_t* outBuf;    // output buffer used when resampling
     size_t outBufSize;  // output buffer size in frames
     size_t framesOut;   // number of frames in output buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* outResampler;  // handle on output speex resampler
-#endif
     uint32_t revChannelCount;  // number of channels on reverse stream
     uint32_t revEnabledMsk;    // bit field containing IDs of enabled pre processors
                                // with reverse channel
     uint32_t revProcessedMsk;  // bit field containing IDs of pre processors with reverse
                                // channel already processed in current round
-#ifdef WEBRTC_LEGACY
-    webrtc::AudioFrame* revFrame;  // audio frame passed to webRTC AMP AnalyzeReverseStream()
-#else
     webrtc::StreamConfig revConfig;     // reverse stream configuration.
-#endif
     int16_t* revBuf;    // reverse channel input buffer
     size_t revBufSize;  // reverse channel input buffer size
     size_t framesRev;   // number of frames in reverse channel input buffer
-#ifdef WEBRTC_LEGACY
-    SpeexResamplerState* revResampler;  // handle on reverse channel input speex resampler
-#endif
 };
 
 #ifdef DUAL_MIC_TEST
@@ -213,7 +187,6 @@
         "Automatic Gain Control",
         "The Android Open Source Project"};
 
-#ifndef WEBRTC_LEGACY
 // Automatic Gain Control 2
 static const effect_descriptor_t sAgc2Descriptor = {
         {0xae3c653b, 0xbe18, 0x4ab8, 0x8938, {0x41, 0x8f, 0x0a, 0x7f, 0x06, 0xac}},  // type
@@ -224,7 +197,6 @@
         0,  // FIXME indicate memory usage
         "Automatic Gain Control 2",
         "The Android Open Source Project"};
-#endif
 
 // Acoustic Echo Cancellation
 static const effect_descriptor_t sAecDescriptor = {
@@ -249,9 +221,7 @@
         "The Android Open Source Project"};
 
 static const effect_descriptor_t* sDescriptors[PREPROC_NUM_EFFECTS] = {&sAgcDescriptor,
-#ifndef WEBRTC_LEGACY
                                                                        &sAgc2Descriptor,
-#endif
                                                                        &sAecDescriptor,
                                                                        &sNsDescriptor};
 
@@ -260,9 +230,7 @@
 //------------------------------------------------------------------------------
 
 const effect_uuid_t* const sUuidToPreProcTable[PREPROC_NUM_EFFECTS] = {FX_IID_AGC,
-#ifndef WEBRTC_LEGACY
                                                                        FX_IID_AGC2,
-#endif
                                                                        FX_IID_AEC, FX_IID_NS};
 
 const effect_uuid_t* ProcIdToUuid(int procId) {
@@ -297,7 +265,6 @@
 static const int kAgcDefaultCompGain = 9;
 static const bool kAgcDefaultLimiter = true;
 
-#ifndef WEBRTC_LEGACY
 int Agc2Init(preproc_effect_t* effect) {
     ALOGV("Agc2Init");
     effect->session->config = effect->session->apm->GetConfig();
@@ -308,48 +275,27 @@
     effect->session->apm->ApplyConfig(effect->session->config);
     return 0;
 }
-#endif
 
 int AgcInit(preproc_effect_t* effect) {
     ALOGV("AgcInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    agc->set_mode(webrtc::GainControl::kFixedDigital);
-    agc->set_target_level_dbfs(kAgcDefaultTargetLevel);
-    agc->set_compression_gain_db(kAgcDefaultCompGain);
-    agc->enable_limiter(kAgcDefaultLimiter);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.target_level_dbfs = kAgcDefaultTargetLevel;
     effect->session->config.gain_controller1.compression_gain_db = kAgcDefaultCompGain;
     effect->session->config.gain_controller1.enable_limiter = kAgcDefaultLimiter;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     return 0;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2Create(preproc_effect_t* effect) {
     Agc2Init(effect);
     return 0;
 }
-#endif
 
 int AgcCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = effect->session->apm->gain_control();
-    ALOGV("AgcCreate got agc %p", agc);
-    if (agc == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(agc);
-#endif
     AgcInit(effect);
     return 0;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2GetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
@@ -422,15 +368,11 @@
 
     return status;
 }
-#endif
 
 int AgcGetParameter(preproc_effect_t* effect, void* pParam, uint32_t* pValueSize, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
     t_agc_settings* pProperties = (t_agc_settings*)pValue;
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-#endif
 
     switch (param) {
         case AGC_PARAM_TARGET_LEVEL:
@@ -459,32 +401,6 @@
             break;
     }
 
-#ifdef WEBRTC_LEGACY
-    switch (param) {
-        case AGC_PARAM_TARGET_LEVEL:
-            *(int16_t*)pValue = (int16_t)(agc->target_level_dbfs() * -100);
-            ALOGV("AgcGetParameter() target level %d milliBels", *(int16_t*)pValue);
-            break;
-        case AGC_PARAM_COMP_GAIN:
-            *(int16_t*)pValue = (int16_t)(agc->compression_gain_db() * 100);
-            ALOGV("AgcGetParameter() comp gain %d milliBels", *(int16_t*)pValue);
-            break;
-        case AGC_PARAM_LIMITER_ENA:
-            *(bool*)pValue = (bool)agc->is_limiter_enabled();
-            ALOGV("AgcGetParameter() limiter enabled %s",
-                  (*(int16_t*)pValue != 0) ? "true" : "false");
-            break;
-        case AGC_PARAM_PROPERTIES:
-            pProperties->targetLevel = (int16_t)(agc->target_level_dbfs() * -100);
-            pProperties->compGain = (int16_t)(agc->compression_gain_db() * 100);
-            pProperties->limiterEnabled = (bool)agc->is_limiter_enabled();
-            break;
-        default:
-            ALOGW("AgcGetParameter() unknown param %d", param);
-            status = -EINVAL;
-            break;
-    }
-#else
     effect->session->config = effect->session->apm->GetConfig();
     switch (param) {
         case AGC_PARAM_TARGET_LEVEL:
@@ -515,11 +431,9 @@
             status = -EINVAL;
             break;
     }
-#endif
     return status;
 }
 
-#ifndef WEBRTC_LEGACY
 int Agc2SetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
     uint32_t param = *(uint32_t*)pParam;
@@ -567,43 +481,9 @@
 
     return status;
 }
-#endif
 
 int AgcSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
-#ifdef WEBRTC_LEGACY
-    uint32_t param = *(uint32_t*)pParam;
-    t_agc_settings* pProperties = (t_agc_settings*)pValue;
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-
-    switch (param) {
-        case AGC_PARAM_TARGET_LEVEL:
-            ALOGV("AgcSetParameter() target level %d milliBels", *(int16_t*)pValue);
-            status = agc->set_target_level_dbfs(-(*(int16_t*)pValue / 100));
-            break;
-        case AGC_PARAM_COMP_GAIN:
-            ALOGV("AgcSetParameter() comp gain %d milliBels", *(int16_t*)pValue);
-            status = agc->set_compression_gain_db(*(int16_t*)pValue / 100);
-            break;
-        case AGC_PARAM_LIMITER_ENA:
-            ALOGV("AgcSetParameter() limiter enabled %s", *(bool*)pValue ? "true" : "false");
-            status = agc->enable_limiter(*(bool*)pValue);
-            break;
-        case AGC_PARAM_PROPERTIES:
-            ALOGV("AgcSetParameter() properties level %d, gain %d limiter %d",
-                  pProperties->targetLevel, pProperties->compGain, pProperties->limiterEnabled);
-            status = agc->set_target_level_dbfs(-(pProperties->targetLevel / 100));
-            if (status != 0) break;
-            status = agc->set_compression_gain_db(pProperties->compGain / 100);
-            if (status != 0) break;
-            status = agc->enable_limiter(pProperties->limiterEnabled);
-            break;
-        default:
-            ALOGW("AgcSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
-            status = -EINVAL;
-            break;
-    }
-#else
     uint32_t param = *(uint32_t*)pParam;
     t_agc_settings* pProperties = (t_agc_settings*)pValue;
     effect->session->config = effect->session->apm->GetConfig();
@@ -637,96 +517,57 @@
             break;
     }
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 
     ALOGV("AgcSetParameter() done status %d", status);
 
     return status;
 }
 
-#ifndef WEBRTC_LEGACY
 void Agc2Enable(preproc_effect_t* effect) {
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller2.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
 }
-#endif
 
 void AgcEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    ALOGV("AgcEnable agc %p", agc);
-    agc->Enable(true);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
-#ifndef WEBRTC_LEGACY
 void Agc2Disable(preproc_effect_t* effect) {
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller2.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
 }
-#endif
 
 void AgcDisable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    ALOGV("AgcDisable");
-    webrtc::GainControl* agc = static_cast<webrtc::GainControl*>(effect->engine);
-    agc->Enable(false);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.gain_controller1.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 static const preproc_ops_t sAgcOps = {AgcCreate,       AgcInit,         NULL, AgcEnable, AgcDisable,
                                       AgcSetParameter, AgcGetParameter, NULL};
 
-#ifndef WEBRTC_LEGACY
 static const preproc_ops_t sAgc2Ops = {Agc2Create,       Agc2Init,    NULL,
                                        Agc2Enable,       Agc2Disable, Agc2SetParameter,
                                        Agc2GetParameter, NULL};
-#endif
 
 //------------------------------------------------------------------------------
 // Acoustic Echo Canceler (AEC)
 //------------------------------------------------------------------------------
 
-#ifdef WEBRTC_LEGACY
-static const webrtc::EchoControlMobile::RoutingMode kAecDefaultMode =
-        webrtc::EchoControlMobile::kEarpiece;
-static const bool kAecDefaultComfortNoise = true;
-#endif
 
 int AecInit(preproc_effect_t* effect) {
     ALOGV("AecInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    aec->set_routing_mode(kAecDefaultMode);
-    aec->enable_comfort_noise(kAecDefaultComfortNoise);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.mobile_mode = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     return 0;
 }
 
 int AecCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = effect->session->apm->echo_control_mobile();
-    ALOGV("AecCreate got aec %p", aec);
-    if (aec == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(aec);
-#endif
     AecInit(effect);
     return 0;
 }
@@ -744,13 +585,11 @@
             *(uint32_t*)pValue = 1000 * effect->session->apm->stream_delay_ms();
             ALOGV("AecGetParameter() echo delay %d us", *(uint32_t*)pValue);
             break;
-#ifndef WEBRTC_LEGACY
         case AEC_PARAM_MOBILE_MODE:
             effect->session->config = effect->session->apm->GetConfig();
             *(uint32_t*)pValue = effect->session->config.echo_canceller.mobile_mode;
             ALOGV("AecGetParameter() mobile mode %d us", *(uint32_t*)pValue);
             break;
-#endif
         default:
             ALOGW("AecGetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
             status = -EINVAL;
@@ -770,14 +609,12 @@
             status = effect->session->apm->set_stream_delay_ms(value / 1000);
             ALOGV("AecSetParameter() echo delay %d us, status %d", value, status);
             break;
-#ifndef WEBRTC_LEGACY
         case AEC_PARAM_MOBILE_MODE:
             effect->session->config = effect->session->apm->GetConfig();
             effect->session->config.echo_canceller.mobile_mode = value;
             ALOGV("AecSetParameter() mobile mode %d us", value);
             effect->session->apm->ApplyConfig(effect->session->config);
             break;
-#endif
         default:
             ALOGW("AecSetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
             status = -EINVAL;
@@ -787,57 +624,24 @@
 }
 
 void AecEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    ALOGV("AecEnable aec %p", aec);
-    aec->Enable(true);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 void AecDisable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    ALOGV("AecDisable");
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    aec->Enable(false);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.echo_canceller.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 int AecSetDevice(preproc_effect_t* effect, uint32_t device) {
     ALOGV("AecSetDevice %08x", device);
-#ifdef WEBRTC_LEGACY
-    webrtc::EchoControlMobile* aec = static_cast<webrtc::EchoControlMobile*>(effect->engine);
-    webrtc::EchoControlMobile::RoutingMode mode =
-            webrtc::EchoControlMobile::kQuietEarpieceOrHeadset;
-#endif
 
     if (audio_is_input_device(device)) {
         return 0;
     }
 
-#ifdef WEBRTC_LEGACY
-    switch (device) {
-        case AUDIO_DEVICE_OUT_EARPIECE:
-            mode = webrtc::EchoControlMobile::kEarpiece;
-            break;
-        case AUDIO_DEVICE_OUT_SPEAKER:
-            mode = webrtc::EchoControlMobile::kSpeakerphone;
-            break;
-        case AUDIO_DEVICE_OUT_WIRED_HEADSET:
-        case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
-        case AUDIO_DEVICE_OUT_USB_HEADSET:
-        default:
-            break;
-    }
-    aec->set_routing_mode(mode);
-#endif
     return 0;
 }
 
@@ -849,49 +653,19 @@
 // Noise Suppression (NS)
 //------------------------------------------------------------------------------
 
-#ifdef WEBRTC_LEGACY
-static const webrtc::NoiseSuppression::Level kNsDefaultLevel = webrtc::NoiseSuppression::kModerate;
-#else
 static const webrtc::AudioProcessing::Config::NoiseSuppression::Level kNsDefaultLevel =
         webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
-#endif
 
 int NsInit(preproc_effect_t* effect) {
     ALOGV("NsInit");
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ns->set_level(kNsDefaultLevel);
-    webrtc::Config config;
-    std::vector<webrtc::Point> geometry;
-    // TODO(aluebs): Make the geometry settable.
-    geometry.push_back(webrtc::Point(-0.03f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(-0.01f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(0.01f, 0.f, 0.f));
-    geometry.push_back(webrtc::Point(0.03f, 0.f, 0.f));
-    // The geometry needs to be set with Beamforming enabled.
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
-    effect->session->apm->SetExtraOptions(config);
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
-    effect->session->apm->SetExtraOptions(config);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.level = kNsDefaultLevel;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
     effect->type = NS_TYPE_SINGLE_CHANNEL;
     return 0;
 }
 
 int NsCreate(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = effect->session->apm->noise_suppression();
-    ALOGV("NsCreate got ns %p", ns);
-    if (ns == NULL) {
-        ALOGW("AgcCreate Error");
-        return -ENOMEM;
-    }
-    effect->engine = static_cast<preproc_fx_handle_t>(ns);
-#endif
     NsInit(effect);
     return 0;
 }
@@ -904,31 +678,6 @@
 
 int NsSetParameter(preproc_effect_t* effect, void* pParam, void* pValue) {
     int status = 0;
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    uint32_t param = *(uint32_t*)pParam;
-    uint32_t value = *(uint32_t*)pValue;
-    switch (param) {
-        case NS_PARAM_LEVEL:
-            ns->set_level((webrtc::NoiseSuppression::Level)value);
-            ALOGV("NsSetParameter() level %d", value);
-            break;
-        case NS_PARAM_TYPE: {
-            webrtc::Config config;
-            std::vector<webrtc::Point> geometry;
-            bool is_beamforming_enabled = value == NS_TYPE_MULTI_CHANNEL && ns->is_enabled();
-            config.Set<webrtc::Beamforming>(
-                    new webrtc::Beamforming(is_beamforming_enabled, geometry));
-            effect->session->apm->SetExtraOptions(config);
-            effect->type = value;
-            ALOGV("NsSetParameter() type %d", value);
-            break;
-        }
-        default:
-            ALOGW("NsSetParameter() unknown param %08x value %08x", param, value);
-            status = -EINVAL;
-    }
-#else
     uint32_t param = *(uint32_t*)pParam;
     uint32_t value = *(uint32_t*)pValue;
     effect->session->config = effect->session->apm->GetConfig();
@@ -943,52 +692,28 @@
             status = -EINVAL;
     }
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 
     return status;
 }
 
 void NsEnable(preproc_effect_t* effect) {
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ALOGV("NsEnable ns %p", ns);
-    ns->Enable(true);
-    if (effect->type == NS_TYPE_MULTI_CHANNEL) {
-        webrtc::Config config;
-        std::vector<webrtc::Point> geometry;
-        config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry));
-        effect->session->apm->SetExtraOptions(config);
-    }
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.enabled = true;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 void NsDisable(preproc_effect_t* effect) {
     ALOGV("NsDisable");
-#ifdef WEBRTC_LEGACY
-    webrtc::NoiseSuppression* ns = static_cast<webrtc::NoiseSuppression*>(effect->engine);
-    ns->Enable(false);
-    webrtc::Config config;
-    std::vector<webrtc::Point> geometry;
-    config.Set<webrtc::Beamforming>(new webrtc::Beamforming(false, geometry));
-    effect->session->apm->SetExtraOptions(config);
-#else
     effect->session->config = effect->session->apm->GetConfig();
     effect->session->config.noise_suppression.enabled = false;
     effect->session->apm->ApplyConfig(effect->session->config);
-#endif
 }
 
 static const preproc_ops_t sNsOps = {NsCreate,  NsInit,         NULL,           NsEnable,
                                      NsDisable, NsSetParameter, NsGetParameter, NULL};
 
 static const preproc_ops_t* sPreProcOps[PREPROC_NUM_EFFECTS] = {&sAgcOps,
-#ifndef WEBRTC_LEGACY
                                                                 &sAgc2Ops,
-#endif
                                                                 &sAecOps, &sNsOps};
 
 //------------------------------------------------------------------------------
@@ -1119,9 +844,6 @@
     session->id = 0;
     session->io = 0;
     session->createdMsk = 0;
-#ifdef WEBRTC_LEGACY
-    session->apm = NULL;
-#endif
     for (i = 0; i < PREPROC_NUM_EFFECTS && status == 0; i++) {
         status = Effect_Init(&session->effects[i], i);
     }
@@ -1135,75 +857,32 @@
     ALOGV("Session_CreateEffect procId %d, createdMsk %08x", procId, session->createdMsk);
 
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
-        session->apm = webrtc::AudioProcessing::Create();
-        if (session->apm == NULL) {
-            ALOGW("Session_CreateEffect could not get apm engine");
-            goto error;
-        }
-        const webrtc::ProcessingConfig processing_config = {
-                {{kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl},
-                 {kPreprocDefaultSr, kPreProcDefaultCnl}}};
-        session->apm->Initialize(processing_config);
-        session->procFrame = new webrtc::AudioFrame();
-        if (session->procFrame == NULL) {
-            ALOGW("Session_CreateEffect could not allocate audio frame");
-            goto error;
-        }
-        session->revFrame = new webrtc::AudioFrame();
-        if (session->revFrame == NULL) {
-            ALOGW("Session_CreateEffect could not allocate reverse audio frame");
-            goto error;
-        }
-#else
         session->apm = session->ap_builder.Create();
         if (session->apm == NULL) {
             ALOGW("Session_CreateEffect could not get apm engine");
             goto error;
         }
-#endif
         session->apmSamplingRate = kPreprocDefaultSr;
         session->apmFrameCount = (kPreprocDefaultSr) / 100;
         session->frameCount = session->apmFrameCount;
         session->samplingRate = kPreprocDefaultSr;
         session->inChannelCount = kPreProcDefaultCnl;
         session->outChannelCount = kPreProcDefaultCnl;
-#ifdef WEBRTC_LEGACY
-        session->procFrame->sample_rate_hz_ = kPreprocDefaultSr;
-        session->procFrame->num_channels_ = kPreProcDefaultCnl;
-#else
         session->inputConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->inputConfig.set_num_channels(kPreProcDefaultCnl);
         session->outputConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->outputConfig.set_num_channels(kPreProcDefaultCnl);
-#endif
         session->revChannelCount = kPreProcDefaultCnl;
-#ifdef WEBRTC_LEGACY
-        session->revFrame->sample_rate_hz_ = kPreprocDefaultSr;
-        session->revFrame->num_channels_ = kPreProcDefaultCnl;
-#else
         session->revConfig.set_sample_rate_hz(kPreprocDefaultSr);
         session->revConfig.set_num_channels(kPreProcDefaultCnl);
-#endif
         session->enabledMsk = 0;
         session->processedMsk = 0;
         session->revEnabledMsk = 0;
         session->revProcessedMsk = 0;
-#ifdef WEBRTC_LEGACY
-        session->inResampler = NULL;
-#endif
         session->inBuf = NULL;
         session->inBufSize = 0;
-#ifdef WEBRTC_LEGACY
-        session->outResampler = NULL;
-#endif
         session->outBuf = NULL;
         session->outBufSize = 0;
-#ifdef WEBRTC_LEGACY
-        session->revResampler = NULL;
-#endif
         session->revBuf = NULL;
         session->revBufSize = 0;
     }
@@ -1217,17 +896,8 @@
 
 error:
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
-        delete session->revFrame;
-        session->revFrame = NULL;
-        delete session->procFrame;
-        session->procFrame = NULL;
-        delete session->apm;
-        session->apm = NULL;  // NOLINT(clang-analyzer-cplusplus.NewDelete)
-#else
         delete session->apm;
         session->apm = NULL;
-#endif
     }
     return status;
 }
@@ -1236,29 +906,8 @@
     ALOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
     session->createdMsk &= ~(1 << fx->procId);
     if (session->createdMsk == 0) {
-#ifdef WEBRTC_LEGACY
         delete session->apm;
         session->apm = NULL;
-        delete session->procFrame;
-        session->procFrame = NULL;
-        delete session->revFrame;
-        session->revFrame = NULL;
-        if (session->inResampler != NULL) {
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-        }
-        if (session->outResampler != NULL) {
-            speex_resampler_destroy(session->outResampler);
-            session->outResampler = NULL;
-        }
-        if (session->revResampler != NULL) {
-            speex_resampler_destroy(session->revResampler);
-            session->revResampler = NULL;
-        }
-#else
-        delete session->apm;
-        session->apm = NULL;
-#endif
         delete session->inBuf;
         session->inBuf = NULL;
         delete session->outBuf;
@@ -1284,9 +933,6 @@
 
     ALOGV("Session_SetConfig sr %d cnl %08x", config->inputCfg.samplingRate,
           config->inputCfg.channels);
-#ifdef WEBRTC_LEGACY
-    int status;
-#endif
 
     // AEC implementation is limited to 16kHz
     if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) {
@@ -1297,51 +943,25 @@
         session->apmSamplingRate = 8000;
     }
 
-#ifdef WEBRTC_LEGACY
-    const webrtc::ProcessingConfig processing_config = {
-            {{static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), outCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl}}};
-    status = session->apm->Initialize(processing_config);
-    if (status < 0) {
-        return -EINVAL;
-    }
-#endif
 
     session->samplingRate = config->inputCfg.samplingRate;
     session->apmFrameCount = session->apmSamplingRate / 100;
     if (session->samplingRate == session->apmSamplingRate) {
         session->frameCount = session->apmFrameCount;
     } else {
-#ifdef WEBRTC_LEGACY
-        session->frameCount =
-                (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate + 1;
-#else
         session->frameCount =
                 (session->apmFrameCount * session->samplingRate) / session->apmSamplingRate;
-#endif
     }
     session->inChannelCount = inCnl;
     session->outChannelCount = outCnl;
-#ifdef WEBRTC_LEGACY
-    session->procFrame->num_channels_ = inCnl;
-    session->procFrame->sample_rate_hz_ = session->apmSamplingRate;
-#else
     session->inputConfig.set_sample_rate_hz(session->samplingRate);
     session->inputConfig.set_num_channels(inCnl);
     session->outputConfig.set_sample_rate_hz(session->samplingRate);
     session->outputConfig.set_num_channels(inCnl);
-#endif
 
     session->revChannelCount = inCnl;
-#ifdef WEBRTC_LEGACY
-    session->revFrame->num_channels_ = inCnl;
-    session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
-#else
     session->revConfig.set_sample_rate_hz(session->samplingRate);
     session->revConfig.set_num_channels(inCnl);
-#endif
 
     // force process buffer reallocation
     session->inBufSize = 0;
@@ -1349,53 +969,6 @@
     session->framesIn = 0;
     session->framesOut = 0;
 
-#ifdef WEBRTC_LEGACY
-    if (session->inResampler != NULL) {
-        speex_resampler_destroy(session->inResampler);
-        session->inResampler = NULL;
-    }
-    if (session->outResampler != NULL) {
-        speex_resampler_destroy(session->outResampler);
-        session->outResampler = NULL;
-    }
-    if (session->revResampler != NULL) {
-        speex_resampler_destroy(session->revResampler);
-        session->revResampler = NULL;
-    }
-    if (session->samplingRate != session->apmSamplingRate) {
-        int error;
-        session->inResampler =
-                speex_resampler_init(session->inChannelCount, session->samplingRate,
-                                     session->apmSamplingRate, RESAMPLER_QUALITY, &error);
-        if (session->inResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            return -EINVAL;
-        }
-        session->outResampler =
-                speex_resampler_init(session->outChannelCount, session->apmSamplingRate,
-                                     session->samplingRate, RESAMPLER_QUALITY, &error);
-        if (session->outResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-            return -EINVAL;
-        }
-        session->revResampler =
-                speex_resampler_init(session->inChannelCount, session->samplingRate,
-                                     session->apmSamplingRate, RESAMPLER_QUALITY, &error);
-        if (session->revResampler == NULL) {
-            ALOGW("Session_SetConfig Cannot create speex resampler: %s",
-                  speex_resampler_strerror(error));
-            speex_resampler_destroy(session->inResampler);
-            session->inResampler = NULL;
-            speex_resampler_destroy(session->outResampler);
-            session->outResampler = NULL;
-            return -EINVAL;
-        }
-    }
-#endif
 
     session->state = PREPROC_SESSION_STATE_CONFIG;
     return 0;
@@ -1430,22 +1003,7 @@
         return -EINVAL;
     }
     uint32_t inCnl = audio_channel_count_from_out_mask(config->inputCfg.channels);
-#ifdef WEBRTC_LEGACY
-    const webrtc::ProcessingConfig processing_config = {
-            {{static_cast<int>(session->apmSamplingRate), session->inChannelCount},
-             {static_cast<int>(session->apmSamplingRate), session->outChannelCount},
-             {static_cast<int>(session->apmSamplingRate), inCnl},
-             {static_cast<int>(session->apmSamplingRate), inCnl}}};
-    int status = session->apm->Initialize(processing_config);
-    if (status < 0) {
-        return -EINVAL;
-    }
-#endif
     session->revChannelCount = inCnl;
-#ifdef WEBRTC_LEGACY
-    session->revFrame->num_channels_ = inCnl;
-    session->revFrame->sample_rate_hz_ = session->apmSamplingRate;
-#endif
     // force process buffer reallocation
     session->revBufSize = 0;
     session->framesRev = 0;
@@ -1467,24 +1025,10 @@
     if (enabled) {
         if (session->enabledMsk == 0) {
             session->framesIn = 0;
-#ifdef WEBRTC_LEGACY
-            if (session->inResampler != NULL) {
-                speex_resampler_reset_mem(session->inResampler);
-            }
-            session->framesOut = 0;
-            if (session->outResampler != NULL) {
-                speex_resampler_reset_mem(session->outResampler);
-            }
-#endif
         }
         session->enabledMsk |= (1 << procId);
         if (HasReverseStream(procId)) {
             session->framesRev = 0;
-#ifdef WEBRTC_LEGACY
-            if (session->revResampler != NULL) {
-                speex_resampler_reset_mem(session->revResampler);
-            }
-#endif
             session->revEnabledMsk |= (1 << procId);
         }
     } else {
@@ -1600,82 +1144,6 @@
             return 0;
         }
 
-#ifdef WEBRTC_LEGACY
-        if (session->inResampler != NULL) {
-            size_t fr = session->frameCount - session->framesIn;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            if (session->inBufSize < session->framesIn + fr) {
-                int16_t* buf;
-                session->inBufSize = session->framesIn + fr;
-                buf = (int16_t*)realloc(
-                        session->inBuf,
-                        session->inBufSize * session->inChannelCount * sizeof(int16_t));
-                if (buf == NULL) {
-                    session->framesIn = 0;
-                    free(session->inBuf);
-                    session->inBuf = NULL;
-                    return -ENOMEM;
-                }
-                session->inBuf = buf;
-            }
-            memcpy(session->inBuf + session->framesIn * session->inChannelCount, inBuffer->s16,
-                   fr * session->inChannelCount * sizeof(int16_t));
-#ifdef DUAL_MIC_TEST
-            pthread_mutex_lock(&gPcmDumpLock);
-            if (gPcmDumpFh != NULL) {
-                fwrite(inBuffer->raw, fr * session->inChannelCount * sizeof(int16_t), 1,
-                       gPcmDumpFh);
-            }
-            pthread_mutex_unlock(&gPcmDumpLock);
-#endif
-
-            session->framesIn += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesIn < session->frameCount) {
-                return 0;
-            }
-            spx_uint32_t frIn = session->framesIn;
-            spx_uint32_t frOut = session->apmFrameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(session->inResampler, 0, session->inBuf, &frIn,
-                                            session->procFrame->data_, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(session->inResampler, session->inBuf, &frIn,
-                                                        session->procFrame->data_, &frOut);
-            }
-            memmove(session->inBuf, session->inBuf + frIn * session->inChannelCount,
-                    (session->framesIn - frIn) * session->inChannelCount * sizeof(int16_t));
-            session->framesIn -= frIn;
-        } else {
-            size_t fr = session->frameCount - session->framesIn;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            memcpy(session->procFrame->data_ + session->framesIn * session->inChannelCount,
-                   inBuffer->s16, fr * session->inChannelCount * sizeof(int16_t));
-
-#ifdef DUAL_MIC_TEST
-            pthread_mutex_lock(&gPcmDumpLock);
-            if (gPcmDumpFh != NULL) {
-                fwrite(inBuffer->raw, fr * session->inChannelCount * sizeof(int16_t), 1,
-                       gPcmDumpFh);
-            }
-            pthread_mutex_unlock(&gPcmDumpLock);
-#endif
-
-            session->framesIn += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesIn < session->frameCount) {
-                return 0;
-            }
-            session->framesIn = 0;
-        }
-        session->procFrame->samples_per_channel_ = session->apmFrameCount;
-
-        effect->session->apm->ProcessStream(session->procFrame);
-#else
         size_t fr = session->frameCount - session->framesIn;
         if (inBuffer->frameCount < fr) {
             fr = inBuffer->frameCount;
@@ -1696,7 +1164,6 @@
             return status;
         }
         outBuffer->frameCount = inBuffer->frameCount;
-#endif
 
         if (session->outBufSize < session->framesOut + session->frameCount) {
             int16_t* buf;
@@ -1713,30 +1180,7 @@
             session->outBuf = buf;
         }
 
-#ifdef WEBRTC_LEGACY
-        if (session->outResampler != NULL) {
-            spx_uint32_t frIn = session->apmFrameCount;
-            spx_uint32_t frOut = session->frameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(
-                        session->outResampler, 0, session->procFrame->data_, &frIn,
-                        session->outBuf + session->framesOut * session->outChannelCount, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(
-                        session->outResampler, session->procFrame->data_, &frIn,
-                        session->outBuf + session->framesOut * session->outChannelCount, &frOut);
-            }
-            session->framesOut += frOut;
-        } else {
-            memcpy(session->outBuf + session->framesOut * session->outChannelCount,
-                   session->procFrame->data_,
-                   session->frameCount * session->outChannelCount * sizeof(int16_t));
-            session->framesOut += session->frameCount;
-        }
-        size_t fr = session->framesOut;
-#else
         fr = session->framesOut;
-#endif
         if (framesRq - framesWr < fr) {
             fr = framesRq - framesWr;
         }
@@ -2129,63 +1573,6 @@
 
     if ((session->revProcessedMsk & session->revEnabledMsk) == session->revEnabledMsk) {
         effect->session->revProcessedMsk = 0;
-#ifdef WEBRTC_LEGACY
-        if (session->revResampler != NULL) {
-            size_t fr = session->frameCount - session->framesRev;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            if (session->revBufSize < session->framesRev + fr) {
-                int16_t* buf;
-                session->revBufSize = session->framesRev + fr;
-                buf = (int16_t*)realloc(
-                        session->revBuf,
-                        session->revBufSize * session->inChannelCount * sizeof(int16_t));
-                if (buf == NULL) {
-                    session->framesRev = 0;
-                    free(session->revBuf);
-                    session->revBuf = NULL;
-                    return -ENOMEM;
-                }
-                session->revBuf = buf;
-            }
-            memcpy(session->revBuf + session->framesRev * session->inChannelCount, inBuffer->s16,
-                   fr * session->inChannelCount * sizeof(int16_t));
-
-            session->framesRev += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesRev < session->frameCount) {
-                return 0;
-            }
-            spx_uint32_t frIn = session->framesRev;
-            spx_uint32_t frOut = session->apmFrameCount;
-            if (session->inChannelCount == 1) {
-                speex_resampler_process_int(session->revResampler, 0, session->revBuf, &frIn,
-                                            session->revFrame->data_, &frOut);
-            } else {
-                speex_resampler_process_interleaved_int(session->revResampler, session->revBuf,
-                                                        &frIn, session->revFrame->data_, &frOut);
-            }
-            memmove(session->revBuf, session->revBuf + frIn * session->inChannelCount,
-                    (session->framesRev - frIn) * session->inChannelCount * sizeof(int16_t));
-            session->framesRev -= frIn;
-        } else {
-            size_t fr = session->frameCount - session->framesRev;
-            if (inBuffer->frameCount < fr) {
-                fr = inBuffer->frameCount;
-            }
-            memcpy(session->revFrame->data_ + session->framesRev * session->inChannelCount,
-                   inBuffer->s16, fr * session->inChannelCount * sizeof(int16_t));
-            session->framesRev += fr;
-            inBuffer->frameCount = fr;
-            if (session->framesRev < session->frameCount) {
-                return 0;
-            }
-            session->framesRev = 0;
-        }
-        session->revFrame->samples_per_channel_ = session->apmFrameCount;
-        effect->session->apm->AnalyzeReverseStream(session->revFrame);
-#else
         size_t fr = session->frameCount - session->framesRev;
         if (inBuffer->frameCount < fr) {
             fr = inBuffer->frameCount;
@@ -2205,7 +1592,6 @@
             ALOGE("Process Reverse Stream failed with error %d\n", status);
             return status;
         }
-#endif
         return 0;
     } else {
         return -ENODATA;
diff --git a/media/libeffects/preprocessing/benchmarks/Android.bp b/media/libeffects/preprocessing/benchmarks/Android.bp
index 2808293..262fd19 100644
--- a/media/libeffects/preprocessing/benchmarks/Android.bp
+++ b/media/libeffects/preprocessing/benchmarks/Android.bp
@@ -1,31 +1,4 @@
 cc_benchmark {
-    name: "preprocessing_legacy_benchmark",
-    vendor: true,
-    relative_install_path: "soundfx",
-    srcs: ["preprocessing_benchmark.cpp"],
-    shared_libs: [
-        "libaudiopreprocessing_legacy",
-        "libaudioutils",
-        "liblog",
-        "libutils",
-        "libwebrtc_audio_preprocessing",
-    ],
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-        "-Wextra",
-    ],
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-        "libwebrtc_absl_headers",
-    ],
-}
-
-cc_benchmark {
     name: "preprocessing_benchmark",
     vendor: true,
     relative_install_path: "soundfx",
diff --git a/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp b/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
index 3a0ad6d..694a6c4 100644
--- a/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
+++ b/media/libeffects/preprocessing/benchmarks/preprocessing_benchmark.cpp
@@ -54,9 +54,7 @@
 #include <cstdlib>
 #include <random>
 #include <vector>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <benchmark/benchmark.h>
 #include <hardware/audio_effect.h>
@@ -76,10 +74,8 @@
         {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
         // ns  uuid
         {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},
-#ifndef WEBRTC_LEGACY
         // agc2 uuid
         {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},
-#endif
 };
 constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
 constexpr audio_channel_mask_t kChMasks[] = {
@@ -93,9 +89,7 @@
     PREPROC_AGC,  // Automatic Gain Control
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_NUM_EFFECTS
 };
 
diff --git a/media/libeffects/preprocessing/tests/Android.bp b/media/libeffects/preprocessing/tests/Android.bp
index 045b0d3..b439880 100644
--- a/media/libeffects/preprocessing/tests/Android.bp
+++ b/media/libeffects/preprocessing/tests/Android.bp
@@ -1,37 +1,5 @@
 // audio preprocessing unit test
 cc_test {
-    name: "AudioPreProcessingLegacyTest",
-
-    vendor: true,
-
-    relative_install_path: "soundfx",
-
-    srcs: ["PreProcessingTest.cpp"],
-
-    shared_libs: [
-        "libaudiopreprocessing_legacy",
-        "libaudioutils",
-        "liblog",
-        "libutils",
-        "libwebrtc_audio_preprocessing",
-    ],
-
-    cflags: [
-        "-DWEBRTC_POSIX",
-        "-DWEBRTC_LEGACY",
-        "-fvisibility=default",
-        "-Wall",
-        "-Werror",
-        "-Wextra",
-    ],
-
-    header_libs: [
-        "libaudioeffects",
-        "libhardware_headers",
-    ],
-}
-
-cc_test {
     name: "AudioPreProcessingTest",
 
     vendor: true,
diff --git a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
index 65b9469..5f223c9 100644
--- a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
@@ -22,9 +22,7 @@
 
 #include <audio_effects/effect_aec.h>
 #include <audio_effects/effect_agc.h>
-#ifndef WEBRTC_LEGACY
 #include <audio_effects/effect_agc2.h>
-#endif
 #include <audio_effects/effect_ns.h>
 #include <log/log.h>
 
@@ -38,9 +36,7 @@
 // types of pre processing modules
 enum PreProcId {
     PREPROC_AGC,  // Automatic Gain Control
-#ifndef WEBRTC_LEGACY
     PREPROC_AGC2,  // Automatic Gain Control 2
-#endif
     PREPROC_AEC,  // Acoustic Echo Canceler
     PREPROC_NS,   // Noise Suppressor
     PREPROC_NUM_EFFECTS
@@ -57,11 +53,9 @@
     ARG_AGC_COMP_LVL,
     ARG_AEC_DELAY,
     ARG_NS_LVL,
-#ifndef WEBRTC_LEGACY
     ARG_AGC2_GAIN,
     ARG_AGC2_LVL,
     ARG_AGC2_SAT_MGN
-#endif
 };
 
 struct preProcConfigParams_t {
@@ -70,19 +64,15 @@
     int nsLevel = 0;         // a value between 0-3
     int agcTargetLevel = 3;  // in dB
     int agcCompLevel = 9;    // in dB
-#ifndef WEBRTC_LEGACY
     float agc2Gain = 0.f;              // in dB
     float agc2SaturationMargin = 2.f;  // in dB
     int agc2Level = 0;                 // either kRms(0) or kPeak(1)
-#endif
     int aecDelay = 0;  // in ms
 };
 
 const effect_uuid_t kPreProcUuids[PREPROC_NUM_EFFECTS] = {
         {0xaa8130e0, 0x66fc, 0x11e0, 0xbad0, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // agc uuid
-#ifndef WEBRTC_LEGACY
         {0x89f38e65, 0xd4d2, 0x4d64, 0xad0e, {0x2b, 0x3e, 0x79, 0x9e, 0xa8, 0x86}},  // agc2 uuid
-#endif
         {0xbb392ec0, 0x8d4d, 0x11e0, 0xa896, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // aec uuid
         {0xc06c8400, 0x8e06, 0x11e0, 0x9cb6, {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}},  // ns  uuid
 };
@@ -138,24 +128,20 @@
     printf("\n           Enable Noise Suppression, default disabled");
     printf("\n     --agc");
     printf("\n           Enable Gain Control, default disabled");
-#ifndef WEBRTC_LEGACY
     printf("\n     --agc2");
     printf("\n           Enable Gain Controller 2, default disabled");
-#endif
     printf("\n     --ns_lvl <ns_level>");
     printf("\n           Noise Suppression level in dB, default value 0dB");
     printf("\n     --agc_tgt_lvl <target_level>");
     printf("\n           AGC Target Level in dB, default value 3dB");
     printf("\n     --agc_comp_lvl <comp_level>");
     printf("\n           AGC Comp Level in dB, default value 9dB");
-#ifndef WEBRTC_LEGACY
     printf("\n     --agc2_gain <fixed_digital_gain>");
     printf("\n           AGC Fixed Digital Gain in dB, default value 0dB");
     printf("\n     --agc2_lvl <level_estimator>");
     printf("\n           AGC Adaptive Digital Level Estimator, default value kRms");
     printf("\n     --agc2_sat_mgn <saturation_margin>");
     printf("\n           AGC Adaptive Digital Saturation Margin in dB, default value 2dB");
-#endif
     printf("\n     --aec_delay <delay>");
     printf("\n           AEC delay value in ms, default value 0ms");
     printf("\n");
@@ -217,18 +203,14 @@
             {"ch_mask", required_argument, nullptr, ARG_CH_MASK},
             {"agc_tgt_lvl", required_argument, nullptr, ARG_AGC_TGT_LVL},
             {"agc_comp_lvl", required_argument, nullptr, ARG_AGC_COMP_LVL},
-#ifndef WEBRTC_LEGACY
             {"agc2_gain", required_argument, nullptr, ARG_AGC2_GAIN},
             {"agc2_lvl", required_argument, nullptr, ARG_AGC2_LVL},
             {"agc2_sat_mgn", required_argument, nullptr, ARG_AGC2_SAT_MGN},
-#endif
             {"aec_delay", required_argument, nullptr, ARG_AEC_DELAY},
             {"ns_lvl", required_argument, nullptr, ARG_NS_LVL},
             {"aec", no_argument, &effectEn[PREPROC_AEC], 1},
             {"agc", no_argument, &effectEn[PREPROC_AGC], 1},
-#ifndef WEBRTC_LEGACY
             {"agc2", no_argument, &effectEn[PREPROC_AGC2], 1},
-#endif
             {"ns", no_argument, &effectEn[PREPROC_NS], 1},
             {nullptr, 0, nullptr, 0},
     };
@@ -277,7 +259,6 @@
                 preProcCfgParams.agcCompLevel = atoi(optarg);
                 break;
             }
-#ifndef WEBRTC_LEGACY
             case ARG_AGC2_GAIN: {
                 preProcCfgParams.agc2Gain = atof(optarg);
                 break;
@@ -290,7 +271,6 @@
                 preProcCfgParams.agc2SaturationMargin = atof(optarg);
                 break;
             }
-#endif
             case ARG_AEC_DELAY: {
                 preProcCfgParams.aecDelay = atoi(optarg);
                 break;
@@ -387,7 +367,6 @@
             return EXIT_FAILURE;
         }
     }
-#ifndef WEBRTC_LEGACY
     if (effectEn[PREPROC_AGC2]) {
         if (int status = preProcSetConfigParam(AGC2_PARAM_FIXED_DIGITAL_GAIN,
                                                (float)preProcCfgParams.agc2Gain,
@@ -411,7 +390,6 @@
             return EXIT_FAILURE;
         }
     }
-#endif
     if (effectEn[PREPROC_NS]) {
         if (int status = preProcSetConfigParam(NS_PARAM_LEVEL, (uint32_t)preProcCfgParams.nsLevel,
                                                effectHandle[PREPROC_NS]);
diff --git a/media/libmediahelper/tests/typeconverter_tests.cpp b/media/libmediahelper/tests/typeconverter_tests.cpp
index 0c3b913..d7bfb89 100644
--- a/media/libmediahelper/tests/typeconverter_tests.cpp
+++ b/media/libmediahelper/tests/typeconverter_tests.cpp
@@ -33,7 +33,8 @@
     for (const auto enumVal : xsdc_enum_range<xsd::AudioChannelMask>{}) {
         const std::string stringVal = toString(enumVal);
         audio_channel_mask_t channelMask = channelMaskFromString(stringVal);
-        EXPECT_EQ(stringVal != "AUDIO_CHANNEL_NONE", audio_channel_mask_is_valid(channelMask))
+        EXPECT_EQ(enumVal != xsd::AudioChannelMask::AUDIO_CHANNEL_NONE,
+                audio_channel_mask_is_valid(channelMask))
                 << "Validity of \"" << stringVal << "\" is not as expected";
     }
 }
@@ -67,7 +68,7 @@
             EXPECT_TRUE(ChannelIndexConverter::toString(channelMask, stringValBack))
                     << "Conversion of indexed channel mask " << channelMask << " failed";
             EXPECT_EQ(stringVal, stringValBack);
-        } else if (stringVal == "AUDIO_CHANNEL_NONE") {
+        } else if (stringVal == toString(xsd::AudioChannelMask::AUDIO_CHANNEL_NONE)) {
             EXPECT_FALSE(InputChannelConverter::fromString(stringVal, channelMask))
                     << "Conversion of \"" << stringVal << "\" succeeded (as input channel mask)";
             EXPECT_FALSE(OutputChannelConverter::fromString(stringVal, channelMask))
@@ -86,6 +87,8 @@
             EXPECT_TRUE(ChannelIndexConverter::toString(channelMask, stringValBack))
                     << "Conversion of indexed channel mask " << channelMask << " failed";
             EXPECT_EQ(stringVal, stringValBack);
+        } else {
+            FAIL() << "Unrecognized channel mask \"" << stringVal << "\"";
         }
     }
 }
@@ -107,7 +110,7 @@
         std::string stringValBack;
         EXPECT_TRUE(DeviceConverter::fromString(stringVal, device))
                 << "Conversion of \"" << stringVal << "\" failed";
-        if (stringVal != "AUDIO_DEVICE_NONE") {
+        if (enumVal != xsd::AudioDevice::AUDIO_DEVICE_NONE) {
             EXPECT_TRUE(audio_is_input_device(device) || audio_is_output_device(device))
                     << "Device \"" << stringVal << "\" is neither input, nor output device";
         } else {
@@ -144,17 +147,19 @@
             EXPECT_TRUE(OutputDeviceConverter::fromString(stringValBack, deviceBack))
                     << "Conversion of \"" << stringValBack << "\" failed";
             EXPECT_EQ(device, deviceBack);
-        } else if (stringVal == "AUDIO_DEVICE_NONE") {
+        } else if (stringVal == toString(xsd::AudioDevice::AUDIO_DEVICE_NONE)) {
             EXPECT_FALSE(InputDeviceConverter::fromString(stringVal, device))
                     << "Conversion of \"" << stringVal << "\" succeeded (as input device)";
             EXPECT_FALSE(OutputDeviceConverter::fromString(stringVal, device))
                     << "Conversion of \"" << stringVal << "\" succeeded (as output device)";
             EXPECT_EQ(stringVal, toString(device));
+        } else {
+            FAIL() << "Unrecognized audio device \"" << stringVal << "\"";
         }
     }
 }
 
-TEST (TypeConverter, ParseInOutFlags) {
+TEST(TypeConverter, ParseInOutFlags) {
     for (const auto enumVal : xsdc_enum_range<xsd::AudioInOutFlag>{}) {
         const std::string stringVal = toString(enumVal);
         if (stringVal.find("_INPUT_FLAG_") != std::string::npos) {
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 6a4706d..7b36875 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -121,7 +121,7 @@
     unsigned start = (unsigned)((rand()* 1000LL)/RAND_MAX) + 15550;
     start &= ~1;
 
-    for (unsigned port = start; port < 65536; port += 2) {
+    for (unsigned port = start; port < 65535; port += 2) {
         struct sockaddr_in addr;
         memset(addr.sin_zero, 0, sizeof(addr.sin_zero));
         addr.sin_family = AF_INET;
@@ -139,6 +139,13 @@
                  (const struct sockaddr *)&addr, sizeof(addr)) == 0) {
             *rtpPort = port;
             return;
+        } else {
+            // Recreate the RTP socket so that we do not bind a different port on the same socket
+            close(*rtpSocket);
+
+            *rtpSocket = socket(AF_INET, SOCK_DGRAM, 0);
+            CHECK_GE(*rtpSocket, 0);
+            bumpSocketBufferSize(*rtpSocket);
         }
     }