Merge "Revert "Return DeviceDescriptor or DeviceVector in Engine.""
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 9ec64e1..4a801a7 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -7696,14 +7696,14 @@
      * case, when the application configures a RAW stream, the camera device will make sure
      * the active physical camera will remain active to ensure consistent RAW output
      * behavior, and not switch to other physical cameras.</p>
-     * <p>To maintain backward compatibility, the capture request and result metadata tags
-     * required for basic camera functionalities will be solely based on the
-     * logical camera capabiltity. Other request and result metadata tags, on the other
-     * hand, will be based on current active physical camera. For example, the physical
-     * cameras' sensor sensitivity and lens capability could be different from each other.
-     * So when the application manually controls sensor exposure time/gain, or does manual
-     * focus control, it must checks the current active physical camera's exposure, gain,
-     * and focus distance range.</p>
+     * <p>The capture request and result metadata tags required for backward compatible camera
+     * functionalities will be solely based on the logical camera capability. On the other
+     * hand, the use of manual capture controls (sensor or post-processing) with a
+     * logical camera may result in unexpected behavior when the HAL decides to switch
+     * between physical cameras with different characteristics under the hood. For example,
+     * when the application manually sets exposure time and sensitivity while zooming in,
+     * the brightness of the camera images may suddenly change because HAL switches from one
+     * physical camera to the other.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
      * @see ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
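
For illustration, a minimal NDK-side sketch (not part of this patch) of how an application might watch for the active physical camera switches described above, by reading ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID from each capture result; the callback wiring and the gLastPhysicalId state are assumptions for the example.

    #include <camera/NdkCameraCaptureSession.h>
    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>
    #include <string>

    static std::string gLastPhysicalId;  // hypothetical per-session state

    static void onCaptureCompleted(void* /*context*/, ACameraCaptureSession* /*session*/,
                                   ACaptureRequest* /*request*/, const ACameraMetadata* result) {
        ACameraMetadata_const_entry entry = {};
        camera_status_t status = ACameraMetadata_getConstEntry(
                result, ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
        if (status != ACAMERA_OK || entry.count == 0) {
            return;  // tag not reported by this device or result
        }
        // The entry holds the active physical camera id as a null-terminated byte string.
        std::string physicalId(reinterpret_cast<const char*>(entry.data.u8));
        if (physicalId != gLastPhysicalId) {
            // The HAL switched physical cameras; manual exposure/gain/focus values tuned
            // for the previous sensor may need to be re-derived.
            gLastPhysicalId = physicalId;
        }
    }
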
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 8fe029a..f07a1e6 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -57,7 +57,7 @@
 #include <algorithm>
 
 using namespace android;
-using ::android::hardware::ICameraServiceDefault;
+using ::android::hardware::ICameraService;
 using ::android::hardware::camera2::ICameraDeviceUser;
 
 #define ASSERT_NOT_NULL(x) \
@@ -507,7 +507,7 @@
         bool queryStatus;
         res = device->isSessionConfigurationSupported(sessionConfiguration, &queryStatus);
         EXPECT_TRUE(res.isOk() ||
-                (res.serviceSpecificErrorCode() == ICameraServiceDefault::ERROR_INVALID_OPERATION))
+                (res.serviceSpecificErrorCode() == ICameraService::ERROR_INVALID_OPERATION))
                 << res;
         if (res.isOk()) {
             EXPECT_TRUE(queryStatus);
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 7aa655f..98164fd 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -37,6 +37,7 @@
 
 #include <binder/IPCThreadState.h>
 #include <utils/Errors.h>
+#include <utils/SystemClock.h>
 #include <utils/Timers.h>
 #include <utils/Trace.h>
 
@@ -95,6 +96,8 @@
 static const uint32_t kFallbackWidth = 1280;        // 720p
 static const uint32_t kFallbackHeight = 720;
 static const char* kMimeTypeAvc = "video/avc";
+static const char* kMimeTypeApplicationOctetstream = "application/octet-stream";
+static const char* kWinscopeMagicString = "#VV1NSC0PET1ME!#";
 
 // Command-line parameters.
 static bool gVerbose = false;           // chatty on stdout
@@ -350,6 +353,50 @@
 }
 
 /*
+ * Writes an unsigned integer byte-by-byte in little endian order regardless
+ * of the platform endianness.
+ */
+template <typename UINT>
+static void writeValueLE(UINT value, uint8_t* buffer) {
+    for (int i = 0; i < sizeof(UINT); ++i) {
+        buffer[i] = static_cast<uint8_t>(value);
+        value >>= 8;
+    }
+}
+
+/*
+ * Saves each frame's presentation time, relative to the elapsed realtime clock in microseconds,
+ * to a metadata track, preceded by the Winscope magic string and the frame count.
+ * This metadata is used by the Winscope tool to sync video with SurfaceFlinger
+ * and WindowManager traces.
+ *
+ * The metadata is written as a binary array as follows:
+ * - winscope magic string (kWinscopeMagicString constant), without trailing null char,
+ * - the number of recorded frames (as little endian uint32),
+ * - for every frame its presentation time relative to the elapsed realtime clock in microseconds
+ *   (as little endian uint64).
+ */
+static status_t writeWinscopeMetadata(const Vector<int64_t>& timestamps,
+        const ssize_t metaTrackIdx, const sp<MediaMuxer>& muxer) {
+    ALOGV("Writing metadata");
+    int64_t systemTimeToElapsedTimeOffsetMicros = (android::elapsedRealtimeNano()
+        - systemTime(SYSTEM_TIME_MONOTONIC)) / 1000;
+    sp<ABuffer> buffer = new ABuffer(timestamps.size() * sizeof(int64_t)
+        + sizeof(uint32_t) + strlen(kWinscopeMagicString));
+    uint8_t* pos = buffer->data();
+    strcpy(reinterpret_cast<char*>(pos), kWinscopeMagicString);
+    pos += strlen(kWinscopeMagicString);
+    writeValueLE<uint32_t>(timestamps.size(), pos);
+    pos += sizeof(uint32_t);
+    for (size_t idx = 0; idx < timestamps.size(); ++idx) {
+        writeValueLE<uint64_t>(static_cast<uint64_t>(timestamps[idx]
+            + systemTimeToElapsedTimeOffsetMicros), pos);
+        pos += sizeof(uint64_t);
+    }
+    return muxer->writeSampleData(buffer, metaTrackIdx, timestamps[0], 0);
+}
+
+/*
  * Runs the MediaCodec encoder, sending the output to the MediaMuxer.  The
  * input frames are coming from the virtual display as fast as SurfaceFlinger
  * wants to send them.
@@ -364,10 +411,12 @@
     static int kTimeout = 250000;   // be responsive on signal
     status_t err;
     ssize_t trackIdx = -1;
+    ssize_t metaTrackIdx = -1;
     uint32_t debugNumFrames = 0;
     int64_t startWhenNsec = systemTime(CLOCK_MONOTONIC);
     int64_t endWhenNsec = startWhenNsec + seconds_to_nanoseconds(gTimeLimitSec);
     DisplayInfo mainDpyInfo;
+    Vector<int64_t> timestamps;
 
     assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL));
 
@@ -465,6 +514,9 @@
                             "Failed writing data to muxer (err=%d)\n", err);
                         return err;
                     }
+                    if (gOutputFormat == FORMAT_MP4) {
+                        timestamps.add(ptsUsec);
+                    }
                 }
                 debugNumFrames++;
             }
@@ -491,6 +543,11 @@
                 encoder->getOutputFormat(&newFormat);
                 if (muxer != NULL) {
                     trackIdx = muxer->addTrack(newFormat);
+                    if (gOutputFormat == FORMAT_MP4) {
+                        sp<AMessage> metaFormat = new AMessage;
+                        metaFormat->setString(KEY_MIME, kMimeTypeApplicationOctetstream);
+                        metaTrackIdx = muxer->addTrack(metaFormat);
+                    }
                     ALOGV("Starting muxer");
                     err = muxer->start();
                     if (err != NO_ERROR) {
@@ -527,6 +584,13 @@
                         systemTime(CLOCK_MONOTONIC) - startWhenNsec));
         fflush(stdout);
     }
+    if (metaTrackIdx >= 0 && !timestamps.isEmpty()) {
+        err = writeWinscopeMetadata(timestamps, metaTrackIdx, muxer);
+        if (err != NO_ERROR) {
+            fprintf(stderr, "Failed writing metadata to muxer (err=%d)\n", err);
+            return err;
+        }
+    }
     return NO_ERROR;
 }
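
As a cross-check of the layout documented in writeWinscopeMetadata() above, a minimal parsing sketch (not part of this patch); readValueLE and parseWinscopeMetadata are hypothetical helper names, and the magic string mirrors kWinscopeMagicString.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    template <typename UINT>
    static UINT readValueLE(const uint8_t* buffer) {
        UINT value = 0;
        for (size_t i = 0; i < sizeof(UINT); ++i) {
            value |= static_cast<UINT>(buffer[i]) << (8 * i);
        }
        return value;
    }

    static bool parseWinscopeMetadata(const uint8_t* data, size_t size,
                                      std::vector<uint64_t>* timestampsUs) {
        static const char kMagic[] = "#VV1NSC0PET1ME!#";
        const size_t magicLen = strlen(kMagic);
        if (size < magicLen + sizeof(uint32_t) || memcmp(data, kMagic, magicLen) != 0) {
            return false;  // missing or corrupt magic string
        }
        const uint8_t* pos = data + magicLen;
        const uint32_t count = readValueLE<uint32_t>(pos);
        pos += sizeof(uint32_t);
        if (size < magicLen + sizeof(uint32_t) + count * sizeof(uint64_t)) {
            return false;  // truncated sample
        }
        for (uint32_t i = 0; i < count; ++i) {
            timestampsUs->push_back(readValueLE<uint64_t>(pos));
            pos += sizeof(uint64_t);
        }
        return true;
    }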
 
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 38fd34a..0c8d44a 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -153,9 +153,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-        filters/argbtorgba.rs \
-        filters/nightvision.rs \
-        filters/saturation.rs \
+        filters/argbtorgba.rscript \
+        filters/nightvision.rscript \
+        filters/saturation.rscript \
         mediafilter.cpp \
 
 LOCAL_SHARED_LIBRARIES := \
diff --git a/cmds/stagefright/filters/argbtorgba.rs b/cmds/stagefright/filters/argbtorgba.rscript
similarity index 100%
rename from cmds/stagefright/filters/argbtorgba.rs
rename to cmds/stagefright/filters/argbtorgba.rscript
diff --git a/cmds/stagefright/filters/nightvision.rs b/cmds/stagefright/filters/nightvision.rscript
similarity index 100%
rename from cmds/stagefright/filters/nightvision.rs
rename to cmds/stagefright/filters/nightvision.rscript
diff --git a/cmds/stagefright/filters/saturation.rs b/cmds/stagefright/filters/saturation.rscript
similarity index 100%
rename from cmds/stagefright/filters/saturation.rs
rename to cmds/stagefright/filters/saturation.rscript
diff --git a/include/media/AudioMixer.h b/include/media/AudioMixer.h
index de839c6..85ee950 120000
--- a/include/media/AudioMixer.h
+++ b/include/media/AudioMixer.h
@@ -1 +1 @@
-../../media/libaudioclient/include/media/AudioMixer.h
\ No newline at end of file
+../../media/libaudioprocessing/include/media/AudioMixer.h
\ No newline at end of file
diff --git a/include/media/BufferProviders.h b/include/media/BufferProviders.h
index 779bb15..778e1d8 120000
--- a/include/media/BufferProviders.h
+++ b/include/media/BufferProviders.h
@@ -1 +1 @@
-../../media/libmedia/include/media/BufferProviders.h
\ No newline at end of file
+../../media/libaudioprocessing/include/media/BufferProviders.h
\ No newline at end of file
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 969f2ee..e11af12 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -9,6 +9,7 @@
 	libaaudioservice \
 	libaudioflinger \
 	libaudiopolicyservice \
+	libaudioprocessing \
 	libbinder \
 	libcutils \
 	liblog \
@@ -39,7 +40,6 @@
 	frameworks/av/media/libaaudio/src \
 	frameworks/av/media/libaaudio/src/binding \
 	frameworks/av/media/libmedia \
-	$(call include-path-for, audio-utils) \
 	external/sonic \
 
 # If AUDIOSERVER_MULTILIB in device.mk is non-empty then it is used to control
diff --git a/media/bufferpool/1.0/AccessorImpl.cpp b/media/bufferpool/1.0/AccessorImpl.cpp
index fa17f15..6b90088 100644
--- a/media/bufferpool/1.0/AccessorImpl.cpp
+++ b/media/bufferpool/1.0/AccessorImpl.cpp
@@ -247,7 +247,7 @@
     ALOGD("Destruction - bufferpool %p "
           "cached: %zu/%zuM, %zu/%d%% in use; "
           "allocs: %zu, %d%% recycled; "
-          "transfers: %zu, %d%% unfetced",
+          "transfers: %zu, %d%% unfetched",
           this, mStats.mBuffersCached, mStats.mSizeCached >> 20,
           mStats.mBuffersInUse, percentage(mStats.mBuffersInUse, mStats.mBuffersCached),
           mStats.mTotalAllocations, percentage(mStats.mTotalRecycles, mStats.mTotalAllocations),
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 94cf006..32eaae9 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -303,7 +303,7 @@
     ALOGD("Destruction - bufferpool2 %p "
           "cached: %zu/%zuM, %zu/%d%% in use; "
           "allocs: %zu, %d%% recycled; "
-          "transfers: %zu, %d%% unfetced",
+          "transfers: %zu, %d%% unfetched",
           this, mStats.mBuffersCached, mStats.mSizeCached >> 20,
           mStats.mBuffersInUse, percentage(mStats.mBuffersInUse, mStats.mBuffersCached),
           mStats.mTotalAllocations, percentage(mStats.mTotalRecycles, mStats.mTotalAllocations),
diff --git a/media/bufferpool/2.0/ClientManager.cpp b/media/bufferpool/2.0/ClientManager.cpp
index c31d313..48c2da4 100644
--- a/media/bufferpool/2.0/ClientManager.cpp
+++ b/media/bufferpool/2.0/ClientManager.cpp
@@ -351,7 +351,17 @@
         }
         client = it->second;
     }
-    return client->allocate(params, handle, buffer);
+    native_handle_t *origHandle;
+    ResultStatus res = client->allocate(params, &origHandle, buffer);
+    if (res != ResultStatus::OK) {
+        return res;
+    }
+    *handle = native_handle_clone(origHandle);
+    if (*handle == NULL) {
+        buffer->reset();
+        return ResultStatus::NO_MEMORY;
+    }
+    return ResultStatus::OK;
 }
 
 ResultStatus ClientManager::Impl::receive(
@@ -367,7 +377,18 @@
         }
         client = it->second;
     }
-    return client->receive(transactionId, bufferId, timestampUs, handle, buffer);
+    native_handle_t *origHandle;
+    ResultStatus res = client->receive(
+            transactionId, bufferId, timestampUs, &origHandle, buffer);
+    if (res != ResultStatus::OK) {
+        return res;
+    }
+    *handle = native_handle_clone(origHandle);
+    if (*handle == NULL) {
+        buffer->reset();
+        return ResultStatus::NO_MEMORY;
+    }
+    return ResultStatus::OK;
 }
 
 ResultStatus ClientManager::Impl::postSend(
diff --git a/media/bufferpool/2.0/include/bufferpool/ClientManager.h b/media/bufferpool/2.0/include/bufferpool/ClientManager.h
index 953c304..24b61f4 100644
--- a/media/bufferpool/2.0/include/bufferpool/ClientManager.h
+++ b/media/bufferpool/2.0/include/bufferpool/ClientManager.h
@@ -104,7 +104,9 @@
     ResultStatus flush(ConnectionId connectionId);
 
     /**
-     * Allocates a buffer from the specified connection.
+     * Allocates a buffer from the specified connection. The output parameter
+     * handle is cloned from the internal handle, so it is safe to use directly;
+     * it should be closed and deleted after use.
      *
      * @param connectionId  The id of the connection.
      * @param params        The allocation parameters.
@@ -123,7 +125,9 @@
                           std::shared_ptr<BufferPoolData> *buffer);
 
     /**
-     * Receives a buffer for the transaction.
+     * Receives a buffer for the transaction. The output parameter handle is
+     * cloned from the internal handle, so it is safe to use directly; it
+     * should be closed and deleted after use.
      *
      * @param connectionId  The id of the receiving connection.
      * @param transactionId The id for the transaction.
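
A caller-side sketch (not part of this patch) of the ownership contract documented above: because allocate()/receive() now return a cloned native_handle_t, the caller releases it with native_handle_close() and native_handle_delete() once done. Includes and namespace qualifications are elided, and useAllocatedBuffer is a hypothetical name.

    void useAllocatedBuffer(ClientManager* clientManager, ConnectionId connectionId,
                            const std::vector<uint8_t>& params) {
        native_handle_t* handle = nullptr;
        std::shared_ptr<BufferPoolData> buffer;
        ResultStatus status = clientManager->allocate(connectionId, params, &handle, &buffer);
        if (status != ResultStatus::OK) {
            return;
        }
        // ... import the handle into a block/buffer and use it ...
        // The handle is a clone owned by this caller: close its fds, then free the struct.
        native_handle_close(handle);
        native_handle_delete(handle);
    }
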
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index 8e3852c..1dc676b 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -157,7 +157,7 @@
       mSentCodecSpecificData(false),
       mInputTimeSet(false),
       mInputSize(0),
-      mInputTimeUs(0),
+      mNextFrameTimestampUs(0),
       mSignalledError(false),
       mOutIndex(0u) {
 }
@@ -183,7 +183,7 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
-    mInputTimeUs = 0;
+    mNextFrameTimestampUs = 0;
     mSignalledError = false;
     return C2_OK;
 }
@@ -201,7 +201,7 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
-    mInputTimeUs = 0;
+    mNextFrameTimestampUs = 0;
     return C2_OK;
 }
 
@@ -365,17 +365,18 @@
         capacity = view.capacity();
     }
     if (!mInputTimeSet && capacity > 0) {
-        mInputTimeUs = work->input.ordinal.timestamp;
+        mNextFrameTimestampUs = work->input.ordinal.timestamp;
         mInputTimeSet = true;
     }
 
     size_t numFrames = (capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
             / mNumBytesPerInputFrame;
-    ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu mNumBytesPerInputFrame = %u",
-          capacity, mInputSize, numFrames, mNumBytesPerInputFrame);
+    ALOGV("capacity = %zu; mInputSize = %zu; numFrames = %zu "
+          "mNumBytesPerInputFrame = %u inputTS = %lld",
+          capacity, mInputSize, numFrames,
+          mNumBytesPerInputFrame, work->input.ordinal.timestamp.peekll());
 
     std::shared_ptr<C2LinearBlock> block;
-    std::shared_ptr<C2Buffer> buffer;
     std::unique_ptr<C2WriteView> wView;
     uint8_t *outPtr = temp;
     size_t outAvailable = 0u;
@@ -442,7 +443,11 @@
         const std::shared_ptr<C2Buffer> mBuffer;
     };
 
-    C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
+    struct OutputBuffer {
+        std::shared_ptr<C2Buffer> buffer;
+        c2_cntr64_t timestampUs;
+    };
+    std::list<OutputBuffer> outputBuffers;
 
     while (encoderErr == AACENC_OK && inargs.numInSamples > 0) {
         if (numFrames && !block) {
@@ -473,29 +478,22 @@
                                   &outargs);
 
         if (encoderErr == AACENC_OK) {
-            if (buffer) {
-                outOrdinal.frameIndex = mOutIndex++;
-                outOrdinal.timestamp = mInputTimeUs;
-                cloneAndSend(
-                        inputIndex,
-                        work,
-                        FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
-                buffer.reset();
-            }
-
             if (outargs.numOutBytes > 0) {
                 mInputSize = 0;
                 int consumed = (capacity / sizeof(int16_t)) - inargs.numInSamples
                         + outargs.numInSamples;
-                mInputTimeUs = work->input.ordinal.timestamp
+                c2_cntr64_t currentFrameTimestampUs = mNextFrameTimestampUs;
+                mNextFrameTimestampUs = work->input.ordinal.timestamp
                         + (consumed * 1000000ll / channelCount / sampleRate);
-                buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
+                std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block, 0, outargs.numOutBytes);
 #if defined(LOG_NDEBUG) && !LOG_NDEBUG
                 hexdump(outPtr, std::min(outargs.numOutBytes, 256));
 #endif
                 outPtr = temp;
                 outAvailable = 0;
                 block.reset();
+
+                outputBuffers.push_back({buffer, currentFrameTimestampUs});
             } else {
                 mInputSize += outargs.numInSamples * sizeof(int16_t);
             }
@@ -506,8 +504,9 @@
                 inargs.numInSamples -= outargs.numInSamples;
             }
         }
-        ALOGV("encoderErr = %d mInputSize = %zu inargs.numInSamples = %d, mInputTimeUs = %lld",
-              encoderErr, mInputSize, inargs.numInSamples, mInputTimeUs.peekll());
+        ALOGV("encoderErr = %d mInputSize = %zu "
+              "inargs.numInSamples = %d, mNextFrameTimestampUs = %lld",
+              encoderErr, mInputSize, inargs.numInSamples, mNextFrameTimestampUs.peekll());
     }
 
     if (eos && inBufferSize[0] > 0) {
@@ -542,10 +541,27 @@
                            &outargs);
     }
 
-    outOrdinal.frameIndex = mOutIndex++;
-    outOrdinal.timestamp = mInputTimeUs;
+    while (outputBuffers.size() > 1) {
+        const OutputBuffer& front = outputBuffers.front();
+        C2WorkOrdinalStruct ordinal = work->input.ordinal;
+        ordinal.frameIndex = mOutIndex++;
+        ordinal.timestamp = front.timestampUs;
+        cloneAndSend(
+                inputIndex,
+                work,
+                FillWork(C2FrameData::FLAG_INCOMPLETE, ordinal, front.buffer));
+        outputBuffers.pop_front();
+    }
+    std::shared_ptr<C2Buffer> buffer;
+    C2WorkOrdinalStruct ordinal = work->input.ordinal;
+    ordinal.frameIndex = mOutIndex++;
+    if (!outputBuffers.empty()) {
+        ordinal.timestamp = outputBuffers.front().timestampUs;
+        buffer = outputBuffers.front().buffer;
+    }
+    // Mark the end of frame
     FillWork((C2FrameData::flags_t)(eos ? C2FrameData::FLAG_END_OF_STREAM : 0),
-             outOrdinal, buffer)(work);
+             ordinal, buffer)(work);
 }
 
 c2_status_t C2SoftAacEnc::drain(
@@ -569,7 +585,7 @@
     mSentCodecSpecificData = false;
     mInputTimeSet = false;
     mInputSize = 0u;
-    mInputTimeUs = 0;
+    mNextFrameTimestampUs = 0;
 
     // TODO: we don't have any pending work at this time to drain.
     return C2_OK;
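
For context, a small worked example (not part of this patch) of the per-frame timestamp spacing implied by the arithmetic above; the sample rate, channel count, and input timestamp are made-up values for illustration.

    // One AAC frame consumes 1024 PCM samples per channel.
    const int32_t sampleRate      = 48000;                  // assumed
    const int32_t channelCount    = 2;                      // assumed
    const int32_t samplesPerFrame = 1024 * channelCount;    // interleaved int16 samples
    const int64_t frameDurationUs =
            static_cast<int64_t>(samplesPerFrame) * 1000000 / channelCount / sampleRate;
    // frameDurationUs == 21333, so frames encoded from an input buffer stamped at
    // 1000000 us carry timestamps 1000000, 1021333, 1042666, ...
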
diff --git a/media/codec2/components/aac/C2SoftAacEnc.h b/media/codec2/components/aac/C2SoftAacEnc.h
index a38be19..2655039 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.h
+++ b/media/codec2/components/aac/C2SoftAacEnc.h
@@ -56,7 +56,7 @@
     bool mSentCodecSpecificData;
     bool mInputTimeSet;
     size_t mInputSize;
-    c2_cntr64_t mInputTimeUs;
+    c2_cntr64_t mNextFrameTimestampUs;
 
     bool mSignalledError;
     std::atomic_uint64_t mOutIndex;
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 769895c..0cf277f 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -340,6 +340,7 @@
     aom_codec_flags_t flags;
     memset(&flags, 0, sizeof(aom_codec_flags_t));
 
+    ALOGV("Using libaom AV1 software decoder.");
     aom_codec_err_t err;
     if ((err = aom_codec_dec_init(mCodecCtx, aom_codec_av1_dx(), &cfg, 0))) {
         ALOGE("av1 decoder failed to initialize. (%d)", err);
diff --git a/media/codec2/components/flac/Android.bp b/media/codec2/components/flac/Android.bp
index e5eb51d..48cc51b 100644
--- a/media/codec2/components/flac/Android.bp
+++ b/media/codec2/components/flac/Android.bp
@@ -23,8 +23,11 @@
 
     srcs: ["C2SoftFlacEnc.cpp"],
 
-    static_libs: [
+    shared_libs: [
         "libaudioutils",
+    ],
+
+    static_libs: [
         "libFLAC",
     ],
 }
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 3ba480a..f5321ba 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -27,8 +27,6 @@
 
 namespace android {
 
-// TODO(vigneshv): This will be changed to c2.android.av1.decoder once this
-// component is fully functional.
 constexpr char COMPONENT_NAME[] = "c2.android.gav1.decoder";
 
 class C2SoftGav1Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -338,6 +336,7 @@
   libgav1::DecoderSettings settings = {};
   settings.threads = GetCPUCoreCount();
 
+  ALOGV("Using libgav1 AV1 software decoder.");
   Libgav1StatusCode status = mCodecCtx->Init(&settings);
   if (status != kLibgav1StatusOk) {
     ALOGE("av1 decoder failed to initialize. status: %d.", status);
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 6509a88..6dab70b 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -127,14 +127,14 @@
     }
 
     switch (mBitrateMode->value) {
-        case C2Config::BITRATE_VARIABLE:
-            mBitrateControlMode = VPX_VBR;
-            break;
         case C2Config::BITRATE_CONST:
-        default:
             mBitrateControlMode = VPX_CBR;
             break;
-        break;
+        case C2Config::BITRATE_VARIABLE:
+        [[fallthrough]];
+        default:
+            mBitrateControlMode = VPX_VBR;
+            break;
     }
 
     setCodecSpecificInterface();
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 90758f9..62ccd1b 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -275,7 +275,7 @@
         addParameter(
             DefineParam(mBitrateMode, C2_PARAMKEY_BITRATE_MODE)
                 .withDefault(new C2StreamBitrateModeTuning::output(
-                        0u, C2Config::BITRATE_CONST))
+                        0u, C2Config::BITRATE_VARIABLE))
                 .withFields({
                     C2F(mBitrateMode, value).oneOf({
                         C2Config::BITRATE_CONST, C2Config::BITRATE_VARIABLE })
diff --git a/media/codec2/components/xaac/C2SoftXaacDec.cpp b/media/codec2/components/xaac/C2SoftXaacDec.cpp
index a3ebadb..60ae93c 100644
--- a/media/codec2/components/xaac/C2SoftXaacDec.cpp
+++ b/media/codec2/components/xaac/C2SoftXaacDec.cpp
@@ -1309,69 +1309,84 @@
                                 &ui_exec_done);
     RETURN_IF_FATAL(err_code,  "IA_CMD_TYPE_DONE_QUERY");
 
-    if (ui_exec_done != 1) {
-        VOID* p_array;        // ITTIAM:buffer to handle gain payload
-        WORD32 buf_size = 0;  // ITTIAM:gain payload length
-        WORD32 bit_str_fmt = 1;
-        WORD32 gain_stream_flag = 1;
-
-        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
-                                    IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
-        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
-
-        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
-                                    IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
-        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
-
-        if (buf_size > 0) {
-            /*Set bitstream_split_format */
-            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
-                                      IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
-            RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
-            memcpy(mDrcInBuf, p_array, buf_size);
-            /* Set number of bytes to be processed */
-            err_code =
-                ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
-            RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
-            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
-                                      IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
-            RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
-            /* Execute process */
-            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
-                                      IA_CMD_TYPE_INIT_CPY_BSF_BUFF, nullptr);
-            RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
-            mMpegDDRCPresent = 1;
-        }
-    }
-
-    /* How much buffer is used in input buffers */
+    int32_t num_preroll = 0;
     err_code = ixheaacd_dec_api(mXheaacCodecHandle,
-                                IA_API_CMD_GET_CURIDX_INPUT_BUF,
-                                0,
-                                bytesConsumed);
-    RETURN_IF_FATAL(err_code,  "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+                                IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES,
+                                &num_preroll);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES");
 
-    /* Get the output bytes */
-    err_code = ixheaacd_dec_api(mXheaacCodecHandle,
-                                IA_API_CMD_GET_OUTPUT_BYTES,
-                                0,
-                                outBytes);
-    RETURN_IF_FATAL(err_code,  "IA_API_CMD_GET_OUTPUT_BYTES");
+    {
+      int32_t preroll_frame_offset = 0;
 
-    if (mMpegDDRCPresent == 1) {
-        memcpy(mDrcInBuf, mOutputBuffer, *outBytes);
-        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
-        RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+        do {
+            if (ui_exec_done != 1) {
+                VOID* p_array;        // ITTIAM:buffer to handle gain payload
+                WORD32 buf_size = 0;  // ITTIAM:gain payload length
+                WORD32 bit_str_fmt = 1;
+                WORD32 gain_stream_flag = 1;
 
-        err_code =
-            ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, nullptr);
-        RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+                err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                            IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+                RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
 
-        memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+                err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                            IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+                RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
+
+                if (buf_size > 0) {
+                    /*Set bitstream_split_format */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                            IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    memcpy(mDrcInBuf, p_array, buf_size);
+                    /* Set number of bytes to be processed */
+                    err_code =
+                        ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                            IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    /* Execute process */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                            IA_CMD_TYPE_INIT_CPY_BSF_BUFF, nullptr);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    mMpegDDRCPresent = 1;
+                }
+            }
+
+            /* How much buffer is used in input buffers */
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+                                        IA_API_CMD_GET_CURIDX_INPUT_BUF,
+                                        0,
+                                        bytesConsumed);
+            RETURN_IF_FATAL(err_code,  "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+
+            /* Get the output bytes */
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+                                        IA_API_CMD_GET_OUTPUT_BYTES,
+                                        0,
+                                        outBytes);
+            RETURN_IF_FATAL(err_code,  "IA_API_CMD_GET_OUTPUT_BYTES");
+
+            if (mMpegDDRCPresent == 1) {
+                memcpy(mDrcInBuf, mOutputBuffer + preroll_frame_offset, *outBytes);
+                preroll_frame_offset += *outBytes;
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
+                RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+                err_code =
+                    ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, nullptr);
+                RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+
+                memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+            }
+            num_preroll--;
+        } while (num_preroll > 0);
     }
     return IA_NO_ERROR;
 }
diff --git a/media/codec2/core/include/C2Param.h b/media/codec2/core/include/C2Param.h
index cc8c17a..51d417a 100644
--- a/media/codec2/core/include/C2Param.h
+++ b/media/codec2/core/include/C2Param.h
@@ -176,9 +176,9 @@
             DIR_INPUT      = 0x00000000,
             DIR_OUTPUT     = 0x10000000,
 
-            IS_STREAM_FLAG  = 0x00100000,
-            STREAM_ID_MASK  = 0x03E00000,
-            STREAM_ID_SHIFT = 21,
+            IS_STREAM_FLAG  = 0x02000000,
+            STREAM_ID_MASK  = 0x01F00000,
+            STREAM_ID_SHIFT = 20,
             MAX_STREAM_ID   = STREAM_ID_MASK >> STREAM_ID_SHIFT,
             STREAM_MASK     = IS_STREAM_FLAG | STREAM_ID_MASK,
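
For illustration, how a stream-directed parameter index decomposes under the revised bit layout (not part of this patch; the core index value is made up):

    #include <cstdint>

    const uint32_t coreIndex = 0x0000abcd;  // hypothetical core parameter index
    const uint32_t streamId  = 3;
    const uint32_t index = coreIndex
            | 0x10000000u   /* DIR_OUTPUT */
            | 0x02000000u   /* IS_STREAM_FLAG */
            | ((streamId << 20) & 0x01F00000u /* STREAM_ID_MASK */);
    // Decoding it back:
    const bool     isStream  = (index & 0x02000000u) != 0;   // true
    const uint32_t decodedId = (index & 0x01F00000u) >> 20;  // 3
    const uint32_t maxId     = 0x01F00000u >> 20;            // MAX_STREAM_ID == 31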
 
diff --git a/media/codec2/hidl/1.0/utils/InputBufferManager.cpp b/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
index a023a05..8c0d0a4 100644
--- a/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
+++ b/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
@@ -70,7 +70,7 @@
                  << ".";
     std::lock_guard<std::mutex> lock(mMutex);
 
-    std::set<TrackedBuffer> &bufferIds =
+    std::set<TrackedBuffer*> &bufferIds =
             mTrackedBuffersMap[listener][frameIndex];
 
     for (size_t i = 0; i < input.buffers.size(); ++i) {
@@ -79,13 +79,14 @@
                          << "Input buffer at index " << i << " is null.";
             continue;
         }
-        const TrackedBuffer &bufferId =
-                *bufferIds.emplace(listener, frameIndex, i, input.buffers[i]).
-                first;
+        TrackedBuffer *bufferId =
+            new TrackedBuffer(listener, frameIndex, i, input.buffers[i]);
+        mTrackedBufferCache.emplace(bufferId);
+        bufferIds.emplace(bufferId);
 
         c2_status_t status = input.buffers[i]->registerOnDestroyNotify(
                 onBufferDestroyed,
-                const_cast<void*>(reinterpret_cast<const void*>(&bufferId)));
+                reinterpret_cast<void*>(bufferId));
         if (status != C2_OK) {
             LOG(DEBUG) << "InputBufferManager::_registerFrameData -- "
                        << "registerOnDestroyNotify() failed "
@@ -119,31 +120,32 @@
 
     auto findListener = mTrackedBuffersMap.find(listener);
     if (findListener != mTrackedBuffersMap.end()) {
-        std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
+        std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds
                 = findListener->second;
         auto findFrameIndex = frameIndex2BufferIds.find(frameIndex);
         if (findFrameIndex != frameIndex2BufferIds.end()) {
-            std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
-            for (const TrackedBuffer& bufferId : bufferIds) {
-                std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
+            std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+            for (TrackedBuffer* bufferId : bufferIds) {
+                std::shared_ptr<C2Buffer> buffer = bufferId->buffer.lock();
                 if (buffer) {
                     c2_status_t status = buffer->unregisterOnDestroyNotify(
                             onBufferDestroyed,
-                            const_cast<void*>(
-                            reinterpret_cast<const void*>(&bufferId)));
+                            reinterpret_cast<void*>(bufferId));
                     if (status != C2_OK) {
                         LOG(DEBUG) << "InputBufferManager::_unregisterFrameData "
                                    << "-- unregisterOnDestroyNotify() failed "
                                    << "(listener @ 0x"
                                         << std::hex
-                                        << bufferId.listener.unsafe_get()
+                                        << bufferId->listener.unsafe_get()
                                    << ", frameIndex = "
-                                        << std::dec << bufferId.frameIndex
-                                   << ", bufferIndex = " << bufferId.bufferIndex
+                                        << std::dec << bufferId->frameIndex
+                                   << ", bufferIndex = " << bufferId->bufferIndex
                                    << ") => status = " << status
                                    << ".";
                     }
                 }
+                mTrackedBufferCache.erase(bufferId);
+                delete bufferId;
             }
 
             frameIndex2BufferIds.erase(findFrameIndex);
@@ -179,31 +181,32 @@
 
     auto findListener = mTrackedBuffersMap.find(listener);
     if (findListener != mTrackedBuffersMap.end()) {
-        std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds =
+        std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds =
                 findListener->second;
         for (auto findFrameIndex = frameIndex2BufferIds.begin();
                 findFrameIndex != frameIndex2BufferIds.end();
                 ++findFrameIndex) {
-            std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
-            for (const TrackedBuffer& bufferId : bufferIds) {
-                std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
+            std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+            for (TrackedBuffer* bufferId : bufferIds) {
+                std::shared_ptr<C2Buffer> buffer = bufferId->buffer.lock();
                 if (buffer) {
                     c2_status_t status = buffer->unregisterOnDestroyNotify(
                             onBufferDestroyed,
-                            const_cast<void*>(
-                            reinterpret_cast<const void*>(&bufferId)));
+                            reinterpret_cast<void*>(bufferId));
                     if (status != C2_OK) {
                         LOG(DEBUG) << "InputBufferManager::_unregisterFrameData "
                                    << "-- unregisterOnDestroyNotify() failed "
                                    << "(listener @ 0x"
                                         << std::hex
-                                        << bufferId.listener.unsafe_get()
+                                        << bufferId->listener.unsafe_get()
                                    << ", frameIndex = "
-                                        << std::dec << bufferId.frameIndex
-                                   << ", bufferIndex = " << bufferId.bufferIndex
+                                        << std::dec << bufferId->frameIndex
+                                   << ", bufferIndex = " << bufferId->bufferIndex
                                    << ") => status = " << status
                                    << ".";
                     }
+                    mTrackedBufferCache.erase(bufferId);
+                    delete bufferId;
                 }
             }
         }
@@ -236,50 +239,59 @@
                      << std::dec << ".";
         return;
     }
-    TrackedBuffer id(*reinterpret_cast<TrackedBuffer*>(arg));
+
+    std::lock_guard<std::mutex> lock(mMutex);
+    TrackedBuffer *bufferId = reinterpret_cast<TrackedBuffer*>(arg);
+
+    if (mTrackedBufferCache.find(bufferId) == mTrackedBufferCache.end()) {
+        LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- called with "
+                     << "unregistered buffer: "
+                     << "buf @ 0x" << std::hex << buf
+                     << ", arg @ 0x" << std::hex << arg
+                     << std::dec << ".";
+        return;
+    }
+
     LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- called with "
                  << "buf @ 0x" << std::hex << buf
                  << ", arg @ 0x" << std::hex << arg
                  << std::dec << " -- "
-                 << "listener @ 0x" << std::hex << id.listener.unsafe_get()
-                 << ", frameIndex = " << std::dec << id.frameIndex
-                 << ", bufferIndex = " << id.bufferIndex
+                 << "listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+                 << ", frameIndex = " << std::dec << bufferId->frameIndex
+                 << ", bufferIndex = " << bufferId->bufferIndex
                  << ".";
-
-    std::lock_guard<std::mutex> lock(mMutex);
-
-    auto findListener = mTrackedBuffersMap.find(id.listener);
+    auto findListener = mTrackedBuffersMap.find(bufferId->listener);
     if (findListener == mTrackedBuffersMap.end()) {
-        LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
-                   << "received invalid listener: "
-                   << "listener @ 0x" << std::hex << id.listener.unsafe_get()
-                   << " (frameIndex = " << std::dec << id.frameIndex
-                   << ", bufferIndex = " << id.bufferIndex
-                   << ").";
+        LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- "
+                     << "received invalid listener: "
+                     << "listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+                     << " (frameIndex = " << std::dec << bufferId->frameIndex
+                     << ", bufferIndex = " << bufferId->bufferIndex
+                     << ").";
         return;
     }
 
-    std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
+    std::map<uint64_t, std::set<TrackedBuffer*>> &frameIndex2BufferIds
             = findListener->second;
-    auto findFrameIndex = frameIndex2BufferIds.find(id.frameIndex);
+    auto findFrameIndex = frameIndex2BufferIds.find(bufferId->frameIndex);
     if (findFrameIndex == frameIndex2BufferIds.end()) {
         LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
                    << "received invalid frame index: "
-                   << "frameIndex = " << id.frameIndex
-                   << " (listener @ 0x" << std::hex << id.listener.unsafe_get()
-                   << ", bufferIndex = " << std::dec << id.bufferIndex
+                   << "frameIndex = " << bufferId->frameIndex
+                   << " (listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
+                   << ", bufferIndex = " << std::dec << bufferId->bufferIndex
                    << ").";
         return;
     }
 
-    std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
-    auto findBufferId = bufferIds.find(id);
+    std::set<TrackedBuffer*> &bufferIds = findFrameIndex->second;
+    auto findBufferId = bufferIds.find(bufferId);
     if (findBufferId == bufferIds.end()) {
         LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
                    << "received invalid buffer index: "
-                   << "bufferIndex = " << id.bufferIndex
-                   << " (frameIndex = " << id.frameIndex
-                   << ", listener @ 0x" << std::hex << id.listener.unsafe_get()
+                   << "bufferIndex = " << bufferId->bufferIndex
+                   << " (frameIndex = " << bufferId->frameIndex
+                   << ", listener @ 0x" << std::hex << bufferId->listener.unsafe_get()
                    << std::dec << ").";
         return;
     }
@@ -292,10 +304,13 @@
         }
     }
 
-    DeathNotifications &deathNotifications = mDeathNotifications[id.listener];
-    deathNotifications.indices[id.frameIndex].emplace_back(id.bufferIndex);
+    DeathNotifications &deathNotifications = mDeathNotifications[bufferId->listener];
+    deathNotifications.indices[bufferId->frameIndex].emplace_back(bufferId->bufferIndex);
     ++deathNotifications.count;
     mOnBufferDestroyed.notify_one();
+
+    mTrackedBufferCache.erase(bufferId);
+    delete bufferId;
 }
 
 // Notify the clients about buffer destructions.
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
index b6857d5..42fa557 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
@@ -196,13 +196,9 @@
                 frameIndex(frameIndex),
                 bufferIndex(bufferIndex),
                 buffer(buffer) {}
-        TrackedBuffer(const TrackedBuffer&) = default;
-        bool operator<(const TrackedBuffer& other) const {
-            return bufferIndex < other.bufferIndex;
-        }
     };
 
-    // Map: listener -> frameIndex -> set<TrackedBuffer>.
+    // Map: listener -> frameIndex -> set<TrackedBuffer*>.
     // Essentially, this is used to store triples (listener, frameIndex,
     // bufferIndex) that's searchable by listener and (listener, frameIndex).
     // However, the value of the innermost map is TrackedBuffer, which also
@@ -210,7 +206,7 @@
     // because onBufferDestroyed() needs to know listener and frameIndex too.
     typedef std::map<wp<IComponentListener>,
                      std::map<uint64_t,
-                              std::set<TrackedBuffer>>> TrackedBuffersMap;
+                              std::set<TrackedBuffer*>>> TrackedBuffersMap;
 
     // Storage for pending (unsent) death notifications for one listener.
     // Each pair in member named "indices" are (frameIndex, bufferIndex) from
@@ -247,6 +243,16 @@
     // Mutex for the management of all input buffers.
     std::mutex mMutex;
 
+    // Cache for all TrackedBuffers.
+    //
+    // Whenever registerOnDestroyNotify() is called, a TrackedBuffer is
+    // allocated on the heap and its pointer is stored into this cache.
+    // Whenever unregisterOnDestroyNotify() or onBufferDestroyed() is called,
+    // the TrackedBuffer is removed from this cache and deleted.
+    //
+    // mTrackedBuffersMap stores pointers to TrackedBuffers owned by this cache.
+    std::set<TrackedBuffer*> mTrackedBufferCache;
+
     // Tracked input buffers.
     TrackedBuffersMap mTrackedBuffersMap;
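
For illustration, a minimal sketch (not part of this patch) of the registry pattern this cache implements: destruction callbacks receive a raw pointer as an opaque cookie, and the set is consulted under the lock before the cookie is dereferenced, so stale cookies from buffers that were already unregistered are ignored. All names below are hypothetical.

    #include <mutex>
    #include <set>

    struct Cookie { /* listener, frameIndex, bufferIndex, ... */ };

    class CookieRegistry {
    public:
        Cookie* create() {
            std::lock_guard<std::mutex> lock(mMutex);
            Cookie* cookie = new Cookie();
            mLive.insert(cookie);
            return cookie;
        }
        void unregister(Cookie* cookie) {
            std::lock_guard<std::mutex> lock(mMutex);
            if (mLive.erase(cookie)) {
                delete cookie;
            }
        }
        // Invoked from an asynchronous destruction callback with an opaque cookie.
        void onCallback(void* arg) {
            std::lock_guard<std::mutex> lock(mMutex);
            Cookie* cookie = static_cast<Cookie*>(arg);
            if (mLive.find(cookie) == mLive.end()) {
                return;  // stale cookie: the buffer was already unregistered
            }
            // ... record the death notification, then retire the cookie ...
            mLive.erase(cookie);
            delete cookie;
        }
    private:
        std::mutex mMutex;
        std::set<Cookie*> mLive;
    };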
 
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 07dbf67..04fa59c 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -1434,6 +1434,11 @@
                 d->type = C2BaseBlock::GRAPHIC;
                 return true;
             }
+            if (cHandle) {
+                // Though we got cloned handle, creating block failed.
+                native_handle_close(cHandle);
+                native_handle_delete(cHandle);
+            }
 
             LOG(ERROR) << "Unknown handle type in BaseBlock::pooledBlock.";
             return false;
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/Android.bp b/media/codec2/hidl/1.0/vts/functional/audio/Android.bp
index 687754b..65f0d09 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/Android.bp
@@ -15,19 +15,19 @@
  */
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetAudioDecTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
+    name: "VtsHalMediaC2V1_0TargetAudioDecTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
     srcs: [
-        "VtsHidlC2V1_0TargetAudioDecTest.cpp",
+        "VtsHalMediaC2V1_0TargetAudioDecTest.cpp",
         //"media_audio_hidl_test_common.cpp"
     ],
 }
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetAudioEncTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
+    name: "VtsHalMediaC2V1_0TargetAudioEncTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
     srcs: [
-        "VtsHidlC2V1_0TargetAudioEncTest.cpp",
+        "VtsHalMediaC2V1_0TargetAudioEncTest.cpp",
         //"media_audio_hidl_test_common.cpp"
     ],
 }
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/functional/common/Android.bp b/media/codec2/hidl/1.0/vts/functional/common/Android.bp
index da0061a..a011ba3 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/common/Android.bp
@@ -1,5 +1,5 @@
 cc_library_static {
-    name: "VtsMediaC2V1_0CommonUtil",
+    name: "VtsHalMediaC2V1_0CommonUtil",
     defaults: [
         "VtsHalTargetTestDefaults",
         "libcodec2-hidl-client-defaults",
@@ -17,14 +17,14 @@
 }
 
 cc_defaults {
-    name: "VtsMediaC2V1_0Defaults",
+    name: "VtsHalMediaC2V1_0Defaults",
     defaults: [
         "VtsHalTargetTestDefaults",
         "libcodec2-hidl-client-defaults",
     ],
 
     static_libs: [
-        "VtsMediaC2V1_0CommonUtil",
+        "VtsHalMediaC2V1_0CommonUtil",
     ],
 
     shared_libs: [
diff --git a/media/codec2/hidl/1.0/vts/functional/common/README.md b/media/codec2/hidl/1.0/vts/functional/common/README.md
index 3deab10..50e8356 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/README.md
+++ b/media/codec2/hidl/1.0/vts/functional/common/README.md
@@ -3,29 +3,29 @@
 #### master :
 Functionality of master is to enumerate all the Codec2 components available in C2 media service.
 
-usage: VtsHidlC2V1\_0TargetMasterTest -I default
+usage: VtsHalMediaC2V1\_0TargetMasterTest -I default
 
 #### component :
 Functionality of component test is to validate common functionality across all the Codec2 components available in C2 media service. For a standard C2 component, these tests are expected to pass.
 
-usage: VtsHidlC2V1\_0TargetComponentTest -I software -C <comp name>
-example: VtsHidlC2V1\_0TargetComponentTest -I software -C c2.android.vorbis.decoder
+usage: VtsHalMediaC2V1\_0TargetComponentTest -I software -C <comp name>
+example: VtsHalMediaC2V1\_0TargetComponentTest -I software -C c2.android.vorbis.decoder
 
 #### audio :
 Functionality of audio test is to validate audio specific functionality Codec2 components. The resource files for this test are taken from media/codec2/hidl/1.0/vts/functional/res. The path to these files on the device is required to be given for bitstream tests.
 
-usage: VtsHidlC2V1\_0TargetAudioDecTest -I default -C <comp name> -P /sdcard/res/
-usage: VtsHidlC2V1\_0TargetAudioEncTest -I software -C <comp name> -P /sdcard/res/
+usage: VtsHalMediaC2V1\_0TargetAudioDecTest -I default -C <comp name> -P /sdcard/media/
+usage: VtsHalMediaC2V1\_0TargetAudioEncTest -I software -C <comp name> -P /sdcard/media/
 
-example: VtsHidlC2V1\_0TargetAudioDecTest -I software -C c2.android.flac.decoder -P /sdcard/res/
-example: VtsHidlC2V1\_0TargetAudioEncTest -I software -C c2.android.opus.encoder -P /sdcard/res/
+example: VtsHalMediaC2V1\_0TargetAudioDecTest -I software -C c2.android.flac.decoder -P /sdcard/media/
+example: VtsHalMediaC2V1\_0TargetAudioEncTest -I software -C c2.android.opus.encoder -P /sdcard/media/
 
 #### video :
 Functionality of video test is to validate video specific functionality Codec2 components. The resource files for this test are taken from media/codec2/hidl/1.0/vts/functional/res. The path to these files on the device is required to be given for bitstream tests.
 
-usage: VtsHidlC2V1\_0TargetVideoDecTest -I default -C <comp name> -P /sdcard/res/
-usage: VtsHidlC2V1\_0TargetVideoEncTest -I software -C <comp name> -P /sdcard/res/
+usage: VtsHalMediaC2V1\_0TargetVideoDecTest -I default -C <comp name> -P /sdcard/media/
+usage: VtsHalMediaC2V1\_0TargetVideoEncTest -I software -C <comp name> -P /sdcard/media/
 
-example: VtsHidlC2V1\_0TargetVideoDecTest -I software -C c2.android.avc.decoder -P /sdcard/res/
-example: VtsHidlC2V1\_0TargetVideoEncTest -I software -C c2.android.vp9.encoder -P /sdcard/res/
+example: VtsHalMediaC2V1\_0TargetVideoDecTest -I software -C c2.android.avc.decoder -P /sdcard/media/
+example: VtsHalMediaC2V1\_0TargetVideoEncTest -I software -C c2.android.vp9.encoder -P /sdcard/media/
 
diff --git a/media/codec2/hidl/1.0/vts/functional/component/Android.bp b/media/codec2/hidl/1.0/vts/functional/component/Android.bp
index 4b913b6..7ec64ee 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/component/Android.bp
@@ -15,8 +15,8 @@
  */
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetComponentTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
-    srcs: ["VtsHidlC2V1_0TargetComponentTest.cpp"],
+    name: "VtsHalMediaC2V1_0TargetComponentTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
+    srcs: ["VtsHalMediaC2V1_0TargetComponentTest.cpp"],
 }
 
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
similarity index 95%
rename from media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index 74548b5..9dc541c 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -144,8 +144,7 @@
 
     // Queueing an empty WorkBundle
     std::list<std::unique_ptr<C2Work>> workList;
-    err = mComponent->queue(&workList);
-    ASSERT_EQ(err, C2_OK);
+    mComponent->queue(&workList);
 
     err = mComponent->reset();
     ASSERT_EQ(err, C2_OK);
@@ -183,33 +182,23 @@
 // Test Multiple Start Stop Reset Test
 TEST_F(Codec2ComponentHidlTest, MultipleStartStopReset) {
     ALOGV("Multiple Start Stop and Reset Test");
-    c2_status_t err = C2_OK;
 
     for (size_t i = 0; i < MAX_RETRY; i++) {
-        err = mComponent->start();
-        ASSERT_EQ(err, C2_OK);
-
-        err = mComponent->stop();
-        ASSERT_EQ(err, C2_OK);
+        mComponent->start();
+        mComponent->stop();
     }
 
-    err = mComponent->start();
-    ASSERT_EQ(err, C2_OK);
+    ASSERT_EQ(mComponent->start(), C2_OK);
 
     for (size_t i = 0; i < MAX_RETRY; i++) {
-        err = mComponent->reset();
-        ASSERT_EQ(err, C2_OK);
+        mComponent->reset();
     }
 
-    err = mComponent->start();
-    ASSERT_EQ(err, C2_OK);
-
-    err = mComponent->stop();
-    ASSERT_EQ(err, C2_OK);
+    ASSERT_EQ(mComponent->start(), C2_OK);
+    ASSERT_EQ(mComponent->stop(), C2_OK);
 
     // Second stop should return error
-    err = mComponent->stop();
-    ASSERT_NE(err, C2_OK);
+    ASSERT_NE(mComponent->stop(), C2_OK);
 }
 
 // Test Component Release API
@@ -233,8 +222,7 @@
     ASSERT_EQ(failures.size(), 0u);
 
     for (size_t i = 0; i < MAX_RETRY; i++) {
-        err = mComponent->release();
-        ASSERT_EQ(err, C2_OK);
+        mComponent->release();
     }
 }
 
@@ -332,14 +320,12 @@
     timeConsumed = getNowUs() - startTime;
     ALOGV("mComponent->queue() timeConsumed=%" PRId64 " us", timeConsumed);
     CHECK_TIMEOUT(timeConsumed, QUEUE_TIME_OUT, "queue()");
-    ASSERT_EQ(err, C2_OK);
 
     startTime = getNowUs();
     err = mComponent->flush(C2Component::FLUSH_COMPONENT, &workList);
     timeConsumed = getNowUs() - startTime;
     ALOGV("mComponent->flush() timeConsumed=%" PRId64 " us", timeConsumed);
     CHECK_TIMEOUT(timeConsumed, FLUSH_TIME_OUT, "flush()");
-    ASSERT_EQ(err, C2_OK);
 
     startTime = getNowUs();
     err = mComponent->stop();
diff --git a/media/codec2/hidl/1.0/vts/functional/master/Android.bp b/media/codec2/hidl/1.0/vts/functional/master/Android.bp
index e164d68..53e323e 100644
--- a/media/codec2/hidl/1.0/vts/functional/master/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/master/Android.bp
@@ -15,8 +15,8 @@
  */
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetMasterTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
-    srcs: ["VtsHidlC2V1_0TargetMasterTest.cpp"],
+    name: "VtsHalMediaC2V1_0TargetMasterTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
+    srcs: ["VtsHalMediaC2V1_0TargetMasterTest.cpp"],
 }
 
diff --git a/media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp b/media/codec2/hidl/1.0/vts/functional/master/VtsHalMediaC2V1_0TargetMasterTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/master/VtsHalMediaC2V1_0TargetMasterTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/functional/video/Android.bp b/media/codec2/hidl/1.0/vts/functional/video/Android.bp
index 6e57ee7..be35b02 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/video/Android.bp
@@ -15,14 +15,14 @@
  */
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetVideoDecTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
-    srcs: ["VtsHidlC2V1_0TargetVideoDecTest.cpp"],
+    name: "VtsHalMediaC2V1_0TargetVideoDecTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
+    srcs: ["VtsHalMediaC2V1_0TargetVideoDecTest.cpp"],
 }
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetVideoEncTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
-    srcs: ["VtsHidlC2V1_0TargetVideoEncTest.cpp"],
+    name: "VtsHalMediaC2V1_0TargetVideoEncTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
+    srcs: ["VtsHalMediaC2V1_0TargetVideoEncTest.cpp"],
 }
 
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
similarity index 97%
rename from media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 33fa848..5e28750 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -46,6 +46,10 @@
     explicit LinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
         : C2Buffer(
               {block->share(block->offset(), block->size(), ::C2Fence())}) {}
+
+    explicit LinearBuffer(const std::shared_ptr<C2LinearBlock>& block, size_t size)
+        : C2Buffer(
+              {block->share(block->offset(), size, ::C2Fence())}) {}
 };
 
 static ComponentTestEnvironment* gEnv = nullptr;
@@ -120,6 +124,13 @@
         mTimestampUs = 0u;
         mTimestampDevTest = false;
         if (mCompName == unknown_comp) mDisableTest = true;
+
+        C2SecureModeTuning secureModeTuning{};
+        mComponent->query({ &secureModeTuning }, {}, C2_MAY_BLOCK, nullptr);
+        if (secureModeTuning.value == C2Config::SM_READ_PROTECTED) {
+            mDisableTest = true;
+        }
+
         if (mDisableTest) std::cout << "[   WARN   ] Test Disabled \n";
     }
 
@@ -371,11 +382,12 @@
         ASSERT_EQ(eleStream.gcount(), size);
 
         work->input.buffers.clear();
+        auto alignedSize = ALIGN(size, PAGE_SIZE);
         if (size) {
             std::shared_ptr<C2LinearBlock> block;
             ASSERT_EQ(C2_OK,
                     linearPool->fetchLinearBlock(
-                        size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
+                        alignedSize, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
                         &block));
             ASSERT_TRUE(block);
 
@@ -385,13 +397,13 @@
                 fprintf(stderr, "C2LinearBlock::map() failed : %d", view.error());
                 break;
             }
-            ASSERT_EQ((size_t)size, view.capacity());
+            ASSERT_EQ((size_t)alignedSize, view.capacity());
             ASSERT_EQ(0u, view.offset());
-            ASSERT_EQ((size_t)size, view.size());
+            ASSERT_EQ((size_t)alignedSize, view.size());
 
             memcpy(view.base(), data, size);
 
-            work->input.buffers.emplace_back(new LinearBuffer(block));
+            work->input.buffers.emplace_back(new LinearBuffer(block, size));
             free(data);
         }
         work->worklets.clear();
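
The hunk above pairs the page-aligned fetch with the new two-argument LinearBuffer so the component still sees only the payload bytes. A minimal sketch of the combined idea, reusing the test-local names linearPool, work, data and size (not part of the patch):

    // Fetch a block whose capacity is rounded up to a page, but share only
    // the 'size' bytes that were actually read from the elementary stream.
    size_t alignedSize = ALIGN(size, PAGE_SIZE);
    std::shared_ptr<C2LinearBlock> block;
    if (linearPool->fetchLinearBlock(
            alignedSize, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
            &block) == C2_OK && block) {
        C2WriteView view = block->map().get();
        memcpy(view.base(), data, size);  // only 'size' bytes carry payload
        work->input.buffers.emplace_back(new LinearBuffer(block, size));
    }
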
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
similarity index 89%
rename from media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index 6bcf840..c1f5a92 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -284,15 +284,16 @@
                    std::list<std::unique_ptr<C2Work>>& workQueue,
                    std::list<uint64_t>& flushedIndices,
                    std::shared_ptr<C2BlockPool>& graphicPool,
-                   std::ifstream& eleStream, uint32_t frameID,
-                   uint32_t nFrames, uint32_t nWidth, int32_t nHeight,
-                   bool flushed = false,bool signalEOS = true) {
+                   std::ifstream& eleStream, bool& disableTest,
+                   uint32_t frameID, uint32_t nFrames, uint32_t nWidth,
+                   int32_t nHeight, bool flushed = false, bool signalEOS = true) {
     typedef std::unique_lock<std::mutex> ULock;
 
     uint32_t maxRetry = 0;
     int bytesCount = nWidth * nHeight * 3 >> 1;
     int32_t timestampIncr = ENCODER_TIMESTAMP_INCREMENT;
     uint64_t timestamp = 0;
+    c2_status_t err = C2_OK;
     while (1) {
         if (nFrames == 0) break;
         uint32_t flags = 0;
@@ -333,16 +334,21 @@
             ASSERT_EQ(eleStream.gcount(), bytesCount);
         }
         std::shared_ptr<C2GraphicBlock> block;
-        ASSERT_EQ(
-            C2_OK,
-            graphicPool->fetchGraphicBlock(
+        err = graphicPool->fetchGraphicBlock(
                 nWidth, nHeight, HAL_PIXEL_FORMAT_YV12,
-                {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block));
+                {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+        if (err != C2_OK) {
+            fprintf(stderr, "fetchGraphicBlock failed : %d\n", err);
+            disableTest = true;
+            break;
+        }
+
         ASSERT_TRUE(block);
         // Graphic View
         C2GraphicView view = block->map().get();
         if (view.error() != C2_OK) {
             fprintf(stderr, "C2GraphicBlock::map() failed : %d", view.error());
+            disableTest = true;
             break;
         }
 
@@ -420,8 +426,16 @@
     ASSERT_EQ(mComponent->start(), C2_OK);
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       0, ENC_NUM_FRAMES, nWidth, nHeight, false, signalEOS));
+    // mDisableTest will be set if the buffer was not fetched properly.
+    // This may happen when the resolution is unsupported even though config succeeded.
+    // In such cases, we skip encoding the input stream.
+    if (mDisableTest) {
+        std::cout << "[   WARN   ] Test Disabled \n";
+        ASSERT_EQ(mComponent->stop(), C2_OK);
+        return;
+    }
 
     // If EOS is not sent, sending empty input with EOS flag
     inputFrames = ENC_NUM_FRAMES;
@@ -531,8 +545,17 @@
     ALOGV("mURL : %s", mURL);
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       0, numFramesFlushed, nWidth, nHeight));
+    // mDisableTest will be set if the buffer was not fetched properly.
+    // This may happen when the resolution is unsupported even though config succeeded.
+    // In such cases, we skip encoding the input stream.
+    if (mDisableTest) {
+        std::cout << "[   WARN   ] Test Disabled \n";
+        ASSERT_EQ(mComponent->stop(), C2_OK);
+        return;
+    }
+
     std::list<std::unique_ptr<C2Work>> flushedWork;
     c2_status_t err =
         mComponent->flush(C2Component::FLUSH_COMPONENT, &flushedWork);
@@ -561,10 +584,19 @@
     mFlushedIndices.clear();
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       numFramesFlushed, numFrames - numFramesFlushed,
                       nWidth, nHeight, true));
     eleStream.close();
+    // mDisableTest will be set if the buffer was not fetched properly.
+    // This may happen when the resolution is unsupported even though config succeeded.
+    // In such cases, we skip encoding the input stream.
+    if (mDisableTest) {
+        std::cout << "[   WARN   ] Test Disabled \n";
+        ASSERT_EQ(mComponent->stop(), C2_OK);
+        return;
+    }
+
     err = mComponent->flush(C2Component::FLUSH_COMPONENT, &flushedWork);
     ASSERT_EQ(err, C2_OK);
     ASSERT_NO_FATAL_FAILURE(
@@ -607,19 +639,19 @@
 
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       0, 1, nWidth, nHeight, false, false));
 
     // Feed larger input buffer.
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       1, 1, nWidth*2, nHeight*2, false, false));
 
     // Feed smaller input buffer.
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       2, 1, nWidth/2, nHeight/2, false, true));
 
     // blocking call to ensures application to Wait till all the inputs are
@@ -629,15 +661,13 @@
         waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue));
 
     if (mFramesReceived != 3) {
-        ALOGE("Input buffer count and Output buffer count mismatch");
-        ALOGE("framesReceived : %d inputFrames : 3", mFramesReceived);
-        ASSERT_TRUE(false);
+        std::cout << "[   WARN   ] Component didn't receive all buffers back \n";
+        ALOGW("framesReceived : %d inputFrames : 3", mFramesReceived);
     }
 
     if (mFailedWorkReceived == 0) {
-        ALOGE("Expected failed frame count mismatch");
-        ALOGE("failedFramesReceived : %d", mFailedWorkReceived);
-        ASSERT_TRUE(false);
+        std::cout << "[   WARN   ] Expected failed frame count mismatch \n";
+        ALOGW("failedFramesReceived : %d", mFailedWorkReceived);
     }
 
     ASSERT_EQ(mComponent->stop(), C2_OK);
@@ -665,8 +695,17 @@
 
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream, 0,
-                      MAX_INPUT_BUFFERS, nWidth, nHeight));
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
+                      0, MAX_INPUT_BUFFERS, nWidth, nHeight, false, true));
+
+    // mDisableTest will be set if the buffer was not fetched properly.
+    // This may happen when the resolution is unsupported even though config succeeded.
+    // In such cases, we skip encoding the input stream.
+    if (mDisableTest) {
+        std::cout << "[   WARN   ] Test Disabled \n";
+        ASSERT_EQ(mComponent->stop(), C2_OK);
+        return;
+    }
 
     ALOGD("Waiting for input consumption");
     ASSERT_NO_FATAL_FAILURE(
@@ -676,6 +715,7 @@
     ASSERT_EQ(mComponent->stop(), C2_OK);
     ASSERT_EQ(mComponent->reset(), C2_OK);
 }
+
 INSTANTIATE_TEST_CASE_P(NonStdSizes, Codec2VideoEncResolutionTest, ::testing::Values(
     std::make_pair(52, 18),
     std::make_pair(365, 365),
diff --git a/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h
index dd45557..e37ca38 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h
@@ -23,6 +23,8 @@
 #define ENC_DEFAULT_FRAME_HEIGHT 288
 #define MAX_ITERATIONS 128
 
+#define ALIGN(_sz, _align) ((_sz + (_align - 1)) & ~(_align - 1))
+
 /*
  * Common video utils
  */
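
The macro rounds a size up to the next multiple of the alignment; the alignment must be a power of two for the bitmask to work. A few worked values, assuming a 4096-byte page purely for illustration:

    static_assert(((100  + (4096 - 1)) & ~(4096 - 1)) == 4096, "100 bytes -> one page");
    static_assert(((4096 + (4096 - 1)) & ~(4096 - 1)) == 4096, "aligned size is unchanged");
    static_assert(((4097 + (4096 - 1)) & ~(4096 - 1)) == 8192, "one byte over -> two pages");
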
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 2b417a6..5ed54f1 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -959,9 +959,9 @@
 
 std::shared_ptr<Codec2Client::InputSurface> Codec2Client::CreateInputSurface(
         char const* serviceName) {
-    uint32_t inputSurfaceSetting = ::android::base::GetUintProperty(
-            "debug.stagefright.c2inputsurface", uint32_t(0));
-    if (inputSurfaceSetting == 0) {
+    int32_t inputSurfaceSetting = ::android::base::GetIntProperty(
+            "debug.stagefright.c2inputsurface", int32_t(0));
+    if (inputSurfaceSetting <= 0) {
         return nullptr;
     }
     size_t index = GetServiceNames().size();
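
Read together with the CCodec.cpp change further down, the property now selects between three behaviours. The sketch below is an assumed summary of that decision, not code from the patch:

    #include <android-base/properties.h>

    // Assumed meaning of debug.stagefright.c2inputsurface after this patch:
    //   > 0  : Codec2Client::CreateInputSurface() returns a Codec2 input surface
    //   == 0 : no persistent input surface is created here (nullptr)
    //   == -1: CCodec falls back to an OMX OmxGraphicBufferSource-based surface
    bool useCodec2InputSurface() {
        return ::android::base::GetIntProperty(
                "debug.stagefright.c2inputsurface", int32_t(0)) > 0;
    }
    bool useOmxInputSurfaceFallback() {
        return ::android::base::GetIntProperty(
                "debug.stagefright.c2inputsurface", int32_t(0)) == -1;
    }
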
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 8ae80ee..9c84c71 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -9,6 +9,7 @@
         "CCodecConfig.cpp",
         "Codec2Buffer.cpp",
         "Codec2InfoBuilder.cpp",
+        "Omx2IGraphicBufferSource.cpp",
         "PipelineWatcher.cpp",
         "ReflectedParamUpdater.cpp",
         "SkipCutBuffer.cpp",
@@ -41,8 +42,10 @@
         "libmedia",
         "libmedia_omx",
         "libsfplugin_ccodec_utils",
+        "libstagefright_bufferqueue_helper",
         "libstagefright_codecbase",
         "libstagefright_foundation",
+        "libstagefright_omx",
         "libstagefright_omx_utils",
         "libstagefright_xmlparser",
         "libui",
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index 3a93c2a..78d221e 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -36,6 +36,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <ui/Fence.h>
 #include <ui/GraphicBuffer.h>
+#include <utils/Thread.h>
 
 #include "C2OMXNode.h"
 
@@ -50,16 +51,128 @@
 
 }  // namespace
 
+class C2OMXNode::QueueThread : public Thread {
+public:
+    QueueThread() : Thread(false) {}
+    ~QueueThread() override = default;
+    void queue(
+            const std::shared_ptr<Codec2Client::Component> &comp,
+            int fenceFd,
+            std::unique_ptr<C2Work> &&work,
+            android::base::unique_fd &&fd0,
+            android::base::unique_fd &&fd1) {
+        Mutexed<Jobs>::Locked jobs(mJobs);
+        auto it = jobs->queues.try_emplace(comp, comp, systemTime()).first;
+        it->second.workList.emplace_back(
+                std::move(work), fenceFd, std::move(fd0), std::move(fd1));
+        jobs->cond.broadcast();
+    }
+
+protected:
+    bool threadLoop() override {
+        constexpr nsecs_t kIntervalNs = nsecs_t(10) * 1000 * 1000;  // 10ms
+        constexpr nsecs_t kWaitNs = kIntervalNs * 2;
+        for (int i = 0; i < 2; ++i) {
+            Mutexed<Jobs>::Locked jobs(mJobs);
+            nsecs_t nowNs = systemTime();
+            bool queued = false;
+            for (auto it = jobs->queues.begin(); it != jobs->queues.end(); ) {
+                Queue &queue = it->second;
+                if (queue.workList.empty()
+                        || nowNs - queue.lastQueuedTimestampNs < kIntervalNs) {
+                    ++it;
+                    continue;
+                }
+                std::shared_ptr<Codec2Client::Component> comp = queue.component.lock();
+                if (!comp) {
+                    it = jobs->queues.erase(it);
+                    continue;
+                }
+                std::list<std::unique_ptr<C2Work>> items;
+                std::vector<int> fenceFds;
+                std::vector<android::base::unique_fd> uniqueFds;
+                while (!queue.workList.empty()) {
+                    items.push_back(std::move(queue.workList.front().work));
+                    fenceFds.push_back(queue.workList.front().fenceFd);
+                    uniqueFds.push_back(std::move(queue.workList.front().fd0));
+                    uniqueFds.push_back(std::move(queue.workList.front().fd1));
+                    queue.workList.pop_front();
+                }
+
+                jobs.unlock();
+                for (int fenceFd : fenceFds) {
+                    sp<Fence> fence(new Fence(fenceFd));
+                    fence->waitForever(LOG_TAG);
+                }
+                comp->queue(&items);
+                for (android::base::unique_fd &ufd : uniqueFds) {
+                    (void)ufd.release();
+                }
+                jobs.lock();
+
+                it = jobs->queues.upper_bound(comp);
+                queued = true;
+            }
+            if (queued) {
+                return true;
+            }
+            if (i == 0) {
+                jobs.waitForConditionRelative(jobs->cond, kWaitNs);
+            }
+        }
+        return true;
+    }
+
+private:
+    struct WorkFence {
+        WorkFence(std::unique_ptr<C2Work> &&w, int fd) : work(std::move(w)), fenceFd(fd) {}
+
+        WorkFence(
+                std::unique_ptr<C2Work> &&w,
+                int fd,
+                android::base::unique_fd &&uniqueFd0,
+                android::base::unique_fd &&uniqueFd1)
+            : work(std::move(w)),
+              fenceFd(fd),
+              fd0(std::move(uniqueFd0)),
+              fd1(std::move(uniqueFd1)) {}
+
+        std::unique_ptr<C2Work> work;
+        int fenceFd;
+        android::base::unique_fd fd0;
+        android::base::unique_fd fd1;
+    };
+    struct Queue {
+        Queue(const std::shared_ptr<Codec2Client::Component> &comp, nsecs_t timestamp)
+            : component(comp), lastQueuedTimestampNs(timestamp) {}
+        Queue(const Queue &) = delete;
+        Queue &operator =(const Queue &) = delete;
+
+        std::weak_ptr<Codec2Client::Component> component;
+        std::list<WorkFence> workList;
+        nsecs_t lastQueuedTimestampNs;
+    };
+    struct Jobs {
+        std::map<std::weak_ptr<Codec2Client::Component>,
+                 Queue,
+                 std::owner_less<std::weak_ptr<Codec2Client::Component>>> queues;
+        Condition cond;
+    };
+    Mutexed<Jobs> mJobs;
+};
+
 C2OMXNode::C2OMXNode(const std::shared_ptr<Codec2Client::Component> &comp)
     : mComp(comp), mFrameIndex(0), mWidth(0), mHeight(0), mUsage(0),
-      mAdjustTimestampGapUs(0), mFirstInputFrame(true) {
+      mAdjustTimestampGapUs(0), mFirstInputFrame(true),
+      mQueueThread(new QueueThread) {
     android_fdsan_set_error_level(ANDROID_FDSAN_ERROR_LEVEL_WARN_ALWAYS);
+    mQueueThread->run("C2OMXNode", PRIORITY_AUDIO);
 }
 
 status_t C2OMXNode::freeNode() {
     mComp.reset();
     android_fdsan_set_error_level(ANDROID_FDSAN_ERROR_LEVEL_WARN_ONCE);
-    return OK;
+    return mQueueThread->requestExitAndWait();
 }
 
 status_t C2OMXNode::sendCommand(OMX_COMMANDTYPE cmd, OMX_S32 param) {
@@ -216,11 +329,6 @@
 status_t C2OMXNode::emptyBuffer(
         buffer_id buffer, const OMXBuffer &omxBuf,
         OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
-    // TODO: better fence handling
-    if (fenceFd >= 0) {
-        sp<Fence> fence = new Fence(fenceFd);
-        fence->waitForever(LOG_TAG);
-    }
     std::shared_ptr<Codec2Client::Component> comp = mComp.lock();
     if (!comp) {
         return NO_INIT;
@@ -299,22 +407,8 @@
     }
     work->worklets.clear();
     work->worklets.emplace_back(new C2Worklet);
-    std::list<std::unique_ptr<C2Work>> items;
-    uint64_t index = work->input.ordinal.frameIndex.peeku();
-    items.push_back(std::move(work));
-
-    c2_status_t err = comp->queue(&items);
-    if (err != C2_OK) {
-        (void)fd0.release();
-        (void)fd1.release();
-        return UNKNOWN_ERROR;
-    }
-
-    mBufferIdsInUse.lock()->emplace(index, buffer);
-
-    // release ownership of the fds
-    (void)fd0.release();
-    (void)fd1.release();
+    mBufferIdsInUse.lock()->emplace(work->input.ordinal.frameIndex.peeku(), buffer);
+    mQueueThread->queue(comp, fenceFd, std::move(work), std::move(fd0), std::move(fd1));
 
     return OK;
 }
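
One detail of the new QueueThread worth noting: its per-component queues are keyed by std::weak_ptr, which needs an owner-based comparator to be usable as a map key. A small, self-contained illustration, independent of the patch:

    #include <map>
    #include <memory>

    struct Component {};

    // std::weak_ptr has no operator<; std::owner_less compares control blocks,
    // so the key stays well-ordered even after the last shared_ptr is gone.
    std::map<std::weak_ptr<Component>, int,
             std::owner_less<std::weak_ptr<Component>>> queues;

    void example() {
        auto comp = std::make_shared<Component>();
        queues.emplace(comp, 1);      // a shared_ptr converts to the weak_ptr key
        auto it = queues.find(comp);  // lookups with a shared_ptr work the same way
        if (it != queues.end() && !it->first.lock()) {
            queues.erase(it);         // expired entries can be pruned lazily
        }
    }
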
diff --git a/media/codec2/sfplugin/C2OMXNode.h b/media/codec2/sfplugin/C2OMXNode.h
index 3ca6c0a..1717c96 100644
--- a/media/codec2/sfplugin/C2OMXNode.h
+++ b/media/codec2/sfplugin/C2OMXNode.h
@@ -113,6 +113,9 @@
     c2_cntr64_t mPrevCodecTimestamp; // adjusted (codec) timestamp for previous frame
 
     Mutexed<std::map<uint64_t, buffer_id>> mBufferIdsInUse;
+
+    class QueueThread;
+    sp<QueueThread> mQueueThread;
 };
 
 }  // namespace android
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index aa7189c..8223273 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -45,6 +45,7 @@
 #include "CCodec.h"
 #include "CCodecBufferChannel.h"
 #include "InputSurfaceWrapper.h"
+#include "Omx2IGraphicBufferSource.h"
 
 extern "C" android::PersistentSurface *CreateInputSurface();
 
@@ -374,7 +375,11 @@
 
         // consumer usage is queried earlier.
 
-        ALOGD("ISConfig%s", status.str().c_str());
+        if (status.str().empty()) {
+            ALOGD("ISConfig not changed");
+        } else {
+            ALOGD("ISConfig%s", status.str().c_str());
+        }
         return err;
     }
 
@@ -1067,6 +1072,7 @@
     OmxStatus s;
     android::sp<HGraphicBufferProducer> gbp;
     android::sp<HGraphicBufferSource> gbs;
+
     using ::android::hardware::Return;
     Return<void> transStatus = omx->createInputSurface(
             [&s, &gbp, &gbs](
@@ -1852,15 +1858,30 @@
 
 // Create Codec 2.0 input surface
 extern "C" android::PersistentSurface *CreateInputSurface() {
+    using namespace android;
     // Attempt to create a Codec2's input surface.
-    std::shared_ptr<android::Codec2Client::InputSurface> inputSurface =
-            android::Codec2Client::CreateInputSurface();
+    std::shared_ptr<Codec2Client::InputSurface> inputSurface =
+            Codec2Client::CreateInputSurface();
     if (!inputSurface) {
-        return nullptr;
+        if (property_get_int32("debug.stagefright.c2inputsurface", 0) == -1) {
+            sp<IGraphicBufferProducer> gbp;
+            sp<OmxGraphicBufferSource> gbs = new OmxGraphicBufferSource();
+            status_t err = gbs->initCheck();
+            if (err != OK) {
+                ALOGE("Failed to create persistent input surface: error %d", err);
+                return nullptr;
+            }
+            return new PersistentSurface(
+                    gbs->getIGraphicBufferProducer(),
+                    sp<IGraphicBufferSource>(
+                        new Omx2IGraphicBufferSource(gbs)));
+        } else {
+            return nullptr;
+        }
     }
-    return new android::PersistentSurface(
+    return new PersistentSurface(
             inputSurface->getGraphicBufferProducer(),
-            static_cast<android::sp<android::hidl::base::V1_0::IBase>>(
+            static_cast<sp<android::hidl::base::V1_0::IBase>>(
             inputSurface->getHalInterface()));
 }
 
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 2babc83..0e1bb0a 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -220,12 +220,11 @@
         const std::shared_ptr<CCodecCallback> &callback)
     : mHeapSeqNum(-1),
       mCCodecCallback(callback),
-      mDelay(0),
       mFrameIndex(0u),
       mFirstValidFrameIndex(0u),
       mMetaMode(MODE_NONE),
       mInputMetEos(false) {
-    mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
+    mOutputSurface.lock()->maxDequeueBuffers = 2 * kSmoothnessFactor + kRenderingDepth;
     {
         Mutexed<Input>::Locked input(mInput);
         input->buffers.reset(new DummyInputBuffers(""));
@@ -814,7 +813,6 @@
 
     size_t numInputSlots = inputDelayValue + pipelineDelayValue + kSmoothnessFactor;
     size_t numOutputSlots = outputDelayValue + kSmoothnessFactor;
-    mDelay = inputDelayValue + pipelineDelayValue + outputDelayValue;
 
     // TODO: get this from input format
     bool secure = mComponent->getName().find(".secure") != std::string::npos;
@@ -888,6 +886,8 @@
 
         bool forceArrayMode = false;
         Mutexed<Input>::Locked input(mInput);
+        input->inputDelay = inputDelayValue;
+        input->pipelineDelay = pipelineDelayValue;
         input->numSlots = numInputSlots;
         input->extraBuffers.flush();
         input->numExtraSlots = 0u;
@@ -896,6 +896,9 @@
                 input->buffers.reset(new DummyInputBuffers(mName));
             } else if (mMetaMode == MODE_ANW) {
                 input->buffers.reset(new GraphicMetadataInputBuffers(mName));
+                // This is to ensure buffers do not get released prematurely.
+                // TODO: handle this without going into array mode
+                forceArrayMode = true;
             } else {
                 input->buffers.reset(new GraphicInputBuffers(numInputSlots, mName));
             }
@@ -945,7 +948,8 @@
         uint32_t outputGeneration;
         {
             Mutexed<OutputSurface>::Locked output(mOutputSurface);
-            output->maxDequeueBuffers = numOutputSlots + reorderDepth.value + kRenderingDepth;
+            output->maxDequeueBuffers = numOutputSlots + numInputSlots +
+                    reorderDepth.value + kRenderingDepth;
             outputSurface = output->surface ?
                     output->surface->getIGraphicBufferProducer() : nullptr;
             if (outputSurface) {
@@ -1054,6 +1058,7 @@
         }
 
         Mutexed<Output>::Locked output(mOutput);
+        output->outputDelay = outputDelayValue;
         output->numSlots = numOutputSlots;
         if (graphic) {
             if (outputSurface) {
@@ -1328,9 +1333,10 @@
                     ALOGV("[%s] onWorkDone: updated reorder depth to %u",
                           mName, reorderDepth.value);
                     size_t numOutputSlots = mOutput.lock()->numSlots;
+                    size_t numInputSlots = mInput.lock()->numSlots;
                     Mutexed<OutputSurface>::Locked output(mOutputSurface);
-                    output->maxDequeueBuffers =
-                        numOutputSlots + reorderDepth.value + kRenderingDepth;
+                    output->maxDequeueBuffers = numOutputSlots + numInputSlots +
+                            reorderDepth.value + kRenderingDepth;
                     if (output->surface) {
                         output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
                     }
@@ -1377,25 +1383,37 @@
                         (void)mPipelineWatcher.lock()->outputDelay(outputDelay.value);
 
                         bool outputBuffersChanged = false;
-                        Mutexed<Output>::Locked output(mOutput);
-                        output->outputDelay = outputDelay.value;
-                        size_t numOutputSlots = outputDelay.value + kSmoothnessFactor;
-                        if (output->numSlots < numOutputSlots) {
-                            output->numSlots = numOutputSlots;
-                            if (output->buffers->isArrayMode()) {
-                                OutputBuffersArray *array =
-                                    (OutputBuffersArray *)output->buffers.get();
-                                ALOGV("[%s] onWorkDone: growing output buffer array to %zu",
-                                      mName, numOutputSlots);
-                                array->grow(numOutputSlots);
-                                outputBuffersChanged = true;
+                        size_t numOutputSlots = 0;
+                        size_t numInputSlots = mInput.lock()->numSlots;
+                        {
+                            Mutexed<Output>::Locked output(mOutput);
+                            output->outputDelay = outputDelay.value;
+                            numOutputSlots = outputDelay.value + kSmoothnessFactor;
+                            if (output->numSlots < numOutputSlots) {
+                                output->numSlots = numOutputSlots;
+                                if (output->buffers->isArrayMode()) {
+                                    OutputBuffersArray *array =
+                                        (OutputBuffersArray *)output->buffers.get();
+                                    ALOGV("[%s] onWorkDone: growing output buffer array to %zu",
+                                          mName, numOutputSlots);
+                                    array->grow(numOutputSlots);
+                                    outputBuffersChanged = true;
+                                }
                             }
+                            numOutputSlots = output->numSlots;
                         }
-                        output.unlock();
 
                         if (outputBuffersChanged) {
                             mCCodecCallback->onOutputBuffersChanged();
                         }
+
+                        uint32_t depth = mReorderStash.lock()->depth();
+                        Mutexed<OutputSurface>::Locked output(mOutputSurface);
+                        output->maxDequeueBuffers = numOutputSlots + numInputSlots +
+                                depth + kRenderingDepth;
+                        if (output->surface) {
+                            output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+                        }
                     }
                 }
                 break;
@@ -1620,7 +1638,12 @@
     // When client pushed EOS, we want all the work to be done quickly.
     // Otherwise, component may have stalled work due to input starvation up to
     // the sum of the delay in the pipeline.
-    size_t n = mInputMetEos ? 0 : mDelay;
+    size_t n = 0;
+    if (!mInputMetEos) {
+        size_t outputDelay = mOutput.lock()->outputDelay;
+        Mutexed<Input>::Locked input(mInput);
+        n = input->inputDelay + input->pipelineDelay + outputDelay;
+    }
     return mPipelineWatcher.lock()->elapsed(PipelineWatcher::Clock::now(), n);
 }
 
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index ae57678..ee3455d 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -228,8 +228,6 @@
     QueueSync mQueueSync;
     std::vector<std::unique_ptr<C2Param>> mParamsToBeSet;
 
-    size_t mDelay;
-
     struct Input {
         Input();
 
@@ -306,6 +304,7 @@
                 const C2WorkOrdinalStruct &ordinal);
         void defer(const Entry &entry);
         bool hasPending() const;
+        uint32_t depth() const { return mDepth; }
 
     private:
         std::list<Entry> mPending;
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 1cfdc19..5adcd94 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -235,7 +235,10 @@
     const std::vector<ConfigMapper> &getConfigMappersForSdkKey(std::string key) const {
         auto it = mConfigMappers.find(key);
         if (it == mConfigMappers.end()) {
-            ALOGD("no c2 equivalents for %s", key.c_str());
+            if (mComplained.count(key) == 0) {
+                ALOGD("no c2 equivalents for %s", key.c_str());
+                mComplained.insert(key);
+            }
             return NO_MAPPERS;
         }
         ALOGV("found %zu eqs for %s", it->second.size(), key.c_str());
@@ -304,6 +307,7 @@
 
 private:
     std::map<SdkKey, std::vector<ConfigMapper>> mConfigMappers;
+    mutable std::set<std::string> mComplained;
 };
 
 const std::vector<ConfigMapper> StandardParams::NO_MAPPERS;
@@ -508,7 +512,8 @@
                .limitTo(D::ENCODER & D::VIDEO));
     // convert to timestamp base
     add(ConfigMapper(KEY_I_FRAME_INTERVAL, C2_PARAMKEY_SYNC_FRAME_INTERVAL, "value")
-        .withMappers([](C2Value v) -> C2Value {
+        .limitTo(D::VIDEO & D::ENCODER & D::CONFIG)
+        .withMapper([](C2Value v) -> C2Value {
             // convert from i32 to float
             int32_t i32Value;
             float fpValue;
@@ -518,12 +523,6 @@
                 return int64_t(c2_min(1000000 * fpValue + 0.5, (double)INT64_MAX));
             }
             return C2Value();
-        }, [](C2Value v) -> C2Value {
-            int64_t i64;
-            if (v.get(&i64)) {
-                return float(i64) / 1000000;
-            }
-            return C2Value();
         }));
     // remove when codecs switch to proper coding.gop (add support for calculating gop)
     deprecated(ConfigMapper("i-frame-period", "coding.gop", "intra-period")
@@ -1033,7 +1032,25 @@
     }
 
     ReflectedParamUpdater::Dict reflected = mParamUpdater->getParams(paramPointers);
-    ALOGD("c2 config is %s", reflected.debugString().c_str());
+    std::string config = reflected.debugString();
+    std::set<std::string> configLines;
+    std::string diff;
+    for (size_t start = 0; start != std::string::npos; ) {
+        size_t end = config.find('\n', start);
+        size_t count = (end == std::string::npos)
+                ? std::string::npos
+                : end - start + 1;
+        std::string line = config.substr(start, count);
+        configLines.insert(line);
+        if (mLastConfig.count(line) == 0) {
+            diff.append(line);
+        }
+        start = (end == std::string::npos) ? std::string::npos : end + 1;
+    }
+    if (!diff.empty()) {
+        ALOGD("c2 config diff is %s", diff.c_str());
+    }
+    mLastConfig.swap(configLines);
 
     bool changed = false;
     if (domain & mInputDomain) {
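
The hunk above reduces log noise by printing only the config lines that changed since the previous query. A stand-alone sketch of the same line-set diff, with illustrative names only:

    #include <set>
    #include <sstream>
    #include <string>

    // Returns the lines of |config| not present in |last| and updates |last|.
    static std::string DiffLines(const std::string &config, std::set<std::string> *last) {
        std::set<std::string> current;
        std::string diff;
        std::istringstream stream(config);
        std::string line;
        while (std::getline(stream, line)) {
            current.insert(line);
            if (last->count(line) == 0) {
                diff += line + '\n';
            }
        }
        last->swap(current);
        return diff;
    }
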
diff --git a/media/codec2/sfplugin/CCodecConfig.h b/media/codec2/sfplugin/CCodecConfig.h
index 3bafe3f..a61c8b7 100644
--- a/media/codec2/sfplugin/CCodecConfig.h
+++ b/media/codec2/sfplugin/CCodecConfig.h
@@ -134,6 +134,8 @@
     /// For now support a validation function.
     std::map<C2Param::Index, LocalParamValidator> mLocalParams;
 
+    std::set<std::string> mLastConfig;
+
     CCodecConfig();
 
     /// initializes the members required to manage the format: descriptors, reflector,
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 702ad6f..5c8ad56 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -25,6 +25,7 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/AUtils.h>
 #include <nativebase/nativebase.h>
+#include <ui/Fence.h>
 
 #include <C2AllocatorGralloc.h>
 #include <C2BlockInternal.h>
@@ -590,7 +591,12 @@
     std::shared_ptr<C2GraphicBlock> block = _C2BlockFactory::CreateGraphicBlock(alloc);
 
     meta->pBuffer = 0;
-    // TODO: fence
+    // TODO: wrap this in C2Fence so that the component can wait when it
+    //       actually starts processing.
+    if (meta->nFenceFd >= 0) {
+        sp<Fence> fence(new Fence(meta->nFenceFd));
+        fence->waitForever(LOG_TAG);
+    }
     return C2Buffer::CreateGraphicBuffer(
             block->share(C2Rect(buffer->width, buffer->height), C2Fence()));
 #else
diff --git a/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp b/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp
new file mode 100644
index 0000000..764fa00
--- /dev/null
+++ b/media/codec2/sfplugin/Omx2IGraphicBufferSource.cpp
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Omx2IGraphicBufferSource"
+#include <android-base/logging.h>
+
+#include "Omx2IGraphicBufferSource.h"
+
+#include <android/BnOMXBufferSource.h>
+#include <media/OMXBuffer.h>
+#include <media/stagefright/omx/OMXUtils.h>
+
+#include <OMX_Component.h>
+#include <OMX_Index.h>
+#include <OMX_IndexExt.h>
+
+namespace android {
+
+namespace /* unnamed */ {
+
+// OmxGraphicBufferSource -> IOMXBufferSource
+
+struct OmxGbs2IOmxBs : public BnOMXBufferSource {
+    sp<OmxGraphicBufferSource> mBase;
+    OmxGbs2IOmxBs(sp<OmxGraphicBufferSource> const& base) : mBase{base} {}
+    BnStatus onOmxExecuting() override {
+        return mBase->onOmxExecuting();
+    }
+    BnStatus onOmxIdle() override {
+        return mBase->onOmxIdle();
+    }
+    BnStatus onOmxLoaded() override {
+        return mBase->onOmxLoaded();
+    }
+    BnStatus onInputBufferAdded(int32_t bufferId) override {
+        return mBase->onInputBufferAdded(bufferId);
+    }
+    BnStatus onInputBufferEmptied(
+            int32_t bufferId,
+            OMXFenceParcelable const& fenceParcel) override {
+        return mBase->onInputBufferEmptied(bufferId, fenceParcel.get());
+    }
+};
+
+struct OmxNodeWrapper : public IOmxNodeWrapper {
+    sp<IOMXNode> mBase;
+    OmxNodeWrapper(sp<IOMXNode> const& base) : mBase{base} {}
+    status_t emptyBuffer(
+            int32_t bufferId, uint32_t flags,
+            const sp<GraphicBuffer> &buffer,
+            int64_t timestamp, int fenceFd) override {
+        return mBase->emptyBuffer(bufferId, buffer, flags, timestamp, fenceFd);
+    }
+    void dispatchDataSpaceChanged(
+            int32_t dataSpace, int32_t aspects, int32_t pixelFormat) override {
+        omx_message msg{};
+        msg.type = omx_message::EVENT;
+        msg.fenceFd = -1;
+        msg.u.event_data.event = OMX_EventDataSpaceChanged;
+        msg.u.event_data.data1 = dataSpace;
+        msg.u.event_data.data2 = aspects;
+        msg.u.event_data.data3 = pixelFormat;
+        mBase->dispatchMessage(msg);
+    }
+};
+
+} // unnamed namespace
+
+// Omx2IGraphicBufferSource
+Omx2IGraphicBufferSource::Omx2IGraphicBufferSource(
+        sp<OmxGraphicBufferSource> const& base)
+      : mBase{base},
+        mOMXBufferSource{new OmxGbs2IOmxBs(base)} {
+}
+
+BnStatus Omx2IGraphicBufferSource::setSuspend(
+        bool suspend, int64_t timeUs) {
+    return BnStatus::fromStatusT(mBase->setSuspend(suspend, timeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setRepeatPreviousFrameDelayUs(
+        int64_t repeatAfterUs) {
+    return BnStatus::fromStatusT(mBase->setRepeatPreviousFrameDelayUs(repeatAfterUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setMaxFps(float maxFps) {
+    return BnStatus::fromStatusT(mBase->setMaxFps(maxFps));
+}
+
+BnStatus Omx2IGraphicBufferSource::setTimeLapseConfig(
+        double fps, double captureFps) {
+    return BnStatus::fromStatusT(mBase->setTimeLapseConfig(fps, captureFps));
+}
+
+BnStatus Omx2IGraphicBufferSource::setStartTimeUs(
+        int64_t startTimeUs) {
+    return BnStatus::fromStatusT(mBase->setStartTimeUs(startTimeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setStopTimeUs(
+        int64_t stopTimeUs) {
+    return BnStatus::fromStatusT(mBase->setStopTimeUs(stopTimeUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::getStopTimeOffsetUs(
+        int64_t *stopTimeOffsetUs) {
+    return BnStatus::fromStatusT(mBase->getStopTimeOffsetUs(stopTimeOffsetUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::setColorAspects(
+        int32_t aspects) {
+    return BnStatus::fromStatusT(mBase->setColorAspects(aspects));
+}
+
+BnStatus Omx2IGraphicBufferSource::setTimeOffsetUs(
+        int64_t timeOffsetsUs) {
+    return BnStatus::fromStatusT(mBase->setTimeOffsetUs(timeOffsetsUs));
+}
+
+BnStatus Omx2IGraphicBufferSource::signalEndOfInputStream() {
+    return BnStatus::fromStatusT(mBase->signalEndOfInputStream());
+}
+
+BnStatus Omx2IGraphicBufferSource::configure(
+        const sp<IOMXNode>& omxNode, int32_t dataSpace) {
+    if (omxNode == NULL) {
+        return BnStatus::fromServiceSpecificError(BAD_VALUE);
+    }
+
+    // Do setInputSurface() first; the node will try to enable metadata
+    // mode on the input port and do the necessary error checking. If this
+    // fails, we can't use this input surface on the node.
+    status_t err = omxNode->setInputSurface(mOMXBufferSource);
+    if (err != NO_ERROR) {
+        ALOGE("Unable to set input surface: %d", err);
+        return BnStatus::fromServiceSpecificError(err);
+    }
+
+    uint32_t consumerUsage;
+    if (omxNode->getParameter(
+            (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+            &consumerUsage, sizeof(consumerUsage)) != OK) {
+        consumerUsage = 0;
+    }
+
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = 0; // kPortIndexInput
+
+    err = omxNode->getParameter(
+            OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != NO_ERROR) {
+        ALOGE("Failed to get port definition: %d", err);
+        return BnStatus::fromServiceSpecificError(UNKNOWN_ERROR);
+    }
+
+    return BnStatus::fromStatusT(mBase->configure(
+            new OmxNodeWrapper(omxNode),
+            dataSpace,
+            def.nBufferCountActual,
+            def.format.video.nFrameWidth,
+            def.format.video.nFrameHeight,
+            consumerUsage));
+}
+
+} // namespace android
+
diff --git a/media/codec2/sfplugin/Omx2IGraphicBufferSource.h b/media/codec2/sfplugin/Omx2IGraphicBufferSource.h
new file mode 100644
index 0000000..20fd1ec
--- /dev/null
+++ b/media/codec2/sfplugin/Omx2IGraphicBufferSource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OMX_2_IGRAPHICBUFFERSOURCE_H_
+#define OMX_2_IGRAPHICBUFFERSOURCE_H_
+
+#include <android/BnGraphicBufferSource.h>
+#include <media/stagefright/omx/OmxGraphicBufferSource.h>
+
+namespace android {
+
+using BnStatus = ::android::binder::Status;
+
+struct Omx2IGraphicBufferSource : public BnGraphicBufferSource {
+    sp<OmxGraphicBufferSource> mBase;
+    sp<IOMXBufferSource> mOMXBufferSource;
+    Omx2IGraphicBufferSource(sp<OmxGraphicBufferSource> const& base);
+    BnStatus configure(const sp<IOMXNode>& omxNode, int32_t dataSpace) override;
+    BnStatus setSuspend(bool suspend, int64_t timeUs) override;
+    BnStatus setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) override;
+    BnStatus setMaxFps(float maxFps) override;
+    BnStatus setTimeLapseConfig(double fps, double captureFps) override;
+    BnStatus setStartTimeUs(int64_t startTimeUs) override;
+    BnStatus setStopTimeUs(int64_t stopTimeUs) override;
+    BnStatus getStopTimeOffsetUs(int64_t *stopTimeOffsetUs) override;
+    BnStatus setColorAspects(int32_t aspects) override;
+    BnStatus setTimeOffsetUs(int64_t timeOffsetsUs) override;
+    BnStatus signalEndOfInputStream() override;
+};
+
+} // namespace android
+
+#endif // OMX_2_IGRAPHICBUFFERSOURCE_H_
+
diff --git a/media/codec2/sfplugin/PipelineWatcher.cpp b/media/codec2/sfplugin/PipelineWatcher.cpp
index 74d14e8..0ee9056 100644
--- a/media/codec2/sfplugin/PipelineWatcher.cpp
+++ b/media/codec2/sfplugin/PipelineWatcher.cpp
@@ -146,7 +146,7 @@
               std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
         durations.push_back(elapsed);
     }
-    std::nth_element(durations.begin(), durations.end(), durations.begin() + n,
+    std::nth_element(durations.begin(), durations.begin() + n, durations.end(),
                      std::greater<Clock::duration>());
     return durations[n];
 }
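
The fix matters because std::nth_element takes (first, nth, last); the old argument order passed the range end as the nth position, which is undefined. A minimal example of the corrected usage with the same descending comparator:

    #include <algorithm>
    #include <functional>
    #include <vector>

    // After the call, v[n] is the (n+1)-th largest element; everything before
    // it compares greater-or-equal under std::greater (n must be < v.size()).
    void partitionNthLargest(std::vector<long> &v, size_t n) {
        std::nth_element(v.begin(), v.begin() + n, v.end(), std::greater<long>());
    }
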
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 40160c7..7334834 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -629,7 +629,7 @@
 // static
 std::shared_ptr<C2Mapper::ProfileLevelMapper>
 C2Mapper::GetProfileLevelMapper(std::string mediaType) {
-    std::transform(mediaType.begin(), mediaType.begin(), mediaType.end(), ::tolower);
+    std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
     if (mediaType == MIMETYPE_AUDIO_AAC) {
         return std::make_shared<AacProfileLevelMapper>();
     } else if (mediaType == MIMETYPE_VIDEO_AVC) {
@@ -657,7 +657,7 @@
 // static
 std::shared_ptr<C2Mapper::ProfileLevelMapper>
 C2Mapper::GetHdrProfileLevelMapper(std::string mediaType, bool isHdr10Plus) {
-    std::transform(mediaType.begin(), mediaType.begin(), mediaType.end(), ::tolower);
+    std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
     if (mediaType == MIMETYPE_VIDEO_HEVC) {
         return std::make_shared<HevcProfileLevelMapper>(true, isHdr10Plus);
     } else if (mediaType == MIMETYPE_VIDEO_VP9) {
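
The bug fixed in both mappers is easy to miss: with (begin, begin, end) the input range [first, last) is empty, so no character was ever lowercased. A minimal illustration of the corrected call shape:

    #include <algorithm>
    #include <cctype>
    #include <string>

    std::string toLower(std::string s) {
        // (first, last, d_first, op): reads [first, last) and writes through d_first,
        // so passing begin() twice as first/last transforms nothing at all.
        std::transform(s.begin(), s.end(), s.begin(),
                       [](unsigned char c) { return std::tolower(c); });
        return s;
    }
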
diff --git a/media/codec2/vndk/C2Buffer.cpp b/media/codec2/vndk/C2Buffer.cpp
index 710b536..2d99b53 100644
--- a/media/codec2/vndk/C2Buffer.cpp
+++ b/media/codec2/vndk/C2Buffer.cpp
@@ -413,17 +413,14 @@
 
     std::shared_ptr<C2LinearAllocation> alloc;
     if (C2AllocatorIon::isValid(cHandle)) {
-        native_handle_t *handle = native_handle_clone(cHandle);
-        if (handle) {
-            c2_status_t err = sAllocator->priorLinearAllocation(handle, &alloc);
-            const std::shared_ptr<C2PooledBlockPoolData> poolData =
-                    std::make_shared<C2PooledBlockPoolData>(data);
-            if (err == C2_OK && poolData) {
-                // TODO: config params?
-                std::shared_ptr<C2LinearBlock> block =
-                        _C2BlockFactory::CreateLinearBlock(alloc, poolData);
-                return block;
-            }
+        c2_status_t err = sAllocator->priorLinearAllocation(cHandle, &alloc);
+        const std::shared_ptr<C2PooledBlockPoolData> poolData =
+                std::make_shared<C2PooledBlockPoolData>(data);
+        if (err == C2_OK && poolData) {
+            // TODO: config params?
+            std::shared_ptr<C2LinearBlock> block =
+                    _C2BlockFactory::CreateLinearBlock(alloc, poolData);
+            return block;
         }
     }
     return nullptr;
@@ -674,17 +671,14 @@
         ResultStatus status = mBufferPoolManager->allocate(
                 mConnectionId, params, &cHandle, &bufferPoolData);
         if (status == ResultStatus::OK) {
-            native_handle_t *handle = native_handle_clone(cHandle);
-            if (handle) {
-                std::shared_ptr<C2LinearAllocation> alloc;
-                std::shared_ptr<C2PooledBlockPoolData> poolData =
-                        std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
-                c2_status_t err = mAllocator->priorLinearAllocation(handle, &alloc);
-                if (err == C2_OK && poolData && alloc) {
-                    *block = _C2BlockFactory::CreateLinearBlock(alloc, poolData, 0, capacity);
-                    if (*block) {
-                        return C2_OK;
-                    }
+            std::shared_ptr<C2LinearAllocation> alloc;
+            std::shared_ptr<C2PooledBlockPoolData> poolData =
+                    std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
+            c2_status_t err = mAllocator->priorLinearAllocation(cHandle, &alloc);
+            if (err == C2_OK && poolData && alloc) {
+                *block = _C2BlockFactory::CreateLinearBlock(alloc, poolData, 0, capacity);
+                if (*block) {
+                    return C2_OK;
                 }
             }
             return C2_NO_MEMORY;
@@ -710,19 +704,16 @@
         ResultStatus status = mBufferPoolManager->allocate(
                 mConnectionId, params, &cHandle, &bufferPoolData);
         if (status == ResultStatus::OK) {
-            native_handle_t *handle = native_handle_clone(cHandle);
-            if (handle) {
-                std::shared_ptr<C2GraphicAllocation> alloc;
-                std::shared_ptr<C2PooledBlockPoolData> poolData =
-                    std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
-                c2_status_t err = mAllocator->priorGraphicAllocation(
-                        handle, &alloc);
-                if (err == C2_OK && poolData && alloc) {
-                    *block = _C2BlockFactory::CreateGraphicBlock(
-                            alloc, poolData, C2Rect(width, height));
-                    if (*block) {
-                        return C2_OK;
-                    }
+            std::shared_ptr<C2GraphicAllocation> alloc;
+            std::shared_ptr<C2PooledBlockPoolData> poolData =
+                std::make_shared<C2PooledBlockPoolData>(bufferPoolData);
+            c2_status_t err = mAllocator->priorGraphicAllocation(
+                    cHandle, &alloc);
+            if (err == C2_OK && poolData && alloc) {
+                *block = _C2BlockFactory::CreateGraphicBlock(
+                        alloc, poolData, C2Rect(width, height));
+                if (*block) {
+                    return C2_OK;
                 }
             }
             return C2_NO_MEMORY;
@@ -1117,17 +1108,14 @@
 
     std::shared_ptr<C2GraphicAllocation> alloc;
     if (C2AllocatorGralloc::isValid(cHandle)) {
-        native_handle_t *handle = native_handle_clone(cHandle);
-        if (handle) {
-            c2_status_t err = sAllocator->priorGraphicAllocation(handle, &alloc);
-            const std::shared_ptr<C2PooledBlockPoolData> poolData =
-                    std::make_shared<C2PooledBlockPoolData>(data);
-            if (err == C2_OK && poolData) {
-                // TODO: config setup?
-                std::shared_ptr<C2GraphicBlock> block =
-                        _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
-                return block;
-            }
+        c2_status_t err = sAllocator->priorGraphicAllocation(cHandle, &alloc);
+        const std::shared_ptr<C2PooledBlockPoolData> poolData =
+                std::make_shared<C2PooledBlockPoolData>(data);
+        if (err == C2_OK && poolData) {
+            // TODO: config setup?
+            std::shared_ptr<C2GraphicBlock> block =
+                    _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
+            return block;
         }
     }
     return nullptr;
diff --git a/media/codec2/vndk/platform/C2BqBuffer.cpp b/media/codec2/vndk/platform/C2BqBuffer.cpp
index 5fa48a8..8304f74 100644
--- a/media/codec2/vndk/platform/C2BqBuffer.cpp
+++ b/media/codec2/vndk/platform/C2BqBuffer.cpp
@@ -413,6 +413,7 @@
         }
 
         sp<GraphicBuffer> &slotBuffer = mBuffers[slot];
+        uint32_t outGeneration;
         if (bufferNeedsReallocation || !slotBuffer) {
             if (!slotBuffer) {
                 slotBuffer = new GraphicBuffer();
@@ -421,7 +422,7 @@
             // instead of a new allocation.
             Return<void> transResult = mProducer->requestBuffer(
                     slot,
-                    [&status, &slotBuffer](
+                    [&status, &slotBuffer, &outGeneration](
                             HStatus hStatus,
                             HBuffer const& hBuffer,
                             uint32_t generationNumber){
@@ -429,17 +430,23 @@
                                 h2b(hBuffer, &slotBuffer) &&
                                 slotBuffer) {
                             slotBuffer->setGenerationNumber(generationNumber);
+                            outGeneration = generationNumber;
                         } else {
                             status = android::BAD_VALUE;
                         }
                     });
             if (!transResult.isOk()) {
+                slotBuffer.clear();
                 return C2_BAD_VALUE;
             } else if (status != android::NO_ERROR) {
                 slotBuffer.clear();
                 (void)mProducer->cancelBuffer(slot, hFenceWrapper.getHandle()).isOk();
                 return C2_BAD_VALUE;
             }
+            if (mGeneration == 0) {
+                // The generation number could not be read when the producer was
+                // configured (the probe dequeue failed), so pick it up lazily here.
+                mGeneration = outGeneration;
+            }
         }
         if (slotBuffer) {
             ALOGV("buffer wraps %llu %d", (unsigned long long)mProducerId, slot);
@@ -563,6 +570,10 @@
             producerId = static_cast<uint64_t>(transResult);
             // TODO: provide gneration number from parameter.
             haveGeneration = getGenerationNumber(producer, &generation);
+            if (!haveGeneration) {
+                ALOGW("get generationNumber failed %llu",
+                      (unsigned long long)producerId);
+            }
         }
         int migrated = 0;
         {
@@ -580,10 +591,10 @@
                 }
             }
             int32_t oldGeneration = mGeneration;
-            if (producer && haveGeneration) {
+            if (producer) {
                 mProducer = producer;
                 mProducerId = producerId;
-                mGeneration = generation;
+                mGeneration = haveGeneration ? generation : 0;
             } else {
                 mProducer = nullptr;
                 mProducerId = 0;
@@ -591,7 +602,7 @@
                 ALOGW("invalid producer producer(%d), generation(%d)",
                       (bool)producer, haveGeneration);
             }
-            if (mProducer) { // migrate buffers
+            if (mProducer && haveGeneration) { // migrate buffers
                 for (int i = 0; i < NUM_BUFFER_SLOTS; ++i) {
                     std::shared_ptr<C2BufferQueueBlockPoolData> data =
                             mPoolDatas[i].lock();
diff --git a/media/extractors/mkv/Android.bp b/media/extractors/mkv/Android.bp
index 1744d3d..38821fd 100644
--- a/media/extractors/mkv/Android.bp
+++ b/media/extractors/mkv/Android.bp
@@ -12,10 +12,10 @@
     shared_libs: [
         "liblog",
         "libmediandk",
+        "libstagefright_flacdec",
     ],
 
     static_libs: [
-        "libstagefright_flacdec",
         "libstagefright_foundation",
         "libstagefright_metadatautils",
         "libwebm",
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index 72b94bb..298dab1 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -1062,8 +1062,15 @@
     size_t size = buffer->range_length();
 
     if (size < kOpusHeaderSize
-            || memcmp(data, "OpusHead", 8)
-            || /* version = */ data[8] != 1) {
+            || memcmp(data, "OpusHead", 8)) {
+        return AMEDIA_ERROR_MALFORMED;
+    }
+    // Allow both version 0 and 1. Per the Opus specification:
+    // an earlier draft of the specification described a version 0, but the only difference
+    // between version 1 and version 0 is that version 0 did not specify the semantics for
+    // handling the version field.
+    if ( /* version = */ data[8] > 1) {
+        ALOGW("no support for opus version %d", data[8]);
         return AMEDIA_ERROR_MALFORMED;
     }
 
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index 16958f9..140052f 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -24,7 +24,7 @@
 ndk_library {
     name: "libaaudio",
     // deliberately includes symbols from AAudioTesting.h
-    symbol_file: "libaaudio.map.txt",
+    symbol_file: "src/libaaudio.map.txt",
     first_version: "26",
     unversioned_until: "current",
 }
@@ -32,6 +32,5 @@
 cc_library_headers {
     name: "libaaudio_headers",
     export_include_dirs: ["include"],
-    version_script: "libaaudio.map.txt",
 }
 
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 4090286..56c0170 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -10,14 +10,71 @@
         "legacy",
         "utility",
     ],
+    header_libs: ["libaaudio_headers"],
+    export_header_lib_headers: ["libaaudio_headers"],
+    version_script: "libaaudio.map.txt",
+
+    srcs: [
+        "core/AAudioAudio.cpp",
+    ],
+
+    cflags: [
+        "-Wno-unused-parameter",
+        "-Wall",
+        "-Werror",
+
+        // By default, all symbols are hidden.
+        // "-fvisibility=hidden",
+        // AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+        "-DAAUDIO_API=__attribute__((visibility(\"default\")))",
+    ],
+
+    shared_libs: [
+        "libaaudio_internal",
+        "libaudioclient",
+        "libaudioutils",
+        "liblog",
+        "libcutils",
+        "libutils",
+        "libbinder",
+    ],
+}
+
+cc_library {
+    name: "libaaudio_internal",
+
+    local_include_dirs: [
+        "binding",
+        "client",
+        "core",
+        "fifo",
+        "legacy",
+        "utility",
+    ],
+
     export_include_dirs: ["."],
     header_libs: ["libaaudio_headers"],
     export_header_lib_headers: ["libaaudio_headers"],
 
+    shared_libs: [
+        "libaudioclient",
+        "libaudioutils",
+        "liblog",
+        "libcutils",
+        "libutils",
+        "libbinder",
+    ],
+
+    cflags: [
+        "-Wno-unused-parameter",
+        "-Wall",
+        "-Werror",
+    ],
+
     srcs: [
+        "core/AudioGlobal.cpp",
         "core/AudioStream.cpp",
         "core/AudioStreamBuilder.cpp",
-        "core/AAudioAudio.cpp",
         "core/AAudioStreamParameters.cpp",
         "legacy/AudioStreamLegacy.cpp",
         "legacy/AudioStreamRecord.cpp",
@@ -54,25 +111,4 @@
         "flowgraph/SourceI16.cpp",
         "flowgraph/SourceI24.cpp",
     ],
-
-    cflags: [
-        "-Wno-unused-parameter",
-        "-Wall",
-        "-Werror",
-
-        // By default, all symbols are hidden.
-        // "-fvisibility=hidden",
-        // AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
-        "-DAAUDIO_API=__attribute__((visibility(\"default\")))",
-    ],
-
-    shared_libs: [
-        "libaudioclient",
-        "libaudioutils",
-        "liblog",
-        "libcutils",
-        "libutils",
-        "libbinder",
-        "libaudiomanager",
-    ],
 }
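
A side note on the -DAAUDIO_API define carried over into the facade library above: a small hypothetical sketch of how such a macro marks the intended public entry points. In this build the exported set is ultimately controlled by the version script (src/libaaudio.map.txt), so the sketch is purely illustrative; neither function below exists in AAudio.

    // Sketch: AAUDIO_API expands to a default-visibility attribute, tagging
    // the functions meant to be part of the library's public surface.
    #ifndef AAUDIO_API
    #define AAUDIO_API __attribute__((visibility("default")))
    #endif

    static int internalHelper() {                      // internal linkage, not exported
        return 1;
    }

    AAUDIO_API int AAudioExample_publicEntryPoint() {  // hypothetical exported entry point
        return internalHelper();
    }
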
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index a6cc45b..366cc87 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -89,7 +89,11 @@
     if (mAudioEndpoint.isFreeRunning()) {
         //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
         // Update data queue based on the timing model.
-        int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+        // Jitter in the DSP can cause late writes to the FIFO.
+        // This might be caused by resampling.
+        // We want to read the FIFO after the latest possible time
+        // that the DSP could have written the data.
+        int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
         // TODO refactor, maybe use setRemoteCounter()
         mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
     }
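
To make the intent of the call-site changes in this file concrete, a small self-contained sketch of the idea: when the DSP's write time is jittery, the reader assumes the write happened as late as the model allows, so it never reads ahead of valid data. The struct, rate, and offset below are illustrative values, not the AAudio implementation.

    // Illustrative sketch of "read after the latest possible write time".
    #include <cstdint>

    struct LatestTimeModelSketch {
        int64_t markerPositionFrames = 0;                  // frames at markerTimeNanos
        int64_t markerTimeNanos = 0;
        int64_t nanosPerFrame = 1000000000LL / 48000;      // example: 48 kHz
        int64_t lateOffsetNanos = 500000;                  // jitter allowance, e.g. 0.5 ms

        int64_t convertTimeToPosition(int64_t nanoTime) const {
            return markerPositionFrames + (nanoTime - markerTimeNanos) / nanosPerFrame;
        }
        // Conservative position: assume the timestamp in hand is the latest it could be.
        int64_t convertLatestTimeToPosition(int64_t nanoTime) const {
            return convertTimeToPosition(nanoTime - lateOffsetNanos);
        }
    };
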
@@ -139,7 +143,7 @@
                 // the writeCounter might have just advanced in the background,
                 // causing us to sleep until a later burst.
                 int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
-                wakeTime = mClockModel.convertPositionToTime(nextPosition);
+                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
             }
                 break;
             default:
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index d26b352..9abdf53 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -19,12 +19,11 @@
 #include <log/log.h>
 
 #include <stdint.h>
+#include <algorithm>
 
 #include "utility/AudioClock.h"
 #include "IsochronousClockModel.h"
 
-#define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
-
 using namespace aaudio;
 
 IsochronousClockModel::IsochronousClockModel()
@@ -32,7 +31,7 @@
         , mMarkerNanoTime(0)
         , mSampleRate(48000)
         , mFramesPerBurst(64)
-        , mMaxLatenessInNanos(0)
+        , mMaxMeasuredLatenessNanos(0)
         , mState(STATE_STOPPED)
 {
 }
@@ -41,8 +40,7 @@
 }
 
 void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
-    ALOGV("setPositionAndTime(%lld, %lld)",
-          (long long) framePosition, (long long) nanoTime);
+    ALOGV("setPositionAndTime, %lld, %lld", (long long) framePosition, (long long) nanoTime);
     mMarkerFramePosition = framePosition;
     mMarkerNanoTime = nanoTime;
 }
@@ -54,7 +52,9 @@
 }
 
 void IsochronousClockModel::stop(int64_t nanoTime) {
-    ALOGV("stop(nanos = %lld)\n", (long long) nanoTime);
+    ALOGD("stop(nanos = %lld) max lateness = %d micros\n",
+        (long long) nanoTime,
+        (int) (mMaxMeasuredLatenessNanos / 1000));
     setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
     // TODO should we set position?
     mState = STATE_STOPPED;
@@ -69,9 +69,10 @@
 }
 
 void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
-//    ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
-//         (long long)framePosition,
-//         (long long)nanoTime);
+    mTimestampCount++;
+    // Log position and time in CSV format so we can import it easily into spreadsheets.
+    //ALOGD("%s() CSV, %d, %lld, %lld", __func__,
+          //mTimestampCount, (long long)framePosition, (long long)nanoTime);
     int64_t framesDelta = framePosition - mMarkerFramePosition;
     int64_t nanosDelta = nanoTime - mMarkerNanoTime;
     if (nanosDelta < 1000) {
@@ -110,22 +111,54 @@
             // Earlier than expected timestamp.
             // This data is probably more accurate, so use it.
             // Or we may be drifting due to a fast HW clock.
-//            int microsDelta = (int) (nanosDelta / 1000);
-//            int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
-//            ALOGD("processTimestamp() - STATE_RUNNING - %7d < %7d so %4d micros EARLY",
-//                 microsDelta, expectedMicrosDelta, (expectedMicrosDelta - microsDelta));
+            //int microsDelta = (int) (nanosDelta / 1000);
+            //int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
+            //ALOGD("%s() - STATE_RUNNING - #%d, %4d micros EARLY",
+                //__func__, mTimestampCount, expectedMicrosDelta - microsDelta);
 
             setPositionAndTime(framePosition, nanoTime);
-        } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
-            // Later than expected timestamp.
-//            int microsDelta = (int) (nanosDelta / 1000);
-//            int expectedMicrosDeadline = (int) ((expectedNanosDelta + mMaxLatenessInNanos) / 1000);
-//            ALOGD("processTimestamp() - STATE_RUNNING - %7d > %7d so %4d micros LATE",
-//                  microsDelta, expectedMicrosDeadline, (microsDelta - expectedMicrosDeadline));
+        } else if (nanosDelta > (expectedNanosDelta + (2 * mBurstPeriodNanos))) {
+            // In this case we do not update mMaxMeasuredLatenessNanos because it
+            // would force it too high.
+            // mMaxMeasuredLatenessNanos should range between 1x and 2x mBurstPeriodNanos.
+            //int32_t measuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
+            //ALOGD("%s() - STATE_RUNNING - #%d, lateness %d - max %d = %4d micros VERY LATE",
+                  //__func__,
+                  //mTimestampCount,
+                  //measuredLatenessNanos / 1000,
+                  //mMaxMeasuredLatenessNanos / 1000,
+                  //(measuredLatenessNanos - mMaxMeasuredLatenessNanos) / 1000
+                  //);
 
-            // When we are late it may be because of preemption in the kernel or
-            //  we may be drifting due to a slow HW clock.
-            setPositionAndTime(framePosition,  nanoTime - mMaxLatenessInNanos);
+            // This typically happens when we are modelling a service instead of a DSP.
+            setPositionAndTime(framePosition,  nanoTime - (2 * mBurstPeriodNanos));
+        } else if (nanosDelta > (expectedNanosDelta + mMaxMeasuredLatenessNanos)) {
+            //int32_t previousLatenessNanos = mMaxMeasuredLatenessNanos;
+            mMaxMeasuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
+
+            //ALOGD("%s() - STATE_RUNNING - #%d, newmax %d - oldmax %d = %4d micros LATE",
+                  //__func__,
+                  //mTimestampCount,
+                  //mMaxMeasuredLatenessNanos / 1000,
+                  //previousLatenessNanos / 1000,
+                  //(mMaxMeasuredLatenessNanos - previousLatenessNanos) / 1000
+                  //);
+
+            // When we are late, it may be because of preemption in the kernel,
+            // or timing jitter caused by resampling in the DSP,
+            // or we may be drifting due to a slow HW clock.
+            // We add a slight drift value just in case there is actual long-term drift
+            // forward caused by a slower clock.
+            // If the clock is faster, then the model will get pushed earlier
+            // by the code in the preceding branch.
+            // The two opposing forces should allow the model to track the real clock
+            // over a long time.
+            int64_t driftingTime = mMarkerNanoTime + expectedNanosDelta + kDriftNanos;
+            setPositionAndTime(framePosition,  driftingTime);
+            //ALOGD("%s() - #%d, max lateness = %d micros",
+                  //__func__,
+                  //mTimestampCount,
+                  //(int) (mMaxMeasuredLatenessNanos / 1000));
         }
         break;
     default:
@@ -145,9 +178,12 @@
     update();
 }
 
+// Update expected lateness based on sampleRate and framesPerBurst
 void IsochronousClockModel::update() {
-    int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
-    mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
+    mBurstPeriodNanos = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
+    // Timestamps may be late by up to a burst because we are sampling at a random time
+    // within the burst period after the DSP position is actually updated.
+    mMaxMeasuredLatenessNanos = mBurstPeriodNanos;
 }
 
 int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
@@ -190,11 +226,25 @@
     return position;
 }
 
+int32_t IsochronousClockModel::getLateTimeOffsetNanos() const {
+    // This will never be < 0 because mMaxMeasuredLatenessNanos starts at
+    // mBurstPeriodNanos and only gets bigger.
+    return (mMaxMeasuredLatenessNanos - mBurstPeriodNanos) + kExtraLatenessNanos;
+}
+
+int64_t IsochronousClockModel::convertPositionToLatestTime(int64_t framePosition) const {
+    return convertPositionToTime(framePosition) + getLateTimeOffsetNanos();
+}
+
+int64_t IsochronousClockModel::convertLatestTimeToPosition(int64_t nanoTime) const {
+    return convertTimeToPosition(nanoTime - getLateTimeOffsetNanos());
+}
+
 void IsochronousClockModel::dump() const {
     ALOGD("mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
     ALOGD("mMarkerNanoTime      = %lld", (long long) mMarkerNanoTime);
     ALOGD("mSampleRate          = %6d", mSampleRate);
     ALOGD("mFramesPerBurst      = %6d", mFramesPerBurst);
-    ALOGD("mMaxLatenessInNanos  = %6d", mMaxLatenessInNanos);
+    ALOGD("mMaxMeasuredLatenessNanos = %6d", mMaxMeasuredLatenessNanos);
     ALOGD("mState               = %6d", mState);
 }
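
Summarizing the new lateness handling in one place, the following is a condensed, hypothetical restatement of the three branches above; the constants mirror the change, but the class is stripped down and is not the real IsochronousClockModel.

    // Condensed sketch of the lateness/drift logic introduced above.
    #include <cstdint>

    struct ClockModelSketch {
        int64_t markerFramePosition = 0;
        int64_t markerNanoTime = 0;
        int32_t burstPeriodNanos = 4000000;           // e.g. 192 frames at 48 kHz
        int32_t maxMeasuredLatenessNanos = 4000000;   // starts at one burst period
        static constexpr int32_t kDriftNanos = 10 * 1000;

        void setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
            markerFramePosition = framePosition;
            markerNanoTime = nanoTime;
        }

        void onTimestamp(int64_t framePosition, int64_t nanoTime, int64_t expectedNanosDelta) {
            const int64_t nanosDelta = nanoTime - markerNanoTime;
            if (nanosDelta < expectedNanosDelta) {
                // Early: probably more accurate, or the HW clock is fast; adopt it.
                setPositionAndTime(framePosition, nanoTime);
            } else if (nanosDelta > expectedNanosDelta + 2 * burstPeriodNanos) {
                // Very late: clamp so a single outlier cannot inflate the measured maximum.
                setPositionAndTime(framePosition, nanoTime - 2 * burstPeriodNanos);
            } else if (nanosDelta > expectedNanosDelta + maxMeasuredLatenessNanos) {
                // Moderately late: remember the new maximum and drift slightly forward
                // so a slow HW clock can still be tracked over the long term.
                maxMeasuredLatenessNanos = static_cast<int32_t>(nanosDelta - expectedNanosDelta);
                setPositionAndTime(framePosition, markerNanoTime + expectedNanosDelta + kDriftNanos);
            }
        }
    };
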
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 46ca48e..582bf4e 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -18,6 +18,7 @@
 #define ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
 
 #include <stdint.h>
+#include "utility/AudioClock.h"
 
 namespace aaudio {
 
@@ -79,6 +80,15 @@
     int64_t convertPositionToTime(int64_t framePosition) const;
 
     /**
+     * Calculate the latest estimated time that the stream will be at that position.
+     * The more jittery the clock is, the later this will be.
+     *
+     * @param framePosition
+     * @return time in nanoseconds
+     */
+    int64_t convertPositionToLatestTime(int64_t framePosition) const;
+
+    /**
      * Calculate an estimated position where the stream will be at the specified time.
      *
      * @param nanoTime time of interest
@@ -87,6 +97,18 @@
     int64_t convertTimeToPosition(int64_t nanoTime) const;
 
     /**
+     * Calculate the corresponding estimated position based on the specified time being
+     * the latest possible time.
+     *
+     * For the same nanoTime, this may return an earlier position than
+     * convertTimeToPosition().
+     *
+     * @param nanoTime
+     * @return position in frames
+     */
+    int64_t convertLatestTimeToPosition(int64_t nanoTime) const;
+
+    /**
      * @param framesDelta difference in frames
      * @return duration in nanoseconds
      */
@@ -101,6 +123,9 @@
     void dump() const;
 
 private:
+
+    int32_t getLateTimeOffsetNanos() const;
+
     enum clock_model_state_t {
         STATE_STOPPED,
         STATE_STARTING,
@@ -108,13 +133,23 @@
         STATE_RUNNING
     };
 
+    // Amount of time to drift forward when we get a late timestamp.
+    // This value was calculated to allow tracking of a clock with 50 ppm error.
+    static constexpr int32_t   kDriftNanos         =  10 * 1000;
+    // TODO review value of kExtraLatenessNanos
+    static constexpr int32_t   kExtraLatenessNanos = 100 * 1000;
+
     int64_t             mMarkerFramePosition;
     int64_t             mMarkerNanoTime;
     int32_t             mSampleRate;
     int32_t             mFramesPerBurst;
-    int32_t             mMaxLatenessInNanos;
+    int32_t             mBurstPeriodNanos;
+    // Includes mBurstPeriodNanos because we sample randomly over time.
+    int32_t             mMaxMeasuredLatenessNanos;
     clock_model_state_t mState;
 
+    int32_t             mTimestampCount = 0;
+
     void update();
 };
 
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 44d5122..8040e6a 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -27,6 +27,7 @@
 #include <aaudio/AAudioTesting.h>
 
 #include "AudioClock.h"
+#include "AudioGlobal.h"
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
 #include "binding/AAudioCommon.h"
@@ -45,63 +46,14 @@
         return AAUDIO_ERROR_NULL; \
     }
 
-#define AAUDIO_CASE_ENUM(name) case name: return #name
-
 AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) {
-    switch (returnCode) {
-        AAUDIO_CASE_ENUM(AAUDIO_OK);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_DISCONNECTED);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
-        // reserved
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
-        // reserved
-        // reserved
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
-         // reserved
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNAVAILABLE);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_FREE_HANDLES);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_MEMORY);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_FORMAT);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
-        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_RATE);
-    }
-    return "Unrecognized AAudio error.";
+    return AudioGlobal_convertResultToText(returnCode);
 }
 
 AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state) {
-    switch (state) {
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNKNOWN);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_DISCONNECTED);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
-        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
-    }
-    return "Unrecognized AAudio state.";
+    return AudioGlobal_convertStreamStateToText(state);
 }
 
-#undef AAUDIO_CASE_ENUM
-
-
-/******************************************
- * Static globals.
- */
-static aaudio_policy_t s_MMapPolicy = AAUDIO_UNSPECIFIED;
-
 static AudioStream *convertAAudioStreamToAudioStream(AAudioStream* stream)
 {
     return (AudioStream*) stream;
@@ -543,23 +495,11 @@
 }
 
 AAUDIO_API aaudio_policy_t AAudio_getMMapPolicy() {
-    return s_MMapPolicy;
+    return AudioGlobal_getMMapPolicy();
 }
 
 AAUDIO_API aaudio_result_t AAudio_setMMapPolicy(aaudio_policy_t policy) {
-    aaudio_result_t result = AAUDIO_OK;
-    switch(policy) {
-        case AAUDIO_UNSPECIFIED:
-        case AAUDIO_POLICY_NEVER:
-        case AAUDIO_POLICY_AUTO:
-        case AAUDIO_POLICY_ALWAYS:
-            s_MMapPolicy = policy;
-            break;
-        default:
-            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
-            break;
-    }
-    return result;
+    return AudioGlobal_setMMapPolicy(policy);
 }
 
 AAUDIO_API bool AAudioStream_isMMapUsed(AAudioStream* stream)
diff --git a/media/libaaudio/src/core/AudioGlobal.cpp b/media/libaaudio/src/core/AudioGlobal.cpp
new file mode 100644
index 0000000..e6d9a0d
--- /dev/null
+++ b/media/libaaudio/src/core/AudioGlobal.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+
+#include "AudioGlobal.h"
+
+/******************************************
+ * Static globals.
+ */
+namespace aaudio {
+
+static aaudio_policy_t g_MMapPolicy = AAUDIO_UNSPECIFIED;
+
+aaudio_policy_t AudioGlobal_getMMapPolicy() {
+    return g_MMapPolicy;
+}
+
+aaudio_result_t AudioGlobal_setMMapPolicy(aaudio_policy_t policy) {
+    aaudio_result_t result = AAUDIO_OK;
+    switch(policy) {
+        case AAUDIO_UNSPECIFIED:
+        case AAUDIO_POLICY_NEVER:
+        case AAUDIO_POLICY_AUTO:
+        case AAUDIO_POLICY_ALWAYS:
+            g_MMapPolicy = policy;
+            break;
+        default:
+            result = AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+            break;
+    }
+    return result;
+}
+
+#define AAUDIO_CASE_ENUM(name) case name: return #name
+
+const char* AudioGlobal_convertResultToText(aaudio_result_t returnCode) {
+    switch (returnCode) {
+        AAUDIO_CASE_ENUM(AAUDIO_OK);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_DISCONNECTED);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
+        // reserved
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INTERNAL);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_STATE);
+        // reserved
+        // reserved
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_HANDLE);
+        // reserved
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNIMPLEMENTED);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_UNAVAILABLE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_FREE_HANDLES);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_MEMORY);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NULL);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_TIMEOUT);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_WOULD_BLOCK);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_FORMAT);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_OUT_OF_RANGE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_NO_SERVICE);
+        AAUDIO_CASE_ENUM(AAUDIO_ERROR_INVALID_RATE);
+    }
+    return "Unrecognized AAudio error.";
+}
+
+const char* AudioGlobal_convertStreamStateToText(aaudio_stream_state_t state) {
+    switch (state) {
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNINITIALIZED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_UNKNOWN);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_OPEN);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STARTED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_PAUSED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_FLUSHED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_STOPPED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_DISCONNECTED);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSING);
+        AAUDIO_CASE_ENUM(AAUDIO_STREAM_STATE_CLOSED);
+    }
+    return "Unrecognized AAudio state.";
+}
+
+#undef AAUDIO_CASE_ENUM
+
+}  // namespace aaudio
diff --git a/media/libaaudio/src/core/AudioGlobal.h b/media/libaaudio/src/core/AudioGlobal.h
new file mode 100644
index 0000000..312cef2
--- /dev/null
+++ b/media/libaaudio/src/core/AudioGlobal.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef AAUDIO_AUDIOGLOBAL_H
+#define AAUDIO_AUDIOGLOBAL_H
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+
+
+namespace aaudio {
+
+aaudio_policy_t AudioGlobal_getMMapPolicy();
+aaudio_result_t AudioGlobal_setMMapPolicy(aaudio_policy_t policy);
+
+const char* AudioGlobal_convertResultToText(aaudio_result_t returnCode);
+const char* AudioGlobal_convertStreamStateToText(aaudio_stream_state_t state);
+
+}  // namespace aaudio
+
+#endif  // AAUDIO_AUDIOGLOBAL_H
+
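
For orientation, a hedged sketch of how an internal caller might use the new AudioGlobal_* helpers once this header is in place; the public C API in AAudioAudio.cpp above simply forwards to the same functions. The helper function name below is hypothetical.

    // Sketch: configure the MMap policy and translate a failure code for logging.
    #include <aaudio/AAudio.h>
    #include <aaudio/AAudioTesting.h>
    #include "core/AudioGlobal.h"

    aaudio_result_t configureMMapPolicySketch() {           // hypothetical helper
        aaudio_result_t result = aaudio::AudioGlobal_setMMapPolicy(AAUDIO_POLICY_AUTO);
        if (result != AAUDIO_OK) {
            // Yields a readable name such as "AAUDIO_ERROR_ILLEGAL_ARGUMENT".
            const char* text = aaudio::AudioGlobal_convertResultToText(result);
            (void) text;                                     // would normally be logged
        }
        return result;
    }
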
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 9b77223..5303631 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -25,8 +25,9 @@
 #include "AudioStreamBuilder.h"
 #include "AudioStream.h"
 #include "AudioClock.h"
+#include "AudioGlobal.h"
 
-using namespace aaudio;
+namespace aaudio {
 
 
 // Sequential number assigned to streams solely for debugging purposes.
@@ -51,7 +52,7 @@
                           || getState() == AAUDIO_STREAM_STATE_UNINITIALIZED
                           || getState() == AAUDIO_STREAM_STATE_DISCONNECTED),
                         "~AudioStream() - still in use, state = %s",
-                        AAudio_convertStreamStateToText(getState()));
+                        AudioGlobal_convertStreamStateToText(getState()));
 
     mPlayerBase->clearParentReference(); // remove reference to this AudioStream
 }
@@ -155,7 +156,7 @@
         case AAUDIO_STREAM_STATE_CLOSED:
         default:
             ALOGW("safePause() stream not running, state = %s",
-                  AAudio_convertStreamStateToText(getState()));
+                  AudioGlobal_convertStreamStateToText(getState()));
             return AAUDIO_ERROR_INVALID_STATE;
     }
 
@@ -240,7 +241,7 @@
         case AAUDIO_STREAM_STATE_CLOSED:
         default:
             ALOGW("%s() stream not running, state = %s", __func__,
-                  AAudio_convertStreamStateToText(getState()));
+                  AudioGlobal_convertStreamStateToText(getState()));
             return AAUDIO_ERROR_INVALID_STATE;
     }
 
@@ -488,3 +489,5 @@
 void AudioStream::MyPlayerBase::destroy() {
     unregisterWithAudioManager();
 }
+
+}  // namespace aaudio
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 08f4958..44f45b3 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -27,6 +27,7 @@
 #include "binding/AAudioBinderClient.h"
 #include "client/AudioStreamInternalCapture.h"
 #include "client/AudioStreamInternalPlay.h"
+#include "core/AudioGlobal.h"
 #include "core/AudioStream.h"
 #include "core/AudioStreamBuilder.h"
 #include "legacy/AudioStreamRecord.h"
@@ -112,7 +113,7 @@
     }
 
     // The API setting is the highest priority.
-    aaudio_policy_t mmapPolicy = AAudio_getMMapPolicy();
+    aaudio_policy_t mmapPolicy = AudioGlobal_getMMapPolicy();
     // If not specified then get from a system property.
     if (mmapPolicy == AAUDIO_UNSPECIFIED) {
         mmapPolicy = AAudioProperty_getMMapPolicy();
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/src/libaaudio.map.txt
similarity index 100%
rename from media/libaaudio/libaaudio.map.txt
rename to media/libaaudio/src/libaaudio.map.txt
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 96ed56a..cdd02c0 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -24,6 +24,7 @@
 #include <utils/Errors.h>
 
 #include "aaudio/AAudio.h"
+#include "core/AudioGlobal.h"
 #include <aaudio/AAudioTesting.h>
 #include <math.h>
 #include <system/audio-base.h>
@@ -355,7 +356,7 @@
         case AAUDIO_STREAM_STATE_DISCONNECTED:
         default:
             ALOGE("can only flush stream when PAUSED, OPEN or STOPPED, state = %s",
-                  AAudio_convertStreamStateToText(state));
+                  aaudio::AudioGlobal_convertStreamStateToText(state));
             result =  AAUDIO_ERROR_INVALID_STATE;
             break;
     }
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 6101e99..19cd0a0 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -11,7 +11,7 @@
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_marshalling.cpp"],
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libbinder",
         "libcutils",
         "libutils",
@@ -23,7 +23,7 @@
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_clock_model.cpp"],
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libaudioutils",
         "libcutils",
         "libutils",
@@ -34,7 +34,7 @@
     name: "test_block_adapter",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_block_adapter.cpp"],
-    shared_libs: ["libaaudio"],
+    shared_libs: ["libaaudio_internal"],
 }
 
 cc_test {
@@ -170,7 +170,7 @@
     name: "test_atomic_fifo",
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_atomic_fifo.cpp"],
-    shared_libs: ["libaaudio"],
+    shared_libs: ["libaaudio_internal"],
 }
 
 cc_test {
@@ -178,7 +178,7 @@
     defaults: ["libaaudio_tests_defaults"],
     srcs: ["test_flowgraph.cpp"],
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libbinder",
         "libcutils",
         "libutils",
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
deleted file mode 100644
index 783eef3..0000000
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
-**
-** Copyright 2007, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_AUDIO_MIXER_H
-#define ANDROID_AUDIO_MIXER_H
-
-#include <map>
-#include <pthread.h>
-#include <sstream>
-#include <stdint.h>
-#include <sys/types.h>
-#include <unordered_map>
-#include <vector>
-
-#include <android/os/IExternalVibratorService.h>
-#include <media/AudioBufferProvider.h>
-#include <media/AudioResampler.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/BufferProviders.h>
-#include <system/audio.h>
-#include <utils/Compat.h>
-#include <utils/threads.h>
-
-// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
-#define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
-
-// This must match frameworks/av/services/audioflinger/Configuration.h
-#define FLOAT_AUX
-
-namespace android {
-
-namespace NBLog {
-class Writer;
-}   // namespace NBLog
-
-// ----------------------------------------------------------------------------
-
-class AudioMixer
-{
-public:
-    // Do not change these unless underlying code changes.
-    // This mixer has a hard-coded upper limit of 8 channels for output.
-    static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
-    static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
-    // maximum number of channels supported for the content
-    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
-
-    static const uint16_t UNITY_GAIN_INT = 0x1000;
-    static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
-
-    enum { // names
-        // setParameter targets
-        TRACK           = 0x3000,
-        RESAMPLE        = 0x3001,
-        RAMP_VOLUME     = 0x3002, // ramp to new volume
-        VOLUME          = 0x3003, // don't ramp
-        TIMESTRETCH     = 0x3004,
-
-        // set Parameter names
-        // for target TRACK
-        CHANNEL_MASK    = 0x4000,
-        FORMAT          = 0x4001,
-        MAIN_BUFFER     = 0x4002,
-        AUX_BUFFER      = 0x4003,
-        DOWNMIX_TYPE    = 0X4004,
-        MIXER_FORMAT    = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
-        MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
-        // for haptic
-        HAPTIC_ENABLED  = 0x4007, // Set haptic data from this track should be played or not.
-        HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
-        // for target RESAMPLE
-        SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
-                                  // parameter 'value' is the new sample rate in Hz.
-                                  // Only creates a sample rate converter the first time that
-                                  // the track sample rate is different from the mix sample rate.
-                                  // If the new sample rate is the same as the mix sample rate,
-                                  // and a sample rate converter already exists,
-                                  // then the sample rate converter remains present but is a no-op.
-        RESET           = 0x4101, // Reset sample rate converter without changing sample rate.
-                                  // This clears out the resampler's input buffer.
-        REMOVE          = 0x4102, // Remove the sample rate converter on this track name;
-                                  // the track is restored to the mix sample rate.
-        // for target RAMP_VOLUME and VOLUME (8 channels max)
-        // FIXME use float for these 3 to improve the dynamic range
-        VOLUME0         = 0x4200,
-        VOLUME1         = 0x4201,
-        AUXLEVEL        = 0x4210,
-        // for target TIMESTRETCH
-        PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
-                                  // parameter 'value' is a pointer to the new playback rate.
-    };
-
-    typedef enum { // Haptic intensity, should keep consistent with VibratorService
-        HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
-        HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
-        HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
-        HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
-        HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
-        HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
-    } haptic_intensity_t;
-    static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
-    static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
-    static const constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
-
-    static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
-        switch (hapticIntensity) {
-        case HAPTIC_SCALE_MUTE:
-        case HAPTIC_SCALE_VERY_LOW:
-        case HAPTIC_SCALE_LOW:
-        case HAPTIC_SCALE_NONE:
-        case HAPTIC_SCALE_HIGH:
-        case HAPTIC_SCALE_VERY_HIGH:
-            return true;
-        default:
-            return false;
-        }
-    }
-
-    AudioMixer(size_t frameCount, uint32_t sampleRate)
-        : mSampleRate(sampleRate)
-        , mFrameCount(frameCount) {
-        pthread_once(&sOnceControl, &sInitRoutine);
-    }
-
-    // Create a new track in the mixer.
-    //
-    // \param name        a unique user-provided integer associated with the track.
-    //                    If name already exists, the function will abort.
-    // \param channelMask output channel mask.
-    // \param format      PCM format
-    // \param sessionId   Session id for the track. Tracks with the same
-    //                    session id will be submixed together.
-    //
-    // \return OK        on success.
-    //         BAD_VALUE if the format does not satisfy isValidFormat()
-    //                   or the channelMask does not satisfy isValidChannelMask().
-    status_t    create(
-            int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId);
-
-    bool        exists(int name) const {
-        return mTracks.count(name) > 0;
-    }
-
-    // Free an allocated track by name.
-    void        destroy(int name);
-
-    // Enable or disable an allocated track by name
-    void        enable(int name);
-    void        disable(int name);
-
-    void        setParameter(int name, int target, int param, void *value);
-
-    void        setBufferProvider(int name, AudioBufferProvider* bufferProvider);
-
-    void        process() {
-        for (const auto &pair : mTracks) {
-            // Clear contracted buffer before processing if contracted channels are saved
-            const std::shared_ptr<Track> &t = pair.second;
-            if (t->mKeepContractedChannels) {
-                t->clearContractedBuffer();
-            }
-        }
-        (this->*mHook)();
-        processHapticData();
-    }
-
-    size_t      getUnreleasedFrames(int name) const;
-
-    std::string trackNames() const {
-        std::stringstream ss;
-        for (const auto &pair : mTracks) {
-            ss << pair.first << " ";
-        }
-        return ss.str();
-    }
-
-    void        setNBLogWriter(NBLog::Writer *logWriter) {
-        mNBLogWriter = logWriter;
-    }
-
-    static inline bool isValidFormat(audio_format_t format) {
-        switch (format) {
-        case AUDIO_FORMAT_PCM_8_BIT:
-        case AUDIO_FORMAT_PCM_16_BIT:
-        case AUDIO_FORMAT_PCM_24_BIT_PACKED:
-        case AUDIO_FORMAT_PCM_32_BIT:
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return true;
-        default:
-            return false;
-        }
-    }
-
-    static inline bool isValidChannelMask(audio_channel_mask_t channelMask) {
-        return audio_channel_mask_is_valid(channelMask); // the RemixBufferProvider is flexible.
-    }
-
-private:
-
-    /* For multi-format functions (calls template functions
-     * in AudioMixerOps.h).  The template parameters are as follows:
-     *
-     *   MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
-     *   USEFLOATVOL (set to true if float volume is used)
-     *   ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
-     *   TO: int32_t (Q4.27) or float
-     *   TI: int32_t (Q4.27) or int16_t (Q0.15) or float
-     *   TA: int32_t (Q4.27)
-     */
-
-    enum {
-        // FIXME this representation permits up to 8 channels
-        NEEDS_CHANNEL_COUNT__MASK   = 0x00000007,
-    };
-
-    enum {
-        NEEDS_CHANNEL_1             = 0x00000000,   // mono
-        NEEDS_CHANNEL_2             = 0x00000001,   // stereo
-
-        // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
-
-        NEEDS_MUTE                  = 0x00000100,
-        NEEDS_RESAMPLE              = 0x00001000,
-        NEEDS_AUX                   = 0x00010000,
-    };
-
-    // hook types
-    enum {
-        PROCESSTYPE_NORESAMPLEONETRACK, // others set elsewhere
-    };
-
-    enum {
-        TRACKTYPE_NOP,
-        TRACKTYPE_RESAMPLE,
-        TRACKTYPE_NORESAMPLE,
-        TRACKTYPE_NORESAMPLEMONO,
-    };
-
-    // process hook functionality
-    using process_hook_t = void(AudioMixer::*)();
-
-    struct Track;
-    using hook_t = void(Track::*)(int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
-
-    struct Track {
-        Track()
-            : bufferProvider(nullptr)
-        {
-            // TODO: move additional initialization here.
-        }
-
-        ~Track()
-        {
-            // bufferProvider, mInputBufferProvider need not be deleted.
-            mResampler.reset(nullptr);
-            // Ensure the order of destruction of buffer providers as they
-            // release the upstream provider in the destructor.
-            mTimestretchBufferProvider.reset(nullptr);
-            mPostDownmixReformatBufferProvider.reset(nullptr);
-            mDownmixerBufferProvider.reset(nullptr);
-            mReformatBufferProvider.reset(nullptr);
-            mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
-            mAdjustChannelsBufferProvider.reset(nullptr);
-        }
-
-        bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
-        bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
-        bool        doesResample() const { return mResampler.get() != nullptr; }
-        void        resetResampler() { if (mResampler.get() != nullptr) mResampler->reset(); }
-        void        adjustVolumeRamp(bool aux, bool useFloat = false);
-        size_t      getUnreleasedFrames() const { return mResampler.get() != nullptr ?
-                                                    mResampler->getUnreleasedFrames() : 0; };
-
-        status_t    prepareForDownmix();
-        void        unprepareForDownmix();
-        status_t    prepareForReformat();
-        void        unprepareForReformat();
-        status_t    prepareForAdjustChannels();
-        void        unprepareForAdjustChannels();
-        status_t    prepareForAdjustChannelsNonDestructive(size_t frames);
-        void        unprepareForAdjustChannelsNonDestructive();
-        void        clearContractedBuffer();
-        bool        setPlaybackRate(const AudioPlaybackRate &playbackRate);
-        void        reconfigureBufferProviders();
-
-        static hook_t getTrackHook(int trackType, uint32_t channelCount,
-                audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-
-        void track__nop(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-
-        template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
-            typename TO, typename TI, typename TA>
-        void volumeMix(TO *out, size_t outFrames, const TI *in, TA *aux, bool ramp);
-
-        uint32_t    needs;
-
-        // TODO: Eventually remove legacy integer volume settings
-        union {
-        int16_t     volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
-        int32_t     volumeRL;
-        };
-
-        int32_t     prevVolume[MAX_NUM_VOLUMES];
-        int32_t     volumeInc[MAX_NUM_VOLUMES];
-        int32_t     auxInc;
-        int32_t     prevAuxLevel;
-        int16_t     auxLevel;       // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
-
-        uint16_t    frameCount;
-
-        uint8_t     channelCount;   // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
-        uint8_t     unused_padding; // formerly format, was always 16
-        uint16_t    enabled;        // actually bool
-        audio_channel_mask_t channelMask;
-
-        // actual buffer provider used by the track hooks, see DownmixerBufferProvider below
-        //  for how the Track buffer provider is wrapped by another one when dowmixing is required
-        AudioBufferProvider*                bufferProvider;
-
-        mutable AudioBufferProvider::Buffer buffer; // 8 bytes
-
-        hook_t      hook;
-        const void  *mIn;             // current location in buffer
-
-        std::unique_ptr<AudioResampler> mResampler;
-        uint32_t            sampleRate;
-        int32_t*           mainBuffer;
-        int32_t*           auxBuffer;
-
-        /* Buffer providers are constructed to translate the track input data as needed.
-         *
-         * TODO: perhaps make a single PlaybackConverterProvider class to move
-         * all pre-mixer track buffer conversions outside the AudioMixer class.
-         *
-         * 1) mInputBufferProvider: The AudioTrack buffer provider.
-         * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
-         *    channel format to another. Expanded channels are filled with zeros and put at the end
-         *    of each audio frame. Contracted channels are copied to the end of the buffer.
-         * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
-         *    This is currently using at audio-haptic coupled playback to separate audio and haptic
-         *    data. Contracted channels could be written to given buffer.
-         * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
-         *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
-         *    requires reformat. For example, it may convert floating point input to
-         *    PCM_16_bit if that's required by the downmixer.
-         * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
-         *    the number of channels required by the mixer sink.
-         * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
-         *    the downmixer requirements to the mixer engine input requirements.
-         * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
-         */
-        AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
-        // TODO: combine mAdjustChannelsBufferProvider and
-        // mContractChannelsNonDestructiveBufferProvider
-        std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
-        std::unique_ptr<PassthruBufferProvider> mTimestretchBufferProvider;
-
-        int32_t     sessionId;
-
-        audio_format_t mMixerFormat;     // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
-        audio_format_t mFormat;          // input track format
-        audio_format_t mMixerInFormat;   // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
-                                         // each track must be converted to this format.
-        audio_format_t mDownmixRequiresFormat;  // required downmixer format
-                                                // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
-                                                // AUDIO_FORMAT_INVALID if no required format
-
-        float          mVolume[MAX_NUM_VOLUMES];     // floating point set volume
-        float          mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
-        float          mVolumeInc[MAX_NUM_VOLUMES];  // floating point volume increment
-
-        float          mAuxLevel;                     // floating point set aux level
-        float          mPrevAuxLevel;                 // floating point prev aux level
-        float          mAuxInc;                       // floating point aux increment
-
-        audio_channel_mask_t mMixerChannelMask;
-        uint32_t             mMixerChannelCount;
-
-        AudioPlaybackRate    mPlaybackRate;
-
-        // Haptic
-        bool                 mHapticPlaybackEnabled;
-        haptic_intensity_t   mHapticIntensity;
-        audio_channel_mask_t mHapticChannelMask;
-        uint32_t             mHapticChannelCount;
-        audio_channel_mask_t mMixerHapticChannelMask;
-        uint32_t             mMixerHapticChannelCount;
-        uint32_t             mAdjustInChannelCount;
-        uint32_t             mAdjustOutChannelCount;
-        uint32_t             mAdjustNonDestructiveInChannelCount;
-        uint32_t             mAdjustNonDestructiveOutChannelCount;
-        bool                 mKeepContractedChannels;
-
-        float getHapticScaleGamma() const {
-        // Need to keep consistent with the value in VibratorService.
-        switch (mHapticIntensity) {
-        case HAPTIC_SCALE_VERY_LOW:
-            return 2.0f;
-        case HAPTIC_SCALE_LOW:
-            return 1.5f;
-        case HAPTIC_SCALE_HIGH:
-            return 0.5f;
-        case HAPTIC_SCALE_VERY_HIGH:
-            return 0.25f;
-        default:
-            return 1.0f;
-        }
-        }
-
-        float getHapticMaxAmplitudeRatio() const {
-        // Need to keep consistent with the value in VibratorService.
-        switch (mHapticIntensity) {
-        case HAPTIC_SCALE_VERY_LOW:
-            return HAPTIC_SCALE_VERY_LOW_RATIO;
-        case HAPTIC_SCALE_LOW:
-            return HAPTIC_SCALE_LOW_RATIO;
-        case HAPTIC_SCALE_NONE:
-        case HAPTIC_SCALE_HIGH:
-        case HAPTIC_SCALE_VERY_HIGH:
-            return 1.0f;
-        default:
-            return 0.0f;
-        }
-        }
-
-    private:
-        // hooks
-        void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-        void track__16BitsStereo(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-        void track__16BitsMono(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
-
-        void volumeRampStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
-        void volumeStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
-
-        // multi-format track hooks
-        template <int MIXTYPE, typename TO, typename TI, typename TA>
-        void track__Resample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
-        template <int MIXTYPE, typename TO, typename TI, typename TA>
-        void track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
-    };
-
-    // TODO: remove BLOCKSIZE unit of processing - it isn't needed anymore.
-    static constexpr int BLOCKSIZE = 16;
-
-    bool setChannelMasks(int name,
-            audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
-
-    // Called when track info changes and a new process hook should be determined.
-    void invalidate() {
-        mHook = &AudioMixer::process__validate;
-    }
-
-    void process__validate();
-    void process__nop();
-    void process__genericNoResampling();
-    void process__genericResampling();
-    void process__oneTrack16BitsStereoNoResampling();
-
-    template <int MIXTYPE, typename TO, typename TI, typename TA>
-    void process__noResampleOneTrack();
-
-    void processHapticData();
-
-    static process_hook_t getProcessHook(int processType, uint32_t channelCount,
-            audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
-
-    static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
-            void *in, audio_format_t mixerInFormat, size_t sampleCount);
-
-    static void sInitRoutine();
-
-    // initialization constants
-    const uint32_t mSampleRate;
-    const size_t mFrameCount;
-
-    NBLog::Writer *mNBLogWriter = nullptr;   // associated NBLog::Writer
-
-    process_hook_t mHook = &AudioMixer::process__nop;   // one of process__*, never nullptr
-
-    // the size of the type (int32_t) should be the largest of all types supported
-    // by the mixer.
-    std::unique_ptr<int32_t[]> mOutputTemp;
-    std::unique_ptr<int32_t[]> mResampleTemp;
-
-    // track names grouped by main buffer, in no particular order of main buffer.
-    // however names for a particular main buffer are in order (by construction).
-    std::unordered_map<void * /* mainBuffer */, std::vector<int /* name */>> mGroups;
-
-    // track names that are enabled, in increasing order (by construction).
-    std::vector<int /* name */> mEnabled;
-
-    // track smart pointers, by name, in increasing order of name.
-    std::map<int /* name */, std::shared_ptr<Track>> mTracks;
-
-    static pthread_once_t sOnceControl; // initialized in constructor by first new
-};
-
-// ----------------------------------------------------------------------------
-} // namespace android
-
-#endif // ANDROID_AUDIO_MIXER_H
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libaudioclient/include/media/AudioParameter.h
index 24837e3..3c190f2 100644
--- a/media/libaudioclient/include/media/AudioParameter.h
+++ b/media/libaudioclient/include/media/AudioParameter.h
@@ -67,9 +67,12 @@
     //  keyAudioLanguagePreferred: Preferred audio language
     static const char * const keyAudioLanguagePreferred;
 
-    //  keyStreamConnect / Disconnect: value is an int in audio_devices_t
-    static const char * const keyStreamConnect;
-    static const char * const keyStreamDisconnect;
+    //  keyDeviceConnect / Disconnect: value is an int in audio_devices_t
+    static const char * const keyDeviceConnect;
+    static const char * const keyDeviceDisconnect;
+    //  These need to remain here because vendors still use them.
+    static const char * const keyStreamConnect;  // Deprecated: DO NOT USE.
+    static const char * const keyStreamDisconnect;  // Deprecated: DO NOT USE.
 
     // For querying stream capabilities. All the returned values are lists.
     //   keyStreamSupportedFormats: audio_format_t
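
As a usage illustration for the renamed keys, a hedged sketch of building a set-parameters string with keyDeviceConnect; AudioParameter::addInt() and toString() are assumed from the existing libaudioclient API, and the function name plus the chosen device constant are only examples.

    // Sketch: report a device connection using the new key name.
    #include <media/AudioParameter.h>
    #include <system/audio.h>
    #include <utils/String8.h>

    android::String8 buildDeviceConnectParamsSketch() {
        android::AudioParameter param;
        // The value carried with keyDeviceConnect is an audio_devices_t.
        param.addInt(android::String8(android::AudioParameter::keyDeviceConnect),
                     AUDIO_DEVICE_OUT_USB_HEADSET);
        return param.toString();
    }
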
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
new file mode 100644
index 0000000..5045d87
--- /dev/null
+++ b/media/libaudiofoundation/Android.bp
@@ -0,0 +1,33 @@
+cc_library_headers {
+    name: "libaudiofoundation_headers",
+    vendor_available: true,
+    export_include_dirs: ["include"],
+}
+
+cc_library_shared {
+    name: "libaudiofoundation",
+    vendor_available: true,
+
+    srcs: [
+        "AudioGain.cpp",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libbinder",
+        "liblog",
+        "libutils",
+    ],
+
+    header_libs: [
+        "libaudio_system_headers",
+        "libaudiofoundation_headers",
+    ],
+
+    export_header_lib_headers: ["libaudiofoundation_headers"],
+
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+}
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
new file mode 100644
index 0000000..9d1d6db
--- /dev/null
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioGain"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include <android-base/stringprintf.h>
+#include <media/AudioGain.h>
+#include <utils/Log.h>
+
+#include <math.h>
+
+namespace android {
+
+AudioGain::AudioGain(int index, bool useInChannelMask)
+{
+    mIndex = index;
+    mUseInChannelMask = useInChannelMask;
+    memset(&mGain, 0, sizeof(struct audio_gain));
+}
+
+void AudioGain::getDefaultConfig(struct audio_gain_config *config)
+{
+    config->index = mIndex;
+    config->mode = mGain.mode;
+    config->channel_mask = mGain.channel_mask;
+    if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+        config->values[0] = mGain.default_value;
+    } else {
+        uint32_t numValues;
+        if (mUseInChannelMask) {
+            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
+        } else {
+            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
+        }
+        for (size_t i = 0; i < numValues; i++) {
+            config->values[i] = mGain.default_value;
+        }
+    }
+    if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+        config->ramp_duration_ms = mGain.min_ramp_ms;
+    }
+}
+
+status_t AudioGain::checkConfig(const struct audio_gain_config *config)
+{
+    if ((config->mode & ~mGain.mode) != 0) {
+        return BAD_VALUE;
+    }
+    if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+        if ((config->values[0] < mGain.min_value) ||
+                    (config->values[0] > mGain.max_value)) {
+            return BAD_VALUE;
+        }
+    } else {
+        if ((config->channel_mask & ~mGain.channel_mask) != 0) {
+            return BAD_VALUE;
+        }
+        uint32_t numValues;
+        if (mUseInChannelMask) {
+            numValues = audio_channel_count_from_in_mask(config->channel_mask);
+        } else {
+            numValues = audio_channel_count_from_out_mask(config->channel_mask);
+        }
+        for (size_t i = 0; i < numValues; i++) {
+            if ((config->values[i] < mGain.min_value) ||
+                    (config->values[i] > mGain.max_value)) {
+                return BAD_VALUE;
+            }
+        }
+    }
+    if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+        if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
+                    (config->ramp_duration_ms > mGain.max_ramp_ms)) {
+            return BAD_VALUE;
+        }
+    }
+    return NO_ERROR;
+}
+
+void AudioGain::dump(std::string *dst, int spaces, int index) const
+{
+    dst->append(base::StringPrintf("%*sGain %d:\n", spaces, "", index+1));
+    dst->append(base::StringPrintf("%*s- mode: %08x\n", spaces, "", mGain.mode));
+    dst->append(base::StringPrintf("%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask));
+    dst->append(base::StringPrintf("%*s- min_value: %d mB\n", spaces, "", mGain.min_value));
+    dst->append(base::StringPrintf("%*s- max_value: %d mB\n", spaces, "", mGain.max_value));
+    dst->append(base::StringPrintf("%*s- default_value: %d mB\n", spaces, "", mGain.default_value));
+    dst->append(base::StringPrintf("%*s- step_value: %d mB\n", spaces, "", mGain.step_value));
+    dst->append(base::StringPrintf("%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms));
+    dst->append(base::StringPrintf("%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms));
+}
+
+status_t AudioGain::writeToParcel(android::Parcel *parcel) const
+{
+    status_t status = NO_ERROR;
+    if ((status = parcel->writeInt32(mIndex)) != NO_ERROR) return status;
+    if ((status = parcel->writeBool(mUseInChannelMask)) != NO_ERROR) return status;
+    if ((status = parcel->writeBool(mUseForVolume)) != NO_ERROR) return status;
+    if ((status = parcel->writeUint32(mGain.mode)) != NO_ERROR) return status;
+    if ((status = parcel->writeUint32(mGain.channel_mask)) != NO_ERROR) return status;
+    if ((status = parcel->writeInt32(mGain.min_value)) != NO_ERROR) return status;
+    if ((status = parcel->writeInt32(mGain.max_value)) != NO_ERROR) return status;
+    if ((status = parcel->writeInt32(mGain.default_value)) != NO_ERROR) return status;
+    if ((status = parcel->writeUint32(mGain.step_value)) != NO_ERROR) return status;
+    if ((status = parcel->writeUint32(mGain.min_ramp_ms)) != NO_ERROR) return status;
+    status = parcel->writeUint32(mGain.max_ramp_ms);
+    return status;
+}
+
+status_t AudioGain::readFromParcel(const android::Parcel *parcel)
+{
+    status_t status = NO_ERROR;
+    if ((status = parcel->readInt32(&mIndex)) != NO_ERROR) return status;
+    if ((status = parcel->readBool(&mUseInChannelMask)) != NO_ERROR) return status;
+    if ((status = parcel->readBool(&mUseForVolume)) != NO_ERROR) return status;
+    if ((status = parcel->readUint32(&mGain.mode)) != NO_ERROR) return status;
+    if ((status = parcel->readUint32(&mGain.channel_mask)) != NO_ERROR) return status;
+    if ((status = parcel->readInt32(&mGain.min_value)) != NO_ERROR) return status;
+    if ((status = parcel->readInt32(&mGain.max_value)) != NO_ERROR) return status;
+    if ((status = parcel->readInt32(&mGain.default_value)) != NO_ERROR) return status;
+    if ((status = parcel->readUint32(&mGain.step_value)) != NO_ERROR) return status;
+    if ((status = parcel->readUint32(&mGain.min_ramp_ms)) != NO_ERROR) return status;
+    status = parcel->readUint32(&mGain.max_ramp_ms);
+    return status;
+}
+
+status_t AudioGains::writeToParcel(android::Parcel *parcel) const {
+    status_t status = NO_ERROR;
+    if ((status = parcel->writeUint64(this->size())) != NO_ERROR) return status;
+    for (const auto &audioGain : *this) {
+        if ((status = parcel->writeParcelable(*audioGain)) != NO_ERROR) {
+            break;
+        }
+    }
+    return status;
+}
+
+status_t AudioGains::readFromParcel(const android::Parcel *parcel) {
+    status_t status = NO_ERROR;
+    uint64_t count;
+    if ((status = parcel->readUint64(&count)) != NO_ERROR) return status;
+    for (uint64_t i = 0; i < count; i++) {
+        sp<AudioGain> audioGain = new AudioGain(0, false);
+        if ((status = parcel->readParcelable(audioGain.get())) != NO_ERROR) {
+            this->clear();
+            break;
+        }
+        this->push_back(audioGain);
+    }
+    return status;
+}
+
+} // namespace android
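Reviewer note: the AudioGain code added above exposes getDefaultConfig()/checkConfig() for range validation and writeToParcel()/readFromParcel() for serialization. A minimal usage sketch is given below for reference; the helper function name is illustrative and the sketch only exercises calls that appear in this patch, it is not part of the change itself.

// Hypothetical usage sketch (not part of the patch): validate a default gain
// config and round-trip the AudioGain through a Parcel.
#include <binder/Parcel.h>
#include <media/AudioGain.h>

using namespace android;

static status_t exerciseAudioGain(const sp<AudioGain>& gain) {
    struct audio_gain_config config = {};
    gain->getDefaultConfig(&config);           // fills mode, channel_mask, values, ramp
    status_t res = gain->checkConfig(&config); // BAD_VALUE if outside the declared ranges
    if (res != NO_ERROR) return res;

    Parcel parcel;
    res = gain->writeToParcel(&parcel);        // fields are written in a fixed order
    if (res != NO_ERROR) return res;
    parcel.setDataPosition(0);
    sp<AudioGain> copy = new AudioGain(0 /*index*/, false /*useInChannelMask*/);
    return copy->readFromParcel(&parcel);      // must read back in the same order
}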
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
similarity index 84%
rename from services/audiopolicy/common/managerdefinitions/include/AudioGain.h
rename to media/libaudiofoundation/include/media/AudioGain.h
index 4af93e1..6a7fb55 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -16,15 +16,17 @@
 
 #pragma once
 
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
-#include <utils/String8.h>
 #include <system/audio.h>
+#include <string>
 #include <vector>
 
 namespace android {
 
-class AudioGain: public RefBase
+class AudioGain: public RefBase, public Parcelable
 {
 public:
     AudioGain(int index, bool useInChannelMask);
@@ -55,7 +57,7 @@
     int getMaxRampInMs() const { return mGain.max_ramp_ms; }
 
     // TODO: remove dump from here (split serialization)
-    void dump(String8 *dst, int spaces, int index) const;
+    void dump(std::string *dst, int spaces, int index) const;
 
     void getDefaultConfig(struct audio_gain_config *config);
     status_t checkConfig(const struct audio_gain_config *config);
@@ -65,6 +67,9 @@
 
     const struct audio_gain &getGain() const { return mGain; }
 
+    status_t writeToParcel(Parcel* parcel) const override;
+    status_t readFromParcel(const Parcel* parcel) override;
+
 private:
     int               mIndex;
     struct audio_gain mGain;
@@ -72,7 +77,7 @@
     bool              mUseForVolume = false;
 };
 
-class AudioGains : public std::vector<sp<AudioGain> >
+class AudioGains : public std::vector<sp<AudioGain> >, public Parcelable
 {
 public:
     bool canUseForVolume() const
@@ -90,6 +95,9 @@
         push_back(gain);
         return 0;
     }
+
+    status_t writeToParcel(Parcel* parcel) const override;
+    status_t readFromParcel(const Parcel* parcel) override;
 };
 
 } // namespace android
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 584c2c0..9803473 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -13,12 +13,6 @@
     ],
 
     shared_libs: [
-        "android.hardware.audio.effect@2.0",
-        "android.hardware.audio.effect@4.0",
-        "android.hardware.audio.effect@5.0",
-        "android.hardware.audio@2.0",
-        "android.hardware.audio@4.0",
-        "android.hardware.audio@5.0",
         "libaudiohal@2.0",
         "libaudiohal@4.0",
         "libaudiohal@5.0",
@@ -26,7 +20,8 @@
     ],
 
     header_libs: [
-        "libaudiohal_headers"
+        "libaudiohal_headers",
+        "libbase_headers",
     ]
 }
 
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
index f86009c..d5336fa 100644
--- a/media/libaudiohal/DevicesFactoryHalInterface.cpp
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -14,26 +14,16 @@
  * limitations under the License.
  */
 
-#include <android/hardware/audio/2.0/IDevicesFactory.h>
-#include <android/hardware/audio/4.0/IDevicesFactory.h>
-#include <android/hardware/audio/5.0/IDevicesFactory.h>
-
 #include <libaudiohal/FactoryHalHidl.h>
 
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+
 namespace android {
 
 // static
 sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
-    if (hardware::audio::V5_0::IDevicesFactory::getService() != nullptr) {
-        return V5_0::createDevicesFactoryHal();
-    }
-    if (hardware::audio::V4_0::IDevicesFactory::getService() != nullptr) {
-        return V4_0::createDevicesFactoryHal();
-    }
-    if (hardware::audio::V2_0::IDevicesFactory::getService() != nullptr) {
-        return V2_0::createDevicesFactoryHal();
-    }
-    return nullptr;
+    return createPreferedImpl<DevicesFactoryHalInterface>();
 }
 
 } // namespace android
+
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index bd3ef61..d15b14e 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2017 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,26 +14,15 @@
  * limitations under the License.
  */
 
-#include <android/hardware/audio/effect/2.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/4.0/IEffectsFactory.h>
-#include <android/hardware/audio/effect/5.0/IEffectsFactory.h>
-
 #include <libaudiohal/FactoryHalHidl.h>
 
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
 namespace android {
 
 // static
 sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
-    if (hardware::audio::effect::V5_0::IEffectsFactory::getService() != nullptr) {
-        return effect::V5_0::createEffectsFactoryHal();
-    }
-    if (hardware::audio::effect::V4_0::IEffectsFactory::getService() != nullptr) {
-        return effect::V4_0::createEffectsFactoryHal();
-    }
-    if (hardware::audio::effect::V2_0::IEffectsFactory::getService() != nullptr) {
-        return effect::V2_0::createEffectsFactoryHal();
-    }
-    return nullptr;
+    return createPreferedImpl<EffectsFactoryHalInterface>();
 }
 
 // static
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
index 5e01e42..1335a0c 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -35,13 +35,10 @@
 namespace android {
 namespace CPP_VERSION {
 
-DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
-    sp<IDevicesFactory> defaultFactory{IDevicesFactory::getService()};
-    if (!defaultFactory) {
-        ALOGE("Failed to obtain IDevicesFactory/default service, terminating process.");
-        exit(1);
-    }
-    mDeviceFactories.push_back(defaultFactory);
+DevicesFactoryHalHidl::DevicesFactoryHalHidl(sp<IDevicesFactory> devicesFactory) {
+    ALOG_ASSERT(devicesFactory != nullptr, "Provided IDevicesFactory service is NULL");
+
+    mDeviceFactories.push_back(devicesFactory);
     if (MAJOR_VERSION >= 4) {
         // The MSD factory is optional and only available starting at HAL 4.0
         sp<IDevicesFactory> msdFactory{IDevicesFactory::getService(AUDIO_HAL_SERVICE_NAME_MSD)};
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 27e0649..8775e7b 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -32,18 +32,14 @@
 class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
 {
   public:
+    DevicesFactoryHalHidl(sp<IDevicesFactory> devicesFactory);
+
     // Opens a device with the specified name. To close the device, it is
     // necessary to release references to the returned object.
     virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
-
   private:
-    friend class DevicesFactoryHalHybrid;
-
     std::vector<sp<IDevicesFactory>> mDeviceFactories;
 
-    // Can not be constructed directly by clients.
-    DevicesFactoryHalHidl();
-
     virtual ~DevicesFactoryHalHidl() = default;
 };
 
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
index f337a8b..0e1f1bb 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
@@ -17,16 +17,17 @@
 #define LOG_TAG "DevicesFactoryHalHybrid"
 //#define LOG_NDEBUG 0
 
+#include "DevicesFactoryHalHidl.h"
 #include "DevicesFactoryHalHybrid.h"
 #include "DevicesFactoryHalLocal.h"
-#include "DevicesFactoryHalHidl.h"
+#include <libaudiohal/FactoryHalHidl.h>
 
 namespace android {
 namespace CPP_VERSION {
 
-DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
+DevicesFactoryHalHybrid::DevicesFactoryHalHybrid(sp<IDevicesFactory> hidlFactory)
         : mLocalFactory(new DevicesFactoryHalLocal()),
-          mHidlFactory(new DevicesFactoryHalHidl()) {
+          mHidlFactory(new DevicesFactoryHalHidl(hidlFactory)) {
 }
 
 status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
@@ -36,6 +37,12 @@
     }
     return mLocalFactory->openDevice(name, device);
 }
-
 } // namespace CPP_VERSION
+
+template <>
+sp<DevicesFactoryHalInterface> createFactoryHal<AudioHALVersion::CPP_VERSION>() {
+    auto service = hardware::audio::CPP_VERSION::IDevicesFactory::getService();
+    return service ? new CPP_VERSION::DevicesFactoryHalHybrid(service) : nullptr;
+}
+
 } // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index 5ac0d0d..545bb70 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -17,17 +17,20 @@
 #ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
 #define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
 
+#include PATH(android/hardware/audio/FILE_VERSION/IDevicesFactory.h)
 #include <media/audiohal/DevicesFactoryHalInterface.h>
 #include <utils/Errors.h>
 #include <utils/RefBase.h>
 
+using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
+
 namespace android {
 namespace CPP_VERSION {
 
 class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
 {
   public:
-    DevicesFactoryHalHybrid();
+    DevicesFactoryHalHybrid(sp<IDevicesFactory> hidlFactory);
 
     // Opens a device with the specified name. To close the device, it is
     // necessary to release references to the returned object.
@@ -38,10 +41,6 @@
     sp<DevicesFactoryHalInterface> mHidlFactory;
 };
 
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal() {
-    return new DevicesFactoryHalHybrid();
-}
-
 } // namespace CPP_VERSION
 } // namespace android
 
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index 7fd6bde..ba7b195 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -19,11 +19,12 @@
 
 #include <cutils/native_handle.h>
 
-#include "EffectsFactoryHalHidl.h"
 #include "ConversionHelperHidl.h"
 #include "EffectBufferHalHidl.h"
 #include "EffectHalHidl.h"
+#include "EffectsFactoryHalHidl.h"
 #include "HidlUtils.h"
+#include <libaudiohal/FactoryHalHidl.h>
 
 using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
 using ::android::hardware::Return;
@@ -35,12 +36,10 @@
 using namespace ::android::hardware::audio::common::CPP_VERSION;
 using namespace ::android::hardware::audio::effect::CPP_VERSION;
 
-EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
-    mEffectsFactory = IEffectsFactory::getService();
-    if (mEffectsFactory == 0) {
-        ALOGE("Failed to obtain IEffectsFactory service, terminating process.");
-        exit(1);
-    }
+EffectsFactoryHalHidl::EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory)
+        : ConversionHelperHidl("EffectsFactory") {
+    ALOG_ASSERT(effectsFactory != nullptr, "Provided IEffectsFactory service is NULL");
+    mEffectsFactory = effectsFactory;
 }
 
 status_t EffectsFactoryHalHidl::queryAllDescriptors() {
@@ -147,4 +146,11 @@
 
 } // namespace CPP_VERSION
 } // namespace effect
+
+template<>
+sp<EffectsFactoryHalInterface> createFactoryHal<AudioHALVersion::CPP_VERSION>() {
+    auto service = hardware::audio::effect::CPP_VERSION::IEffectsFactory::getService();
+    return service ? new effect::CPP_VERSION::EffectsFactoryHalHidl(service) : nullptr;
+}
+
 } // namespace android
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 01178ff..2828513 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -18,7 +18,6 @@
 #define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
 
 #include PATH(android/hardware/audio/effect/FILE_VERSION/IEffectsFactory.h)
-#include PATH(android/hardware/audio/effect/FILE_VERSION/types.h)
 #include <media/audiohal/EffectsFactoryHalInterface.h>
 
 #include "ConversionHelperHidl.h"
@@ -34,7 +33,7 @@
 class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
 {
   public:
-    EffectsFactoryHalHidl();
+    EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory);
 
     // Returns the number of different effects in all loaded libraries.
     virtual status_t queryNumberEffects(uint32_t *pNumEffects);
@@ -66,10 +65,6 @@
     status_t queryAllDescriptors();
 };
 
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal() {
-    return new EffectsFactoryHalHidl();
-}
-
 } // namespace CPP_VERSION
 } // namespace effect
 } // namespace android
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
index c7319d0..829f99c 100644
--- a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -23,33 +23,42 @@
 #include <media/audiohal/EffectsFactoryHalInterface.h>
 #include <utils/StrongPointer.h>
 
+#include <array>
+#include <utility>
+
 namespace android {
 
-namespace effect {
-namespace V2_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V2_0
+/** Supported HAL versions, in order of preference.
+ * Implementations should specialize `create*FactoryHal` for their version.
+ * Clients should use `createPreferedImpl<*FactoryHal>()` to instantiate
+ * the preferred available implementation.
+ */
+enum class AudioHALVersion {
+    V5_0,
+    V4_0,
+    V2_0,
+    end, // used for iterating over supported versions
+};
 
-namespace V4_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V4_0
+/** Template function to be fully specialized for each version and each Interface. */
+template <AudioHALVersion, class Interface>
+sp<Interface> createFactoryHal();
 
-namespace V5_0 {
-sp<EffectsFactoryHalInterface> createEffectsFactoryHal();
-} // namespace V5_0
-} // namespace effect
+/** @return the preferred available implementation, or nullptr if none is available. */
+template <class Interface, AudioHALVersion version = AudioHALVersion{}>
+static sp<Interface> createPreferedImpl() {
+    if constexpr (version == AudioHALVersion::end) {
+        return nullptr; // tried all versions, all returned nullptr
+    } else {
+        if (auto created = createFactoryHal<version, Interface>(); created != nullptr) {
+            return created;
+        }
 
-namespace V2_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V2_0
+        using Raw = std::underlying_type_t<AudioHALVersion>; // cast needed, as enum classes do not support ++
+        return createPreferedImpl<Interface, AudioHALVersion(Raw(version) + 1)>();
+    }
+}
 
-namespace V4_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V4_0
-
-namespace V5_0 {
-sp<DevicesFactoryHalInterface> createDevicesFactoryHal();
-} // namespace V5_0
 
 } // namespace android
 
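Reviewer note: createPreferedImpl above walks the AudioHALVersion enumerators in declaration order at compile time and returns the first specialization that yields a non-null factory. The self-contained sketch below shows the same if-constexpr recursion with toy version tags and factories standing in for the real HIDL services; every name in it is an illustrative assumption, not libaudiohal code.

// Standalone illustration of the compile-time "first available version wins" pattern.
#include <memory>
#include <type_traits>

enum class Version { V3, V2, V1, end };

struct Factory { int version; };

// Each version provides its own specialization; "unavailable" versions return nullptr.
template <Version V> std::unique_ptr<Factory> createForVersion() { return nullptr; }
template <> std::unique_ptr<Factory> createForVersion<Version::V2>() {
    return std::make_unique<Factory>(Factory{2});
}

template <Version V = Version{}>
std::unique_ptr<Factory> createPreferred() {
    if constexpr (V == Version::end) {
        return nullptr;                        // exhausted all versions
    } else {
        if (auto f = createForVersion<V>()) {  // first non-null impl wins
            return f;
        }
        using Raw = std::underlying_type_t<Version>;
        return createPreferred<Version(Raw(V) + 1)>();
    }
}

int main() {
    auto factory = createPreferred();  // V3 is unavailable here, so this resolves to the V2 factory
    return factory ? factory->version : -1;
}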
diff --git a/media/libaudioprocessing/Android.bp b/media/libaudioprocessing/Android.bp
index cb78063..e8aa700 100644
--- a/media/libaudioprocessing/Android.bp
+++ b/media/libaudioprocessing/Android.bp
@@ -3,20 +3,13 @@
 
     export_include_dirs: ["include"],
 
+    header_libs: ["libaudioclient_headers"],
+
     shared_libs: [
-        "libaudiohal",
         "libaudioutils",
         "libcutils",
         "liblog",
-        "libnbaio",
-        "libnblog",
-        "libsonic",
         "libutils",
-        "libvibrator",
-    ],
-
-    header_libs: [
-        "libbase_headers",
     ],
 
     cflags: [
@@ -33,18 +26,31 @@
     defaults: ["libaudioprocessing_defaults"],
 
     srcs: [
+        "AudioMixer.cpp",
         "BufferProviders.cpp",
         "RecordBufferConverter.cpp",
     ],
-    whole_static_libs: ["libaudioprocessing_arm"],
+
+    header_libs: [
+        "libbase_headers",
+    ],
+
+    shared_libs: [
+        "libaudiohal",
+        "libsonic",
+        "libvibrator",
+    ],
+
+    whole_static_libs: ["libaudioprocessing_base"],
 }
 
 cc_library_static {
-    name: "libaudioprocessing_arm",
+    name: "libaudioprocessing_base",
     defaults: ["libaudioprocessing_defaults"],
+    vendor_available: true,
 
     srcs: [
-        "AudioMixer.cpp",
+        "AudioMixerBase.cpp",
         "AudioResampler.cpp",
         "AudioResamplerCubic.cpp",
         "AudioResamplerSinc.cpp",
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index f7cc096..c0b11a4 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "AudioMixer"
 //#define LOG_NDEBUG 0
 
+#include <sstream>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -27,9 +28,6 @@
 #include <utils/Errors.h>
 #include <utils/Log.h>
 
-#include <cutils/compiler.h>
-#include <utils/Debug.h>
-
 #include <system/audio.h>
 
 #include <audio_utils/primitives.h>
@@ -58,138 +56,15 @@
 #define ALOGVV(a...) do { } while (0)
 #endif
 
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
-#endif
-
-// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
-// original code will be used for stereo sinks, the new mixer for multichannel.
-static constexpr bool kUseNewMixer = true;
-
-// Set kUseFloat to true to allow floating input into the mixer engine.
-// If kUseNewMixer is false, this is ignored or may be overridden internally
-// because of downmix/upmix support.
-static constexpr bool kUseFloat = true;
-
-#ifdef FLOAT_AUX
-using TYPE_AUX = float;
-static_assert(kUseNewMixer && kUseFloat,
-        "kUseNewMixer and kUseFloat must be true for FLOAT_AUX option");
-#else
-using TYPE_AUX = int32_t; // q4.27
-#endif
-
 // Set to default copy buffer size in frames for input processing.
-static const size_t kCopyBufferFrameCount = 256;
+static constexpr size_t kCopyBufferFrameCount = 256;
 
 namespace android {
 
 // ----------------------------------------------------------------------------
 
-static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
-    return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
-}
-
-status_t AudioMixer::create(
-        int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId)
-{
-    LOG_ALWAYS_FATAL_IF(exists(name), "name %d already exists", name);
-
-    if (!isValidChannelMask(channelMask)) {
-        ALOGE("%s invalid channelMask: %#x", __func__, channelMask);
-        return BAD_VALUE;
-    }
-    if (!isValidFormat(format)) {
-        ALOGE("%s invalid format: %#x", __func__, format);
-        return BAD_VALUE;
-    }
-
-    auto t = std::make_shared<Track>();
-    {
-        // TODO: move initialization to the Track constructor.
-        // assume default parameters for the track, except where noted below
-        t->needs = 0;
-
-        // Integer volume.
-        // Currently integer volume is kept for the legacy integer mixer.
-        // Will be removed when the legacy mixer path is removed.
-        t->volume[0] = 0;
-        t->volume[1] = 0;
-        t->prevVolume[0] = 0 << 16;
-        t->prevVolume[1] = 0 << 16;
-        t->volumeInc[0] = 0;
-        t->volumeInc[1] = 0;
-        t->auxLevel = 0;
-        t->auxInc = 0;
-        t->prevAuxLevel = 0;
-
-        // Floating point volume.
-        t->mVolume[0] = 0.f;
-        t->mVolume[1] = 0.f;
-        t->mPrevVolume[0] = 0.f;
-        t->mPrevVolume[1] = 0.f;
-        t->mVolumeInc[0] = 0.;
-        t->mVolumeInc[1] = 0.;
-        t->mAuxLevel = 0.;
-        t->mAuxInc = 0.;
-        t->mPrevAuxLevel = 0.;
-
-        // no initialization needed
-        // t->frameCount
-        t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
-        t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
-        channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
-        t->channelCount = audio_channel_count_from_out_mask(channelMask);
-        t->enabled = false;
-        ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
-                "Non-stereo channel mask: %d\n", channelMask);
-        t->channelMask = channelMask;
-        t->sessionId = sessionId;
-        // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
-        t->bufferProvider = NULL;
-        t->buffer.raw = NULL;
-        // no initialization needed
-        // t->buffer.frameCount
-        t->hook = NULL;
-        t->mIn = NULL;
-        t->sampleRate = mSampleRate;
-        // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
-        t->mainBuffer = NULL;
-        t->auxBuffer = NULL;
-        t->mInputBufferProvider = NULL;
-        t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
-        t->mFormat = format;
-        t->mMixerInFormat = selectMixerInFormat(format);
-        t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
-        t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
-                AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
-        t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
-        t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
-        // haptic
-        t->mHapticPlaybackEnabled = false;
-        t->mHapticIntensity = HAPTIC_SCALE_NONE;
-        t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
-        t->mMixerHapticChannelCount = 0;
-        t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
-        t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
-        t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
-        t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
-        t->mKeepContractedChannels = false;
-        // Check the downmixing (or upmixing) requirements.
-        status_t status = t->prepareForDownmix();
-        if (status != OK) {
-            ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
-            return BAD_VALUE;
-        }
-        // prepareForDownmix() may change mDownmixRequiresFormat
-        ALOGVV("mMixerFormat:%#x  mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
-        t->prepareForReformat();
-        t->prepareForAdjustChannelsNonDestructive(mFrameCount);
-        t->prepareForAdjustChannels();
-
-        mTracks[name] = t;
-        return OK;
-    }
+bool AudioMixer::isValidChannelMask(audio_channel_mask_t channelMask) const {
+    return audio_channel_mask_is_valid(channelMask); // the RemixBufferProvider is flexible.
 }
 
 // Called when channel masks have changed for a track name
@@ -198,7 +73,7 @@
 bool AudioMixer::setChannelMasks(int name,
         audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
     LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
+    const std::shared_ptr<Track> &track = getTrack(name);
 
     if (trackChannelMask == (track->channelMask | track->mHapticChannelMask)
             && mixerChannelMask == (track->mMixerChannelMask | track->mMixerHapticChannelMask)) {
@@ -255,14 +130,8 @@
     track->prepareForAdjustChannelsNonDestructive(mFrameCount);
     track->prepareForAdjustChannels();
 
-    if (track->mResampler.get() != nullptr) {
-        // resampler channels may have changed.
-        const uint32_t resetToSampleRate = track->sampleRate;
-        track->mResampler.reset(nullptr);
-        track->sampleRate = mSampleRate; // without resampler, track rate is device sample rate.
-        // recreate the resampler with updated format, channels, saved sampleRate.
-        track->setResampler(resetToSampleRate /*trackSampleRate*/, mSampleRate /*devSampleRate*/);
-    }
+    // Resampler channels may have changed.
+    track->recreateResampler(mSampleRate);
     return true;
 }
 
@@ -477,171 +346,10 @@
     }
 }
 
-void AudioMixer::destroy(int name)
-{
-    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    ALOGV("deleteTrackName(%d)", name);
-
-    if (mTracks[name]->enabled) {
-        invalidate();
-    }
-    mTracks.erase(name); // deallocate track
-}
-
-void AudioMixer::enable(int name)
-{
-    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
-
-    if (!track->enabled) {
-        track->enabled = true;
-        ALOGV("enable(%d)", name);
-        invalidate();
-    }
-}
-
-void AudioMixer::disable(int name)
-{
-    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
-
-    if (track->enabled) {
-        track->enabled = false;
-        ALOGV("disable(%d)", name);
-        invalidate();
-    }
-}
-
-/* Sets the volume ramp variables for the AudioMixer.
- *
- * The volume ramp variables are used to transition from the previous
- * volume to the set volume.  ramp controls the duration of the transition.
- * Its value is typically one state framecount period, but may also be 0,
- * meaning "immediate."
- *
- * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment
- * even if there is a nonzero floating point increment (in that case, the volume
- * change is immediate).  This restriction should be changed when the legacy mixer
- * is removed (see #2).
- * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed
- * when no longer needed.
- *
- * @param newVolume set volume target in floating point [0.0, 1.0].
- * @param ramp number of frames to increment over. if ramp is 0, the volume
- * should be set immediately.  Currently ramp should not exceed 65535 (frames).
- * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return.
- * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return.
- * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return.
- * @param pSetVolume pointer to the float target volume, set on return.
- * @param pPrevVolume pointer to the float previous volume, set on return.
- * @param pVolumeInc pointer to the float increment per output audio frame, set on return.
- * @return true if the volume has changed, false if volume is same.
- */
-static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
-        int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
-        float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
-    // check floating point volume to see if it is identical to the previously
-    // set volume.
-    // We do not use a tolerance here (and reject changes too small)
-    // as it may be confusing to use a different value than the one set.
-    // If the resulting volume is too small to ramp, it is a direct set of the volume.
-    if (newVolume == *pSetVolume) {
-        return false;
-    }
-    if (newVolume < 0) {
-        newVolume = 0; // should not have negative volumes
-    } else {
-        switch (fpclassify(newVolume)) {
-        case FP_SUBNORMAL:
-        case FP_NAN:
-            newVolume = 0;
-            break;
-        case FP_ZERO:
-            break; // zero volume is fine
-        case FP_INFINITE:
-            // Infinite volume could be handled consistently since
-            // floating point math saturates at infinities,
-            // but we limit volume to unity gain float.
-            // ramp = 0; break;
-            //
-            newVolume = AudioMixer::UNITY_GAIN_FLOAT;
-            break;
-        case FP_NORMAL:
-        default:
-            // Floating point does not have problems with overflow wrap
-            // that integer has.  However, we limit the volume to
-            // unity gain here.
-            // TODO: Revisit the volume limitation and perhaps parameterize.
-            if (newVolume > AudioMixer::UNITY_GAIN_FLOAT) {
-                newVolume = AudioMixer::UNITY_GAIN_FLOAT;
-            }
-            break;
-        }
-    }
-
-    // set floating point volume ramp
-    if (ramp != 0) {
-        // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
-        // is no computational mismatch; hence equality is checked here.
-        ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
-                " prev:%f  set_to:%f", *pPrevVolume, *pSetVolume);
-        const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
-        // could be inf, cannot be nan, subnormal
-        const float maxv = std::max(newVolume, *pPrevVolume);
-
-        if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
-                && maxv + inc != maxv) { // inc must make forward progress
-            *pVolumeInc = inc;
-            // ramp is set now.
-            // Note: if newVolume is 0, then near the end of the ramp,
-            // it may be possible that the ramped volume may be subnormal or
-            // temporarily negative by a small amount or subnormal due to floating
-            // point inaccuracies.
-        } else {
-            ramp = 0; // ramp not allowed
-        }
-    }
-
-    // compute and check integer volume, no need to check negative values
-    // The integer volume is limited to "unity_gain" to avoid wrapping and other
-    // audio artifacts, so it never reaches the range limit of U4.28.
-    // We safely use signed 16 and 32 bit integers here.
-    const float scaledVolume = newVolume * AudioMixer::UNITY_GAIN_INT; // not neg, subnormal, nan
-    const int32_t intVolume = (scaledVolume >= (float)AudioMixer::UNITY_GAIN_INT) ?
-            AudioMixer::UNITY_GAIN_INT : (int32_t)scaledVolume;
-
-    // set integer volume ramp
-    if (ramp != 0) {
-        // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
-        // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
-        // is no computational mismatch; hence equality is checked here.
-        ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
-                " prev:%d  set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
-        const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
-
-        if (inc != 0) { // inc must make forward progress
-            *pIntVolumeInc = inc;
-        } else {
-            ramp = 0; // ramp not allowed
-        }
-    }
-
-    // if no ramp, or ramp not allowed, then clear float and integer increments
-    if (ramp == 0) {
-        *pVolumeInc = 0;
-        *pPrevVolume = newVolume;
-        *pIntVolumeInc = 0;
-        *pIntPrevVolume = intVolume << 16;
-    }
-    *pSetVolume = newVolume;
-    *pIntSetVolume = intVolume;
-    return true;
-}
-
 void AudioMixer::setParameter(int name, int target, int param, void *value)
 {
     LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
+    const std::shared_ptr<Track> &track = getTrack(name);
 
     int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
     int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
@@ -670,11 +378,7 @@
             }
             break;
         case AUX_BUFFER:
-            if (track->auxBuffer != valueBuf) {
-                track->auxBuffer = valueBuf;
-                ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
-                invalidate();
-            }
+            AudioMixerBase::setParameter(name, target, param, value);
             break;
         case FORMAT: {
             audio_format_t format = static_cast<audio_format_t>(valueInt);
@@ -730,127 +434,38 @@
         break;
 
     case RESAMPLE:
-        switch (param) {
-        case SAMPLE_RATE:
-            ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
-            if (track->setResampler(uint32_t(valueInt), mSampleRate)) {
-                ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
-                        uint32_t(valueInt));
-                invalidate();
-            }
-            break;
-        case RESET:
-            track->resetResampler();
-            invalidate();
-            break;
-        case REMOVE:
-            track->mResampler.reset(nullptr);
-            track->sampleRate = mSampleRate;
-            invalidate();
-            break;
-        default:
-            LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
-        }
-        break;
-
     case RAMP_VOLUME:
     case VOLUME:
+        AudioMixerBase::setParameter(name, target, param, value);
+        break;
+    case TIMESTRETCH:
         switch (param) {
-        case AUXLEVEL:
-            if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
-                    target == RAMP_VOLUME ? mFrameCount : 0,
-                    &track->auxLevel, &track->prevAuxLevel, &track->auxInc,
-                    &track->mAuxLevel, &track->mPrevAuxLevel, &track->mAuxInc)) {
-                ALOGV("setParameter(%s, AUXLEVEL: %04x)",
-                        target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track->auxLevel);
-                invalidate();
+        case PLAYBACK_RATE: {
+            const AudioPlaybackRate *playbackRate =
+                    reinterpret_cast<AudioPlaybackRate*>(value);
+            ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
+                    "bad parameters speed %f, pitch %f",
+                    playbackRate->mSpeed, playbackRate->mPitch);
+            if (track->setPlaybackRate(*playbackRate)) {
+                ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
+                        "%f %f %d %d",
+                        playbackRate->mSpeed,
+                        playbackRate->mPitch,
+                        playbackRate->mStretchMode,
+                        playbackRate->mFallbackMode);
+                // invalidate();  (should not require reconfigure)
             }
-            break;
+        } break;
         default:
-            if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
-                if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
-                        target == RAMP_VOLUME ? mFrameCount : 0,
-                        &track->volume[param - VOLUME0],
-                        &track->prevVolume[param - VOLUME0],
-                        &track->volumeInc[param - VOLUME0],
-                        &track->mVolume[param - VOLUME0],
-                        &track->mPrevVolume[param - VOLUME0],
-                        &track->mVolumeInc[param - VOLUME0])) {
-                    ALOGV("setParameter(%s, VOLUME%d: %04x)",
-                            target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
-                                    track->volume[param - VOLUME0]);
-                    invalidate();
-                }
-            } else {
-                LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
-            }
+            LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
         }
         break;
-        case TIMESTRETCH:
-            switch (param) {
-            case PLAYBACK_RATE: {
-                const AudioPlaybackRate *playbackRate =
-                        reinterpret_cast<AudioPlaybackRate*>(value);
-                ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
-                        "bad parameters speed %f, pitch %f",
-                        playbackRate->mSpeed, playbackRate->mPitch);
-                if (track->setPlaybackRate(*playbackRate)) {
-                    ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
-                            "%f %f %d %d",
-                            playbackRate->mSpeed,
-                            playbackRate->mPitch,
-                            playbackRate->mStretchMode,
-                            playbackRate->mFallbackMode);
-                    // invalidate();  (should not require reconfigure)
-                }
-            } break;
-            default:
-                LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
-            }
-            break;
 
     default:
         LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
     }
 }
 
-bool AudioMixer::Track::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
-{
-    if (trackSampleRate != devSampleRate || mResampler.get() != nullptr) {
-        if (sampleRate != trackSampleRate) {
-            sampleRate = trackSampleRate;
-            if (mResampler.get() == nullptr) {
-                ALOGV("Creating resampler from track %d Hz to device %d Hz",
-                        trackSampleRate, devSampleRate);
-                AudioResampler::src_quality quality;
-                // force lowest quality level resampler if use case isn't music or video
-                // FIXME this is flawed for dynamic sample rates, as we choose the resampler
-                // quality level based on the initial ratio, but that could change later.
-                // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
-                if (isMusicRate(trackSampleRate)) {
-                    quality = AudioResampler::DEFAULT_QUALITY;
-                } else {
-                    quality = AudioResampler::DYN_LOW_QUALITY;
-                }
-
-                // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
-                // but if none exists, it is the channel count (1 for mono).
-                const int resamplerChannelCount = mDownmixerBufferProvider.get() != nullptr
-                        ? mMixerChannelCount : channelCount;
-                ALOGVV("Creating resampler:"
-                        " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
-                        mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
-                mResampler.reset(AudioResampler::create(
-                        mMixerInFormat,
-                        resamplerChannelCount,
-                        devSampleRate, quality));
-            }
-            return true;
-        }
-    }
-    return false;
-}
-
 bool AudioMixer::Track::setPlaybackRate(const AudioPlaybackRate &playbackRate)
 {
     if ((mTimestretchBufferProvider.get() == nullptr &&
@@ -863,8 +478,7 @@
     if (mTimestretchBufferProvider.get() == nullptr) {
         // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
         // but if none exists, it is the channel count (1 for mono).
-        const int timestretchChannelCount = mDownmixerBufferProvider.get() != nullptr
-                ? mMixerChannelCount : channelCount;
+        const int timestretchChannelCount = getOutputChannelCount();
         mTimestretchBufferProvider.reset(new TimestretchBufferProvider(timestretchChannelCount,
                 mMixerInFormat, sampleRate, playbackRate));
         reconfigureBufferProviders();
@@ -875,84 +489,10 @@
     return true;
 }
 
-/* Checks to see if the volume ramp has completed and clears the increment
- * variables appropriately.
- *
- * FIXME: There is code to handle int/float ramp variable switchover should it not
- * complete within a mixer buffer processing call, but it is preferred to avoid switchover
- * due to precision issues.  The switchover code is included for legacy code purposes
- * and can be removed once the integer volume is removed.
- *
- * It is not sufficient to clear only the volumeInc integer variable because
- * if one channel requires ramping, all channels are ramped.
- *
- * There is a bit of duplicated code here, but it keeps backward compatibility.
- */
-inline void AudioMixer::Track::adjustVolumeRamp(bool aux, bool useFloat)
-{
-    if (useFloat) {
-        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
-            if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
-                     (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
-                volumeInc[i] = 0;
-                prevVolume[i] = volume[i] << 16;
-                mVolumeInc[i] = 0.;
-                mPrevVolume[i] = mVolume[i];
-            } else {
-                //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
-                prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
-            }
-        }
-    } else {
-        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
-            if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
-                    ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
-                volumeInc[i] = 0;
-                prevVolume[i] = volume[i] << 16;
-                mVolumeInc[i] = 0.;
-                mPrevVolume[i] = mVolume[i];
-            } else {
-                //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]);
-                mPrevVolume[i]  = float_from_u4_28(prevVolume[i]);
-            }
-        }
-    }
-
-    if (aux) {
-#ifdef FLOAT_AUX
-        if (useFloat) {
-            if ((mAuxInc > 0.f && mPrevAuxLevel + mAuxInc >= mAuxLevel) ||
-                    (mAuxInc < 0.f && mPrevAuxLevel + mAuxInc <= mAuxLevel)) {
-                auxInc = 0;
-                prevAuxLevel = auxLevel << 16;
-                mAuxInc = 0.f;
-                mPrevAuxLevel = mAuxLevel;
-            }
-        } else
-#endif
-        if ((auxInc > 0 && ((prevAuxLevel + auxInc) >> 16) >= auxLevel) ||
-                (auxInc < 0 && ((prevAuxLevel + auxInc) >> 16) <= auxLevel)) {
-            auxInc = 0;
-            prevAuxLevel = auxLevel << 16;
-            mAuxInc = 0.f;
-            mPrevAuxLevel = mAuxLevel;
-        }
-    }
-}
-
-size_t AudioMixer::getUnreleasedFrames(int name) const
-{
-    const auto it = mTracks.find(name);
-    if (it != mTracks.end()) {
-        return it->second->getUnreleasedFrames();
-    }
-    return 0;
-}
-
 void AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
 {
     LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
-    const std::shared_ptr<Track> &track = mTracks[name];
+    const std::shared_ptr<Track> &track = getTrack(name);
 
     if (track->mInputBufferProvider == bufferProvider) {
         return; // don't reset any buffer providers if identical.
@@ -976,679 +516,6 @@
     track->reconfigureBufferProviders();
 }
 
-void AudioMixer::process__validate()
-{
-    // TODO: fix all16BitsStereNoResample logic to
-    // either properly handle muted tracks (it should ignore them)
-    // or remove altogether as an obsolete optimization.
-    bool all16BitsStereoNoResample = true;
-    bool resampling = false;
-    bool volumeRamp = false;
-
-    mEnabled.clear();
-    mGroups.clear();
-    for (const auto &pair : mTracks) {
-        const int name = pair.first;
-        const std::shared_ptr<Track> &t = pair.second;
-        if (!t->enabled) continue;
-
-        mEnabled.emplace_back(name);  // we add to mEnabled in order of name.
-        mGroups[t->mainBuffer].emplace_back(name); // mGroups also in order of name.
-
-        uint32_t n = 0;
-        // FIXME can overflow (mask is only 3 bits)
-        n |= NEEDS_CHANNEL_1 + t->channelCount - 1;
-        if (t->doesResample()) {
-            n |= NEEDS_RESAMPLE;
-        }
-        if (t->auxLevel != 0 && t->auxBuffer != NULL) {
-            n |= NEEDS_AUX;
-        }
-
-        if (t->volumeInc[0]|t->volumeInc[1]) {
-            volumeRamp = true;
-        } else if (!t->doesResample() && t->volumeRL == 0) {
-            n |= NEEDS_MUTE;
-        }
-        t->needs = n;
-
-        if (n & NEEDS_MUTE) {
-            t->hook = &Track::track__nop;
-        } else {
-            if (n & NEEDS_AUX) {
-                all16BitsStereoNoResample = false;
-            }
-            if (n & NEEDS_RESAMPLE) {
-                all16BitsStereoNoResample = false;
-                resampling = true;
-                t->hook = Track::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
-                        t->mMixerInFormat, t->mMixerFormat);
-                ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
-                        "Track %d needs downmix + resample", name);
-            } else {
-                if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
-                    t->hook = Track::getTrackHook(
-                            (t->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO  // TODO: MONO_HACK
-                                    && t->channelMask == AUDIO_CHANNEL_OUT_MONO)
-                                ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
-                            t->mMixerChannelCount,
-                            t->mMixerInFormat, t->mMixerFormat);
-                    all16BitsStereoNoResample = false;
-                }
-                if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
-                    t->hook = Track::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
-                            t->mMixerInFormat, t->mMixerFormat);
-                    ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
-                            "Track %d needs downmix", name);
-                }
-            }
-        }
-    }
-
-    // select the processing hooks
-    mHook = &AudioMixer::process__nop;
-    if (mEnabled.size() > 0) {
-        if (resampling) {
-            if (mOutputTemp.get() == nullptr) {
-                mOutputTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
-            }
-            if (mResampleTemp.get() == nullptr) {
-                mResampleTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
-            }
-            mHook = &AudioMixer::process__genericResampling;
-        } else {
-            // we keep temp arrays around.
-            mHook = &AudioMixer::process__genericNoResampling;
-            if (all16BitsStereoNoResample && !volumeRamp) {
-                if (mEnabled.size() == 1) {
-                    const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
-                    if ((t->needs & NEEDS_MUTE) == 0) {
-                        // The check prevents a muted track from acquiring a process hook.
-                        //
-                        // This is dangerous if the track is MONO as that requires
-                        // special case handling due to implicit channel duplication.
-                        // Stereo or Multichannel should actually be fine here.
-                        mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
-                                t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
-                    }
-                }
-            }
-        }
-    }
-
-    ALOGV("mixer configuration change: %zu "
-        "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
-        mEnabled.size(), all16BitsStereoNoResample, resampling, volumeRamp);
-
-   process();
-
-    // Now that the volume ramp has been done, set optimal state and
-    // track hooks for subsequent mixer process
-    if (mEnabled.size() > 0) {
-        bool allMuted = true;
-
-        for (const int name : mEnabled) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            if (!t->doesResample() && t->volumeRL == 0) {
-                t->needs |= NEEDS_MUTE;
-                t->hook = &Track::track__nop;
-            } else {
-                allMuted = false;
-            }
-        }
-        if (allMuted) {
-            mHook = &AudioMixer::process__nop;
-        } else if (all16BitsStereoNoResample) {
-            if (mEnabled.size() == 1) {
-                //const int i = 31 - __builtin_clz(enabledTracks);
-                const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
-                // Muted single tracks handled by allMuted above.
-                mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
-                        t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
-            }
-        }
-    }
-}
-
-void AudioMixer::Track::track__genericResample(
-        int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
-{
-    ALOGVV("track__genericResample\n");
-    mResampler->setSampleRate(sampleRate);
-
-    // ramp gain - resample to temp buffer and scale/mix in 2nd step
-    if (aux != NULL) {
-        // always resample with unity gain when sending to auxiliary buffer to be able
-        // to apply send level after resampling
-        mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-        memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
-        mResampler->resample(temp, outFrameCount, bufferProvider);
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
-            volumeRampStereo(out, outFrameCount, temp, aux);
-        } else {
-            volumeStereo(out, outFrameCount, temp, aux);
-        }
-    } else {
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
-            mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-            memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
-            mResampler->resample(temp, outFrameCount, bufferProvider);
-            volumeRampStereo(out, outFrameCount, temp, aux);
-        }
-
-        // constant gain
-        else {
-            mResampler->setVolume(mVolume[0], mVolume[1]);
-            mResampler->resample(out, outFrameCount, bufferProvider);
-        }
-    }
-}
-
-void AudioMixer::Track::track__nop(int32_t* out __unused,
-        size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
-{
-}
-
-void AudioMixer::Track::volumeRampStereo(
-        int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
-{
-    int32_t vl = prevVolume[0];
-    int32_t vr = prevVolume[1];
-    const int32_t vlInc = volumeInc[0];
-    const int32_t vrInc = volumeInc[1];
-
-    //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-    //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
-    //       (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-    // ramp volume
-    if (CC_UNLIKELY(aux != NULL)) {
-        int32_t va = prevAuxLevel;
-        const int32_t vaInc = auxInc;
-        int32_t l;
-        int32_t r;
-
-        do {
-            l = (*temp++ >> 12);
-            r = (*temp++ >> 12);
-            *out++ += (vl >> 16) * l;
-            *out++ += (vr >> 16) * r;
-            *aux++ += (va >> 17) * (l + r);
-            vl += vlInc;
-            vr += vrInc;
-            va += vaInc;
-        } while (--frameCount);
-        prevAuxLevel = va;
-    } else {
-        do {
-            *out++ += (vl >> 16) * (*temp++ >> 12);
-            *out++ += (vr >> 16) * (*temp++ >> 12);
-            vl += vlInc;
-            vr += vrInc;
-        } while (--frameCount);
-    }
-    prevVolume[0] = vl;
-    prevVolume[1] = vr;
-    adjustVolumeRamp(aux != NULL);
-}
-
-void AudioMixer::Track::volumeStereo(
-        int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
-{
-    const int16_t vl = volume[0];
-    const int16_t vr = volume[1];
-
-    if (CC_UNLIKELY(aux != NULL)) {
-        const int16_t va = auxLevel;
-        do {
-            int16_t l = (int16_t)(*temp++ >> 12);
-            int16_t r = (int16_t)(*temp++ >> 12);
-            out[0] = mulAdd(l, vl, out[0]);
-            int16_t a = (int16_t)(((int32_t)l + r) >> 1);
-            out[1] = mulAdd(r, vr, out[1]);
-            out += 2;
-            aux[0] = mulAdd(a, va, aux[0]);
-            aux++;
-        } while (--frameCount);
-    } else {
-        do {
-            int16_t l = (int16_t)(*temp++ >> 12);
-            int16_t r = (int16_t)(*temp++ >> 12);
-            out[0] = mulAdd(l, vl, out[0]);
-            out[1] = mulAdd(r, vr, out[1]);
-            out += 2;
-        } while (--frameCount);
-    }
-}
-
-void AudioMixer::Track::track__16BitsStereo(
-        int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
-{
-    ALOGVV("track__16BitsStereo\n");
-    const int16_t *in = static_cast<const int16_t *>(mIn);
-
-    if (CC_UNLIKELY(aux != NULL)) {
-        int32_t l;
-        int32_t r;
-        // ramp gain
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
-            int32_t vl = prevVolume[0];
-            int32_t vr = prevVolume[1];
-            int32_t va = prevAuxLevel;
-            const int32_t vlInc = volumeInc[0];
-            const int32_t vrInc = volumeInc[1];
-            const int32_t vaInc = auxInc;
-            // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-            //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
-            //        (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-            do {
-                l = (int32_t)*in++;
-                r = (int32_t)*in++;
-                *out++ += (vl >> 16) * l;
-                *out++ += (vr >> 16) * r;
-                *aux++ += (va >> 17) * (l + r);
-                vl += vlInc;
-                vr += vrInc;
-                va += vaInc;
-            } while (--frameCount);
-
-            prevVolume[0] = vl;
-            prevVolume[1] = vr;
-            prevAuxLevel = va;
-            adjustVolumeRamp(true);
-        }
-
-        // constant gain
-        else {
-            const uint32_t vrl = volumeRL;
-            const int16_t va = (int16_t)auxLevel;
-            do {
-                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
-                in += 2;
-                out[0] = mulAddRL(1, rl, vrl, out[0]);
-                out[1] = mulAddRL(0, rl, vrl, out[1]);
-                out += 2;
-                aux[0] = mulAdd(a, va, aux[0]);
-                aux++;
-            } while (--frameCount);
-        }
-    } else {
-        // ramp gain
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
-            int32_t vl = prevVolume[0];
-            int32_t vr = prevVolume[1];
-            const int32_t vlInc = volumeInc[0];
-            const int32_t vrInc = volumeInc[1];
-
-            // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-            //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
-            //        (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-            do {
-                *out++ += (vl >> 16) * (int32_t) *in++;
-                *out++ += (vr >> 16) * (int32_t) *in++;
-                vl += vlInc;
-                vr += vrInc;
-            } while (--frameCount);
-
-            prevVolume[0] = vl;
-            prevVolume[1] = vr;
-            adjustVolumeRamp(false);
-        }
-
-        // constant gain
-        else {
-            const uint32_t vrl = volumeRL;
-            do {
-                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                in += 2;
-                out[0] = mulAddRL(1, rl, vrl, out[0]);
-                out[1] = mulAddRL(0, rl, vrl, out[1]);
-                out += 2;
-            } while (--frameCount);
-        }
-    }
-    mIn = in;
-}
-
-void AudioMixer::Track::track__16BitsMono(
-        int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
-{
-    ALOGVV("track__16BitsMono\n");
-    const int16_t *in = static_cast<int16_t const *>(mIn);
-
-    if (CC_UNLIKELY(aux != NULL)) {
-        // ramp gain
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
-            int32_t vl = prevVolume[0];
-            int32_t vr = prevVolume[1];
-            int32_t va = prevAuxLevel;
-            const int32_t vlInc = volumeInc[0];
-            const int32_t vrInc = volumeInc[1];
-            const int32_t vaInc = auxInc;
-
-            // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-            //         t, vlInc/65536.0f, vl/65536.0f, volume[0],
-            //         (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-            do {
-                int32_t l = *in++;
-                *out++ += (vl >> 16) * l;
-                *out++ += (vr >> 16) * l;
-                *aux++ += (va >> 16) * l;
-                vl += vlInc;
-                vr += vrInc;
-                va += vaInc;
-            } while (--frameCount);
-
-            prevVolume[0] = vl;
-            prevVolume[1] = vr;
-            prevAuxLevel = va;
-            adjustVolumeRamp(true);
-        }
-        // constant gain
-        else {
-            const int16_t vl = volume[0];
-            const int16_t vr = volume[1];
-            const int16_t va = (int16_t)auxLevel;
-            do {
-                int16_t l = *in++;
-                out[0] = mulAdd(l, vl, out[0]);
-                out[1] = mulAdd(l, vr, out[1]);
-                out += 2;
-                aux[0] = mulAdd(l, va, aux[0]);
-                aux++;
-            } while (--frameCount);
-        }
-    } else {
-        // ramp gain
-        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
-            int32_t vl = prevVolume[0];
-            int32_t vr = prevVolume[1];
-            const int32_t vlInc = volumeInc[0];
-            const int32_t vrInc = volumeInc[1];
-
-            // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
-            //         t, vlInc/65536.0f, vl/65536.0f, volume[0],
-            //         (vl + vlInc*frameCount)/65536.0f, frameCount);
-
-            do {
-                int32_t l = *in++;
-                *out++ += (vl >> 16) * l;
-                *out++ += (vr >> 16) * l;
-                vl += vlInc;
-                vr += vrInc;
-            } while (--frameCount);
-
-            prevVolume[0] = vl;
-            prevVolume[1] = vr;
-            adjustVolumeRamp(false);
-        }
-        // constant gain
-        else {
-            const int16_t vl = volume[0];
-            const int16_t vr = volume[1];
-            do {
-                int16_t l = *in++;
-                out[0] = mulAdd(l, vl, out[0]);
-                out[1] = mulAdd(l, vr, out[1]);
-                out += 2;
-            } while (--frameCount);
-        }
-    }
-    mIn = in;
-}
-
-// no-op case
-void AudioMixer::process__nop()
-{
-    ALOGVV("process__nop\n");
-
-    for (const auto &pair : mGroups) {
-        // process by group of tracks with same output buffer to
-        // avoid multiple memset() on same buffer
-        const auto &group = pair.second;
-
-        const std::shared_ptr<Track> &t = mTracks[group[0]];
-        memset(t->mainBuffer, 0,
-                mFrameCount * audio_bytes_per_frame(
-                        t->mMixerChannelCount + t->mMixerHapticChannelCount, t->mMixerFormat));
-
-        // now consume data
-        for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            size_t outFrames = mFrameCount;
-            while (outFrames) {
-                t->buffer.frameCount = outFrames;
-                t->bufferProvider->getNextBuffer(&t->buffer);
-                if (t->buffer.raw == NULL) break;
-                outFrames -= t->buffer.frameCount;
-                t->bufferProvider->releaseBuffer(&t->buffer);
-            }
-        }
-    }
-}
-
-// generic code without resampling
-void AudioMixer::process__genericNoResampling()
-{
-    ALOGVV("process__genericNoResampling\n");
-    int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
-
-    for (const auto &pair : mGroups) {
-        // process by group of tracks with same output main buffer to
-        // avoid multiple memset() on same buffer
-        const auto &group = pair.second;
-
-        // acquire buffer
-        for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            t->buffer.frameCount = mFrameCount;
-            t->bufferProvider->getNextBuffer(&t->buffer);
-            t->frameCount = t->buffer.frameCount;
-            t->mIn = t->buffer.raw;
-        }
-
-        int32_t *out = (int *)pair.first;
-        size_t numFrames = 0;
-        do {
-            const size_t frameCount = std::min((size_t)BLOCKSIZE, mFrameCount - numFrames);
-            memset(outTemp, 0, sizeof(outTemp));
-            for (const int name : group) {
-                const std::shared_ptr<Track> &t = mTracks[name];
-                int32_t *aux = NULL;
-                if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
-                    aux = t->auxBuffer + numFrames;
-                }
-                for (int outFrames = frameCount; outFrames > 0; ) {
-                    // t->in == nullptr can happen if the track was flushed just after having
-                    // been enabled for mixing.
-                    if (t->mIn == nullptr) {
-                        break;
-                    }
-                    size_t inFrames = (t->frameCount > outFrames)?outFrames:t->frameCount;
-                    if (inFrames > 0) {
-                        (t.get()->*t->hook)(
-                                outTemp + (frameCount - outFrames) * t->mMixerChannelCount,
-                                inFrames, mResampleTemp.get() /* naked ptr */, aux);
-                        t->frameCount -= inFrames;
-                        outFrames -= inFrames;
-                        if (CC_UNLIKELY(aux != NULL)) {
-                            aux += inFrames;
-                        }
-                    }
-                    if (t->frameCount == 0 && outFrames) {
-                        t->bufferProvider->releaseBuffer(&t->buffer);
-                        t->buffer.frameCount = (mFrameCount - numFrames) -
-                                (frameCount - outFrames);
-                        t->bufferProvider->getNextBuffer(&t->buffer);
-                        t->mIn = t->buffer.raw;
-                        if (t->mIn == nullptr) {
-                            break;
-                        }
-                        t->frameCount = t->buffer.frameCount;
-                    }
-                }
-            }
-
-            const std::shared_ptr<Track> &t1 = mTracks[group[0]];
-            convertMixerFormat(out, t1->mMixerFormat, outTemp, t1->mMixerInFormat,
-                    frameCount * t1->mMixerChannelCount);
-            // TODO: fix ugly casting due to choice of out pointer type
-            out = reinterpret_cast<int32_t*>((uint8_t*)out
-                    + frameCount * t1->mMixerChannelCount
-                    * audio_bytes_per_sample(t1->mMixerFormat));
-            numFrames += frameCount;
-        } while (numFrames < mFrameCount);
-
-        // release each track's buffer
-        for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            t->bufferProvider->releaseBuffer(&t->buffer);
-        }
-    }
-}
-
-// generic code with resampling
-void AudioMixer::process__genericResampling()
-{
-    ALOGVV("process__genericResampling\n");
-    int32_t * const outTemp = mOutputTemp.get(); // naked ptr
-    size_t numFrames = mFrameCount;
-
-    for (const auto &pair : mGroups) {
-        const auto &group = pair.second;
-        const std::shared_ptr<Track> &t1 = mTracks[group[0]];
-
-        // clear temp buffer
-        memset(outTemp, 0, sizeof(*outTemp) * t1->mMixerChannelCount * mFrameCount);
-        for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
-            int32_t *aux = NULL;
-            if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
-                aux = t->auxBuffer;
-            }
-
-            // this is a little goofy, on the resampling case we don't
-            // acquire/release the buffers because it's done by
-            // the resampler.
-            if (t->needs & NEEDS_RESAMPLE) {
-                (t.get()->*t->hook)(outTemp, numFrames, mResampleTemp.get() /* naked ptr */, aux);
-            } else {
-
-                size_t outFrames = 0;
-
-                while (outFrames < numFrames) {
-                    t->buffer.frameCount = numFrames - outFrames;
-                    t->bufferProvider->getNextBuffer(&t->buffer);
-                    t->mIn = t->buffer.raw;
-                    // t->mIn == nullptr can happen if the track was flushed just after having
-                    // been enabled for mixing.
-                    if (t->mIn == nullptr) break;
-
-                    (t.get()->*t->hook)(
-                            outTemp + outFrames * t->mMixerChannelCount, t->buffer.frameCount,
-                            mResampleTemp.get() /* naked ptr */,
-                            aux != nullptr ? aux + outFrames : nullptr);
-                    outFrames += t->buffer.frameCount;
-
-                    t->bufferProvider->releaseBuffer(&t->buffer);
-                }
-            }
-        }
-        convertMixerFormat(t1->mainBuffer, t1->mMixerFormat,
-                outTemp, t1->mMixerInFormat, numFrames * t1->mMixerChannelCount);
-    }
-}
-
-// one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__oneTrack16BitsStereoNoResampling()
-{
-    ALOGVV("process__oneTrack16BitsStereoNoResampling\n");
-    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
-            "%zu != 1 tracks enabled", mEnabled.size());
-    const int name = mEnabled[0];
-    const std::shared_ptr<Track> &t = mTracks[name];
-
-    AudioBufferProvider::Buffer& b(t->buffer);
-
-    int32_t* out = t->mainBuffer;
-    float *fout = reinterpret_cast<float*>(out);
-    size_t numFrames = mFrameCount;
-
-    const int16_t vl = t->volume[0];
-    const int16_t vr = t->volume[1];
-    const uint32_t vrl = t->volumeRL;
-    while (numFrames) {
-        b.frameCount = numFrames;
-        t->bufferProvider->getNextBuffer(&b);
-        const int16_t *in = b.i16;
-
-        // in == NULL can happen if the track was flushed just after having
-        // been enabled for mixing.
-        if (in == NULL || (((uintptr_t)in) & 3)) {
-            if ( AUDIO_FORMAT_PCM_FLOAT == t->mMixerFormat ) {
-                 memset((char*)fout, 0, numFrames
-                         * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
-            } else {
-                 memset((char*)out, 0, numFrames
-                         * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
-            }
-            ALOGE_IF((((uintptr_t)in) & 3),
-                    "process__oneTrack16BitsStereoNoResampling: misaligned buffer"
-                    " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
-                    in, name, t->channelCount, t->needs, vrl, t->mVolume[0], t->mVolume[1]);
-            return;
-        }
-        size_t outFrames = b.frameCount;
-
-        switch (t->mMixerFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            do {
-                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                in += 2;
-                int32_t l = mulRL(1, rl, vrl);
-                int32_t r = mulRL(0, rl, vrl);
-                *fout++ = float_from_q4_27(l);
-                *fout++ = float_from_q4_27(r);
-                // Note: In case of later int16_t sink output,
-                // conversion and clamping is done by memcpy_to_i16_from_float().
-            } while (--outFrames);
-            break;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
-                // volume is boosted, so we might need to clamp even though
-                // we process only one track.
-                do {
-                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                    in += 2;
-                    int32_t l = mulRL(1, rl, vrl) >> 12;
-                    int32_t r = mulRL(0, rl, vrl) >> 12;
-                    // clamping...
-                    l = clamp16(l);
-                    r = clamp16(r);
-                    *out++ = (r<<16) | (l & 0xFFFF);
-                } while (--outFrames);
-            } else {
-                do {
-                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
-                    in += 2;
-                    int32_t l = mulRL(1, rl, vrl) >> 12;
-                    int32_t r = mulRL(0, rl, vrl) >> 12;
-                    *out++ = (r<<16) | (l & 0xFFFF);
-                } while (--outFrames);
-            }
-            break;
-        default:
-            LOG_ALWAYS_FATAL("bad mixer format: %d", t->mMixerFormat);
-        }
-        numFrames -= b.frameCount;
-        t->bufferProvider->releaseBuffer(&b);
-    }
-}
-
 /*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT;
 
 /*static*/ void AudioMixer::sInitRoutine()
@@ -1656,211 +523,71 @@
     DownmixerBufferProvider::init(); // for the downmixer
 }
 
-/* TODO: consider whether this level of optimization is necessary.
- * Perhaps just stick with a single for loop.
- */
-
-// Needs to derive a compile time constant (constexpr).  Could be targeted to go
-// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
-#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
-        (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
-
-/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE,
-        typename TO, typename TI, typename TV, typename TA, typename TAV>
-static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
-        const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+std::shared_ptr<AudioMixerBase::TrackBase> AudioMixer::preCreateTrack()
 {
-    switch (channels) {
-    case 1:
-        volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 2:
-        volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 3:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 4:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 5:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 6:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 7:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    case 8:
-        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
-                frameCount, in, aux, vol, volinc, vola, volainc);
-        break;
-    }
+    return std::make_shared<Track>();
 }
 
-/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE,
-        typename TO, typename TI, typename TV, typename TA, typename TAV>
-static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
-        const TI* in, TA* aux, const TV *vol, TAV vola)
+status_t AudioMixer::postCreateTrack(TrackBase *track)
 {
-    switch (channels) {
-    case 1:
-        volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 2:
-        volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 3:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 4:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 5:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 6:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 7:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
-        break;
-    case 8:
-        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
-        break;
+    Track* t = static_cast<Track*>(track);
+
+    audio_channel_mask_t channelMask = t->channelMask;
+    t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+    t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
+    channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
+    t->channelCount = audio_channel_count_from_out_mask(channelMask);
+    ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
+            "Non-stereo channel mask: %d\n", channelMask);
+    t->channelMask = channelMask;
+    t->mInputBufferProvider = NULL;
+    t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
+    t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
+    // haptic
+    t->mHapticPlaybackEnabled = false;
+    t->mHapticIntensity = HAPTIC_SCALE_NONE;
+    t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
+    t->mMixerHapticChannelCount = 0;
+    t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
+    t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
+    t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
+    t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+    t->mKeepContractedChannels = false;
+    // Check the downmixing (or upmixing) requirements.
+    status_t status = t->prepareForDownmix();
+    if (status != OK) {
+        ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
+        return BAD_VALUE;
     }
+    // prepareForDownmix() may change mDownmixRequiresFormat
+    ALOGVV("mMixerFormat:%#x  mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
+    t->prepareForReformat();
+    t->prepareForAdjustChannelsNonDestructive(mFrameCount);
+    t->prepareForAdjustChannels();
+    return OK;
 }
 
-/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * USEFLOATVOL (set to true if float volume is used)
- * ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
-    typename TO, typename TI, typename TA>
-void AudioMixer::Track::volumeMix(TO *out, size_t outFrames,
-        const TI *in, TA *aux, bool ramp)
+void AudioMixer::preProcess()
 {
-    if (USEFLOATVOL) {
-        if (ramp) {
-            volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
-                    mPrevVolume, mVolumeInc,
-#ifdef FLOAT_AUX
-                    &mPrevAuxLevel, mAuxInc
-#else
-                    &prevAuxLevel, auxInc
-#endif
-                );
-            if (ADJUSTVOL) {
-                adjustVolumeRamp(aux != NULL, true);
-            }
-        } else {
-            volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
-                    mVolume,
-#ifdef FLOAT_AUX
-                    mAuxLevel
-#else
-                    auxLevel
-#endif
-            );
-        }
-    } else {
-        if (ramp) {
-            volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
-                    prevVolume, volumeInc, &prevAuxLevel, auxInc);
-            if (ADJUSTVOL) {
-                adjustVolumeRamp(aux != NULL);
-            }
-        } else {
-            volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
-                    volume, auxLevel);
+    for (const auto &pair : mTracks) {
+        // Clear contracted buffer before processing if contracted channels are saved
+        const std::shared_ptr<TrackBase> &tb = pair.second;
+        Track *t = static_cast<Track*>(tb.get());
+        if (t->mKeepContractedChannels) {
+            t->clearContractedBuffer();
         }
     }
 }
 
-/* This process hook is called when there is a single track without
- * aux buffer, volume ramp, or resampling.
- * TODO: Update the hook selection: this can properly handle aux and ramp.
- *
- * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27)
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::process__noResampleOneTrack()
+void AudioMixer::postProcess()
 {
-    ALOGVV("process__noResampleOneTrack\n");
-    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
-            "%zu != 1 tracks enabled", mEnabled.size());
-    const std::shared_ptr<Track> &t = mTracks[mEnabled[0]];
-    const uint32_t channels = t->mMixerChannelCount;
-    TO* out = reinterpret_cast<TO*>(t->mainBuffer);
-    TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
-    const bool ramp = t->needsRamp();
-
-    for (size_t numFrames = mFrameCount; numFrames > 0; ) {
-        AudioBufferProvider::Buffer& b(t->buffer);
-        // get input buffer
-        b.frameCount = numFrames;
-        t->bufferProvider->getNextBuffer(&b);
-        const TI *in = reinterpret_cast<TI*>(b.raw);
-
-        // in == NULL can happen if the track was flushed just after having
-        // been enabled for mixing.
-        if (in == NULL || (((uintptr_t)in) & 3)) {
-            memset(out, 0, numFrames
-                    * channels * audio_bytes_per_sample(t->mMixerFormat));
-            ALOGE_IF((((uintptr_t)in) & 3), "process__noResampleOneTrack: bus error: "
-                    "buffer %p track %p, channels %d, needs %#x",
-                    in, &t, t->channelCount, t->needs);
-            return;
-        }
-
-        const size_t outFrames = b.frameCount;
-        t->volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, false /* ADJUSTVOL */> (
-                out, outFrames, in, aux, ramp);
-
-        out += outFrames * channels;
-        if (aux != NULL) {
-            aux += outFrames;
-        }
-        numFrames -= b.frameCount;
-
-        // release buffer
-        t->bufferProvider->releaseBuffer(&b);
-    }
-    if (ramp) {
-        t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value);
-    }
-}
-
-void AudioMixer::processHapticData()
-{
+    // Process haptic data.
     // Need to keep consistent with VibrationEffect.scale(int, float, int)
     for (const auto &pair : mGroups) {
         // process by group of tracks with same output main buffer.
         const auto &group = pair.second;
         for (const int name : group) {
-            const std::shared_ptr<Track> &t = mTracks[name];
+            const std::shared_ptr<Track> &t = getTrack(name);
             if (t->mHapticPlaybackEnabled) {
                 size_t sampleCount = mFrameCount * t->mMixerHapticChannelCount;
                 float gamma = t->getHapticScaleGamma();
@@ -1887,225 +614,5 @@
     }
 }
 
-/* This track hook is called to do resampling then mixing,
- * pulling from the track's upstream AudioBufferProvider.
- *
- * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::Track::track__Resample(TO* out, size_t outFrameCount, TO* temp, TA* aux)
-{
-    ALOGVV("track__Resample\n");
-    mResampler->setSampleRate(sampleRate);
-    const bool ramp = needsRamp();
-    if (ramp || aux != NULL) {
-        // if ramp:        resample with unity gain to temp buffer and scale/mix in 2nd step.
-        // if aux != NULL: resample with unity gain to temp buffer then apply send level.
-
-        mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
-        memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(TO));
-        mResampler->resample((int32_t*)temp, outFrameCount, bufferProvider);
-
-        volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
-                out, outFrameCount, temp, aux, ramp);
-
-    } else { // constant volume gain
-        mResampler->setVolume(mVolume[0], mVolume[1]);
-        mResampler->resample((int32_t*)out, outFrameCount, bufferProvider);
-    }
-}
-
-/* This track hook is called to mix a track, when no resampling is required.
- * The input buffer should be present in in.
- *
- * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
- * TO: int32_t (Q4.27) or float
- * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
- * TA: int32_t (Q4.27) or float
- */
-template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::Track::track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux)
-{
-    ALOGVV("track__NoResample\n");
-    const TI *in = static_cast<const TI *>(mIn);
-
-    volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
-            out, frameCount, in, aux, needsRamp());
-
-    // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
-    // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
-    in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * mMixerChannelCount;
-    mIn = in;
-}
-
-/* The Mixer engine generates either int32_t (Q4_27) or float data.
- * We use this function to convert the engine buffers
- * to the desired mixer output format, either int16_t (Q.15) or float.
- */
-/* static */
-void AudioMixer::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
-        void *in, audio_format_t mixerInFormat, size_t sampleCount)
-{
-    switch (mixerInFormat) {
-    case AUDIO_FORMAT_PCM_FLOAT:
-        switch (mixerOutFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out
-            break;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount);
-            break;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
-            break;
-        }
-        break;
-    case AUDIO_FORMAT_PCM_16_BIT:
-        switch (mixerOutFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            memcpy_to_float_from_q4_27((float*)out, (const int32_t*)in, sampleCount);
-            break;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            memcpy_to_i16_from_q4_27((int16_t*)out, (const int32_t*)in, sampleCount);
-            break;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
-            break;
-        }
-        break;
-    default:
-        LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-        break;
-    }
-}
-
-/* Returns the proper track hook to use for mixing the track into the output buffer.
- */
-/* static */
-AudioMixer::hook_t AudioMixer::Track::getTrackHook(int trackType, uint32_t channelCount,
-        audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
-{
-    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
-        switch (trackType) {
-        case TRACKTYPE_NOP:
-            return &Track::track__nop;
-        case TRACKTYPE_RESAMPLE:
-            return &Track::track__genericResample;
-        case TRACKTYPE_NORESAMPLEMONO:
-            return &Track::track__16BitsMono;
-        case TRACKTYPE_NORESAMPLE:
-            return &Track::track__16BitsStereo;
-        default:
-            LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
-            break;
-        }
-    }
-    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
-    switch (trackType) {
-    case TRACKTYPE_NOP:
-        return &Track::track__nop;
-    case TRACKTYPE_RESAMPLE:
-        switch (mixerInFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return (AudioMixer::hook_t) &Track::track__Resample<
-                    MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return (AudioMixer::hook_t) &Track::track__Resample<
-                    MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-            break;
-        }
-        break;
-    case TRACKTYPE_NORESAMPLEMONO:
-        switch (mixerInFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return (AudioMixer::hook_t) &Track::track__NoResample<
-                            MIXTYPE_MONOEXPAND, float /*TO*/, float /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return (AudioMixer::hook_t) &Track::track__NoResample<
-                            MIXTYPE_MONOEXPAND, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-            break;
-        }
-        break;
-    case TRACKTYPE_NORESAMPLE:
-        switch (mixerInFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return (AudioMixer::hook_t) &Track::track__NoResample<
-                    MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return (AudioMixer::hook_t) &Track::track__NoResample<
-                    MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-            break;
-        }
-        break;
-    default:
-        LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
-        break;
-    }
-    return NULL;
-}
-
-/* Returns the proper process hook for mixing tracks. Currently works only for
- * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
- *
- * TODO: Due to the special mixing considerations of duplicating to
- * a stereo output track, the input track cannot be MONO.  This should be
- * prevented by the caller.
- */
-/* static */
-AudioMixer::process_hook_t AudioMixer::getProcessHook(
-        int processType, uint32_t channelCount,
-        audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
-{
-    if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
-        LOG_ALWAYS_FATAL("bad processType: %d", processType);
-        return NULL;
-    }
-    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
-        return &AudioMixer::process__oneTrack16BitsStereoNoResampling;
-    }
-    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
-    switch (mixerInFormat) {
-    case AUDIO_FORMAT_PCM_FLOAT:
-        switch (mixerOutFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return &AudioMixer::process__noResampleOneTrack<
-                    MIXTYPE_MULTI_SAVEONLY, float /*TO*/, float /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return &AudioMixer::process__noResampleOneTrack<
-                    MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, float /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
-            break;
-        }
-        break;
-    case AUDIO_FORMAT_PCM_16_BIT:
-        switch (mixerOutFormat) {
-        case AUDIO_FORMAT_PCM_FLOAT:
-            return &AudioMixer::process__noResampleOneTrack<
-                    MIXTYPE_MULTI_SAVEONLY, float /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        case AUDIO_FORMAT_PCM_16_BIT:
-            return &AudioMixer::process__noResampleOneTrack<
-                    MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
-        default:
-            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
-            break;
-        }
-        break;
-    default:
-        LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
-        break;
-    }
-    return NULL;
-}
-
 // ----------------------------------------------------------------------------
 } // namespace android
diff --git a/media/libaudioprocessing/AudioMixerBase.cpp b/media/libaudioprocessing/AudioMixerBase.cpp
new file mode 100644
index 0000000..75c077d
--- /dev/null
+++ b/media/libaudioprocessing/AudioMixerBase.cpp
@@ -0,0 +1,1692 @@
+/*
+**
+** Copyright 2019, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioMixer"
+//#define LOG_NDEBUG 0
+
+#include <sstream>
+#include <string.h>
+
+#include <audio_utils/primitives.h>
+#include <cutils/compiler.h>
+#include <media/AudioMixerBase.h>
+#include <utils/Log.h>
+
+#include "AudioMixerOps.h"
+
+// The FCC_2 macro refers to the Fixed Channel Count of 2 for the legacy integer mixer.
+#ifndef FCC_2
+#define FCC_2 2
+#endif
+
+// Look for MONO_HACK for any Mono hack involving legacy mono channel to
+// stereo channel conversion.
+
+/* VERY_VERY_VERBOSE_LOGGING will show exactly which process hook and track hook is
+ * being used. This is a considerable amount of log spam, so don't enable unless you
+ * are verifying the hook based code.
+ */
+//#define VERY_VERY_VERBOSE_LOGGING
+#ifdef VERY_VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+//define ALOGVV printf  // for test-mixer.cpp
+#else
+#define ALOGVV(a...) do { } while (0)
+#endif
+
+// TODO: remove BLOCKSIZE unit of processing - it isn't needed anymore.
+static constexpr int BLOCKSIZE = 16;
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+bool AudioMixerBase::isValidFormat(audio_format_t format) const
+{
+    switch (format) {
+    case AUDIO_FORMAT_PCM_8_BIT:
+    case AUDIO_FORMAT_PCM_16_BIT:
+    case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+    case AUDIO_FORMAT_PCM_32_BIT:
+    case AUDIO_FORMAT_PCM_FLOAT:
+        return true;
+    default:
+        return false;
+    }
+}
+
+bool AudioMixerBase::isValidChannelMask(audio_channel_mask_t channelMask) const
+{
+    return audio_channel_count_from_out_mask(channelMask) <= MAX_NUM_CHANNELS;
+}
+
+std::shared_ptr<AudioMixerBase::TrackBase> AudioMixerBase::preCreateTrack()
+{
+    return std::make_shared<TrackBase>();
+}
+
+status_t AudioMixerBase::create(
+        int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId)
+{
+    LOG_ALWAYS_FATAL_IF(exists(name), "name %d already exists", name);
+
+    if (!isValidChannelMask(channelMask)) {
+        ALOGE("%s invalid channelMask: %#x", __func__, channelMask);
+        return BAD_VALUE;
+    }
+    if (!isValidFormat(format)) {
+        ALOGE("%s invalid format: %#x", __func__, format);
+        return BAD_VALUE;
+    }
+
+    auto t = preCreateTrack();
+    {
+        // TODO: move initialization to the Track constructor.
+        // assume default parameters for the track, except where noted below
+        t->needs = 0;
+
+        // Integer volume.
+        // Currently integer volume is kept for the legacy integer mixer.
+        // Will be removed when the legacy mixer path is removed.
+        t->volume[0] = 0;
+        t->volume[1] = 0;
+        t->prevVolume[0] = 0 << 16;
+        t->prevVolume[1] = 0 << 16;
+        t->volumeInc[0] = 0;
+        t->volumeInc[1] = 0;
+        t->auxLevel = 0;
+        t->auxInc = 0;
+        t->prevAuxLevel = 0;
+
+        // Floating point volume.
+        t->mVolume[0] = 0.f;
+        t->mVolume[1] = 0.f;
+        t->mPrevVolume[0] = 0.f;
+        t->mPrevVolume[1] = 0.f;
+        t->mVolumeInc[0] = 0.;
+        t->mVolumeInc[1] = 0.;
+        t->mAuxLevel = 0.;
+        t->mAuxInc = 0.;
+        t->mPrevAuxLevel = 0.;
+
+        // no initialization needed
+        // t->frameCount
+        t->channelCount = audio_channel_count_from_out_mask(channelMask);
+        t->enabled = false;
+        ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
+                "Non-stereo channel mask: %d\n", channelMask);
+        t->channelMask = channelMask;
+        t->sessionId = sessionId;
+        // setBufferProvider(name, AudioBufferProvider *) is required before enable(name)
+        t->bufferProvider = NULL;
+        t->buffer.raw = NULL;
+        // no initialization needed
+        // t->buffer.frameCount
+        t->hook = NULL;
+        t->mIn = NULL;
+        t->sampleRate = mSampleRate;
+        // setParameter(name, TRACK, MAIN_BUFFER, mixBuffer) is required before enable(name)
+        t->mainBuffer = NULL;
+        t->auxBuffer = NULL;
+        t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
+        t->mFormat = format;
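+        // Internal mixing runs in float when the new mixer path is enabled, and falls
+        // back to 16-bit PCM samples for the legacy integer mixer.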
+        t->mMixerInFormat = kUseFloat && kUseNewMixer ?
+                AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+        t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
+                AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
+        t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
+        status_t status = postCreateTrack(t.get());
+        if (status != OK) return status;
+        mTracks[name] = t;
+        return OK;
+    }
+}
+
+// Called when channel masks have changed for a track name
+bool AudioMixerBase::setChannelMasks(int name,
+        audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+    if (trackChannelMask == track->channelMask && mixerChannelMask == track->mMixerChannelMask) {
+        return false;  // no need to change
+    }
+    // always recompute for both channel masks even if only one has changed.
+    const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
+    const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
+
+    ALOG_ASSERT(trackChannelCount && mixerChannelCount);
+    track->channelMask = trackChannelMask;
+    track->channelCount = trackChannelCount;
+    track->mMixerChannelMask = mixerChannelMask;
+    track->mMixerChannelCount = mixerChannelCount;
+
+    // Resampler channels may have changed.
+    track->recreateResampler(mSampleRate);
+    return true;
+}
+
+void AudioMixerBase::destroy(int name)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    ALOGV("deleteTrackName(%d)", name);
+
+    if (mTracks[name]->enabled) {
+        invalidate();
+    }
+    mTracks.erase(name); // deallocate track
+}
+
+void AudioMixerBase::enable(int name)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+    if (!track->enabled) {
+        track->enabled = true;
+        ALOGV("enable(%d)", name);
+        invalidate();
+    }
+}
+
+void AudioMixerBase::disable(int name)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+    if (track->enabled) {
+        track->enabled = false;
+        ALOGV("disable(%d)", name);
+        invalidate();
+    }
+}
+
+/* Sets the volume ramp variables for the AudioMixer.
+ *
+ * The volume ramp variables are used to transition from the previous
+ * volume to the set volume.  ramp controls the duration of the transition.
+ * Its value is typically one state framecount period, but may also be 0,
+ * meaning "immediate."
+ *
+ * FIXME: 1) Volume ramp is enabled only if there is a nonzero integer increment
+ * even if there is a nonzero floating point increment (in that case, the volume
+ * change is immediate).  This restriction should be changed when the legacy mixer
+ * is removed (see #2).
+ * FIXME: 2) Integer volume variables are used for Legacy mixing and should be removed
+ * when no longer needed.
+ *
+ * @param newVolume set volume target in floating point [0.0, 1.0].
+ * @param ramp number of frames to increment over. if ramp is 0, the volume
+ * should be set immediately.  Currently ramp should not exceed 65535 (frames).
+ * @param pIntSetVolume pointer to the U4.12 integer target volume, set on return.
+ * @param pIntPrevVolume pointer to the U4.28 integer previous volume, set on return.
+ * @param pIntVolumeInc pointer to the U4.28 increment per output audio frame, set on return.
+ * @param pSetVolume pointer to the float target volume, set on return.
+ * @param pPrevVolume pointer to the float previous volume, set on return.
+ * @param pVolumeInc pointer to the float increment per output audio frame, set on return.
+ * @return true if the volume has changed, false if volume is same.
+ */
+static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
+        int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
+        float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
+    // check floating point volume to see if it is identical to the previously
+    // set volume.
+    // We do not use a tolerance here (and reject changes too small)
+    // as it may be confusing to use a different value than the one set.
+    // If the resulting volume is too small to ramp, it is a direct set of the volume.
+    if (newVolume == *pSetVolume) {
+        return false;
+    }
+    if (newVolume < 0) {
+        newVolume = 0; // should not have negative volumes
+    } else {
+        switch (fpclassify(newVolume)) {
+        case FP_SUBNORMAL:
+        case FP_NAN:
+            newVolume = 0;
+            break;
+        case FP_ZERO:
+            break; // zero volume is fine
+        case FP_INFINITE:
+            // Infinite volume could be handled consistently since
+            // floating point math saturates at infinities,
+            // but we limit volume to unity gain float.
+            // ramp = 0; break;
+            //
+            newVolume = AudioMixerBase::UNITY_GAIN_FLOAT;
+            break;
+        case FP_NORMAL:
+        default:
+            // Floating point does not have problems with overflow wrap
+            // that integer has.  However, we limit the volume to
+            // unity gain here.
+            // TODO: Revisit the volume limitation and perhaps parameterize.
+            if (newVolume > AudioMixerBase::UNITY_GAIN_FLOAT) {
+                newVolume = AudioMixerBase::UNITY_GAIN_FLOAT;
+            }
+            break;
+        }
+    }
+
+    // set floating point volume ramp
+    if (ramp != 0) {
+        // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
+        // is no computational mismatch; hence equality is checked here.
+        ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
+                " prev:%f  set_to:%f", *pPrevVolume, *pSetVolume);
+        const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
+        // could be inf, cannot be nan, subnormal
+        const float maxv = std::max(newVolume, *pPrevVolume);
+
+        if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
+                && maxv + inc != maxv) { // inc must make forward progress
+            *pVolumeInc = inc;
+            // ramp is set now.
+            // Note: if newVolume is 0, then near the end of the ramp,
+            // it may be possible that the ramped volume may be subnormal or
+            // temporarily negative by a small amount or subnormal due to floating
+            // point inaccuracies.
+        } else {
+            ramp = 0; // ramp not allowed
+        }
+    }
+
+    // compute and check integer volume, no need to check negative values
+    // The integer volume is limited to "unity_gain" to avoid wrapping and other
+    // audio artifacts, so it never reaches the range limit of U4.28.
+    // We safely use signed 16 and 32 bit integers here.
+    const float scaledVolume = newVolume * AudioMixerBase::UNITY_GAIN_INT; // not neg, subnormal, nan
+    const int32_t intVolume = (scaledVolume >= (float)AudioMixerBase::UNITY_GAIN_INT) ?
+            AudioMixerBase::UNITY_GAIN_INT : (int32_t)scaledVolume;
+
+    // set integer volume ramp
+    if (ramp != 0) {
+        // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
+        // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
+        // is no computational mismatch; hence equality is checked here.
+        ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
+                " prev:%d  set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
+        const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
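+        // e.g. with UNITY_GAIN_INT == 0x1000 (U4.12), ramping from silence to unity gain
+        // over 256 frames gives inc = (0x1000 << 16) / 256 = 0x100000 per frame in U4.28.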
+
+        if (inc != 0) { // inc must make forward progress
+            *pIntVolumeInc = inc;
+        } else {
+            ramp = 0; // ramp not allowed
+        }
+    }
+
+    // if no ramp, or ramp not allowed, then clear float and integer increments
+    if (ramp == 0) {
+        *pVolumeInc = 0;
+        *pPrevVolume = newVolume;
+        *pIntVolumeInc = 0;
+        *pIntPrevVolume = intVolume << 16;
+    }
+    *pSetVolume = newVolume;
+    *pIntSetVolume = intVolume;
+    return true;
+}
+
+void AudioMixerBase::setParameter(int name, int target, int param, void *value)
+{
+    LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
+    const std::shared_ptr<TrackBase> &track = mTracks[name];
+
+    int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
+    int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
+
+    switch (target) {
+
+    case TRACK:
+        switch (param) {
+        case CHANNEL_MASK: {
+            const audio_channel_mask_t trackChannelMask =
+                static_cast<audio_channel_mask_t>(valueInt);
+            if (setChannelMasks(name, trackChannelMask, track->mMixerChannelMask)) {
+                ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
+                invalidate();
+            }
+            } break;
+        case MAIN_BUFFER:
+            if (track->mainBuffer != valueBuf) {
+                track->mainBuffer = valueBuf;
+                ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
+                invalidate();
+            }
+            break;
+        case AUX_BUFFER:
+            if (track->auxBuffer != valueBuf) {
+                track->auxBuffer = valueBuf;
+                ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
+                invalidate();
+            }
+            break;
+        case FORMAT: {
+            audio_format_t format = static_cast<audio_format_t>(valueInt);
+            if (track->mFormat != format) {
+                ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
+                track->mFormat = format;
+                ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
+                invalidate();
+            }
+            } break;
+        case MIXER_FORMAT: {
+            audio_format_t format = static_cast<audio_format_t>(valueInt);
+            if (track->mMixerFormat != format) {
+                track->mMixerFormat = format;
+                ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
+            }
+            } break;
+        case MIXER_CHANNEL_MASK: {
+            const audio_channel_mask_t mixerChannelMask =
+                    static_cast<audio_channel_mask_t>(valueInt);
+            if (setChannelMasks(name, track->channelMask, mixerChannelMask)) {
+                ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
+                invalidate();
+            }
+            } break;
+        default:
+            LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
+        }
+        break;
+
+    case RESAMPLE:
+        switch (param) {
+        case SAMPLE_RATE:
+            ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
+            if (track->setResampler(uint32_t(valueInt), mSampleRate)) {
+                ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
+                        uint32_t(valueInt));
+                invalidate();
+            }
+            break;
+        case RESET:
+            track->resetResampler();
+            invalidate();
+            break;
+        case REMOVE:
+            track->mResampler.reset(nullptr);
+            track->sampleRate = mSampleRate;
+            invalidate();
+            break;
+        default:
+            LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
+        }
+        break;
+
+    case RAMP_VOLUME:
+    case VOLUME:
+        switch (param) {
+        case AUXLEVEL:
+            if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+                    target == RAMP_VOLUME ? mFrameCount : 0,
+                    &track->auxLevel, &track->prevAuxLevel, &track->auxInc,
+                    &track->mAuxLevel, &track->mPrevAuxLevel, &track->mAuxInc)) {
+                ALOGV("setParameter(%s, AUXLEVEL: %04x)",
+                        target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track->auxLevel);
+                invalidate();
+            }
+            break;
+        default:
+            if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
+                if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
+                        target == RAMP_VOLUME ? mFrameCount : 0,
+                        &track->volume[param - VOLUME0],
+                        &track->prevVolume[param - VOLUME0],
+                        &track->volumeInc[param - VOLUME0],
+                        &track->mVolume[param - VOLUME0],
+                        &track->mPrevVolume[param - VOLUME0],
+                        &track->mVolumeInc[param - VOLUME0])) {
+                    ALOGV("setParameter(%s, VOLUME%d: %04x)",
+                            target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
+                                    track->volume[param - VOLUME0]);
+                    invalidate();
+                }
+            } else {
+                LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
+            }
+        }
+        break;
+
+    default:
+        LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
+    }
+}
+
+bool AudioMixerBase::TrackBase::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
+{
+    if (trackSampleRate != devSampleRate || mResampler.get() != nullptr) {
+        if (sampleRate != trackSampleRate) {
+            sampleRate = trackSampleRate;
+            if (mResampler.get() == nullptr) {
+                ALOGV("Creating resampler from track %d Hz to device %d Hz",
+                        trackSampleRate, devSampleRate);
+                AudioResampler::src_quality quality;
+                // force lowest quality level resampler if use case isn't music or video
+                // FIXME this is flawed for dynamic sample rates, as we choose the resampler
+                // quality level based on the initial ratio, but that could change later.
+                // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
+                if (isMusicRate(trackSampleRate)) {
+                    quality = AudioResampler::DEFAULT_QUALITY;
+                } else {
+                    quality = AudioResampler::DYN_LOW_QUALITY;
+                }
+
+                // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+                // but if none exists, it is the channel count (1 for mono).
+                const int resamplerChannelCount = getOutputChannelCount();
+                ALOGVV("Creating resampler:"
+                        " format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
+                        mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
+                mResampler.reset(AudioResampler::create(
+                        mMixerInFormat,
+                        resamplerChannelCount,
+                        devSampleRate, quality));
+            }
+            return true;
+        }
+    }
+    return false;
+}
+
+/* Checks to see if the volume ramp has completed and clears the increment
+ * variables appropriately.
+ *
+ * FIXME: There is code to handle int/float ramp variable switchover should it not
+ * complete within a mixer buffer processing call, but it is preferred to avoid switchover
+ * due to precision issues.  The switchover code is included for legacy code purposes
+ * and can be removed once the integer volume is removed.
+ *
+ * It is not sufficient to clear only the volumeInc integer variable because
+ * if one channel requires ramping, all channels are ramped.
+ *
+ * There is a bit of duplicated code here, but it keeps backward compatibility.
+ */
+void AudioMixerBase::TrackBase::adjustVolumeRamp(bool aux, bool useFloat)
+{
+    if (useFloat) {
+        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
+            if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
+                     (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
+                volumeInc[i] = 0;
+                prevVolume[i] = volume[i] << 16;
+                mVolumeInc[i] = 0.;
+                mPrevVolume[i] = mVolume[i];
+            } else {
+                //ALOGV("ramp: %f %f %f", mVolume[i], mPrevVolume[i], mVolumeInc[i]);
+                prevVolume[i] = u4_28_from_float(mPrevVolume[i]);
+            }
+        }
+    } else {
+        for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
+            if (((volumeInc[i]>0) && (((prevVolume[i]+volumeInc[i])>>16) >= volume[i])) ||
+                    ((volumeInc[i]<0) && (((prevVolume[i]+volumeInc[i])>>16) <= volume[i]))) {
+                volumeInc[i] = 0;
+                prevVolume[i] = volume[i] << 16;
+                mVolumeInc[i] = 0.;
+                mPrevVolume[i] = mVolume[i];
+            } else {
+                //ALOGV("ramp: %d %d %d", volume[i] << 16, prevVolume[i], volumeInc[i]);
+                mPrevVolume[i]  = float_from_u4_28(prevVolume[i]);
+            }
+        }
+    }
+
+    if (aux) {
+#ifdef FLOAT_AUX
+        if (useFloat) {
+            if ((mAuxInc > 0.f && mPrevAuxLevel + mAuxInc >= mAuxLevel) ||
+                    (mAuxInc < 0.f && mPrevAuxLevel + mAuxInc <= mAuxLevel)) {
+                auxInc = 0;
+                prevAuxLevel = auxLevel << 16;
+                mAuxInc = 0.f;
+                mPrevAuxLevel = mAuxLevel;
+            }
+        } else
+#endif
+        if ((auxInc > 0 && ((prevAuxLevel + auxInc) >> 16) >= auxLevel) ||
+                (auxInc < 0 && ((prevAuxLevel + auxInc) >> 16) <= auxLevel)) {
+            auxInc = 0;
+            prevAuxLevel = auxLevel << 16;
+            mAuxInc = 0.f;
+            mPrevAuxLevel = mAuxLevel;
+        }
+    }
+}
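+
+/* Informal note on the fixed-point forms used by the legacy integer ramp above
+ * (inferred from the code; a sketch, not normative documentation):
+ *   volume[]     is U4.12, so unity gain is UNITY_GAIN_INT == 0x1000;
+ *   prevVolume[] is U4.28, i.e. volume << 16, so unity gain is 0x10000000;
+ *   the 16-bit track hooks below shift prevVolume >> 16 back to U4.12 each frame,
+ *   and use >> 17 on the aux level because the aux sample is the (l + r) sum.
+ * u4_28_from_float() / float_from_u4_28() convert between the float and U4.28 forms,
+ * e.g. u4_28_from_float(1.0f) == 1 << 28.
+ */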
+
+void AudioMixerBase::TrackBase::recreateResampler(uint32_t devSampleRate)
+{
+    if (mResampler.get() != nullptr) {
+        const uint32_t resetToSampleRate = sampleRate;
+        mResampler.reset(nullptr);
+        sampleRate = devSampleRate; // without resampler, track rate is device sample rate.
+        // recreate the resampler with updated format, channels, saved sampleRate.
+        setResampler(resetToSampleRate /*trackSampleRate*/, devSampleRate);
+    }
+}
+
+size_t AudioMixerBase::getUnreleasedFrames(int name) const
+{
+    const auto it = mTracks.find(name);
+    if (it != mTracks.end()) {
+        return it->second->getUnreleasedFrames();
+    }
+    return 0;
+}
+
+std::string AudioMixerBase::trackNames() const
+{
+    std::stringstream ss;
+    for (const auto &pair : mTracks) {
+        ss << pair.first << " ";
+    }
+    return ss.str();
+}
+
+void AudioMixerBase::process__validate()
+{
+    // TODO: fix all16BitsStereoNoResample logic to
+    // either properly handle muted tracks (it should ignore them)
+    // or remove altogether as an obsolete optimization.
+    bool all16BitsStereoNoResample = true;
+    bool resampling = false;
+    bool volumeRamp = false;
+
+    mEnabled.clear();
+    mGroups.clear();
+    for (const auto &pair : mTracks) {
+        const int name = pair.first;
+        const std::shared_ptr<TrackBase> &t = pair.second;
+        if (!t->enabled) continue;
+
+        mEnabled.emplace_back(name);  // we add to mEnabled in order of name.
+        mGroups[t->mainBuffer].emplace_back(name); // mGroups also in order of name.
+
+        uint32_t n = 0;
+        // FIXME can overflow (mask is only 3 bits)
+        n |= NEEDS_CHANNEL_1 + t->channelCount - 1;
+        if (t->doesResample()) {
+            n |= NEEDS_RESAMPLE;
+        }
+        if (t->auxLevel != 0 && t->auxBuffer != NULL) {
+            n |= NEEDS_AUX;
+        }
+
+        if (t->volumeInc[0]|t->volumeInc[1]) {
+            volumeRamp = true;
+        } else if (!t->doesResample() && t->volumeRL == 0) {
+            n |= NEEDS_MUTE;
+        }
+        t->needs = n;
+
+        if (n & NEEDS_MUTE) {
+            t->hook = &TrackBase::track__nop;
+        } else {
+            if (n & NEEDS_AUX) {
+                all16BitsStereoNoResample = false;
+            }
+            if (n & NEEDS_RESAMPLE) {
+                all16BitsStereoNoResample = false;
+                resampling = true;
+                t->hook = TrackBase::getTrackHook(TRACKTYPE_RESAMPLE, t->mMixerChannelCount,
+                        t->mMixerInFormat, t->mMixerFormat);
+                ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
+                        "Track %d needs downmix + resample", name);
+            } else {
+                if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
+                    t->hook = TrackBase::getTrackHook(
+                            (t->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO  // TODO: MONO_HACK
+                                    && t->channelMask == AUDIO_CHANNEL_OUT_MONO)
+                                ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
+                            t->mMixerChannelCount,
+                            t->mMixerInFormat, t->mMixerFormat);
+                    all16BitsStereoNoResample = false;
+                }
+                if ((n & NEEDS_CHANNEL_COUNT__MASK) >= NEEDS_CHANNEL_2){
+                    t->hook = TrackBase::getTrackHook(TRACKTYPE_NORESAMPLE, t->mMixerChannelCount,
+                            t->mMixerInFormat, t->mMixerFormat);
+                    ALOGV_IF((n & NEEDS_CHANNEL_COUNT__MASK) > NEEDS_CHANNEL_2,
+                            "Track %d needs downmix", name);
+                }
+            }
+        }
+    }
+
+    // select the processing hooks
+    mHook = &AudioMixerBase::process__nop;
+    if (mEnabled.size() > 0) {
+        if (resampling) {
+            if (mOutputTemp.get() == nullptr) {
+                mOutputTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
+            }
+            if (mResampleTemp.get() == nullptr) {
+                mResampleTemp.reset(new int32_t[MAX_NUM_CHANNELS * mFrameCount]);
+            }
+            mHook = &AudioMixerBase::process__genericResampling;
+        } else {
+            // we keep temp arrays around.
+            mHook = &AudioMixerBase::process__genericNoResampling;
+            if (all16BitsStereoNoResample && !volumeRamp) {
+                if (mEnabled.size() == 1) {
+                    const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+                    if ((t->needs & NEEDS_MUTE) == 0) {
+                        // The check prevents a muted track from acquiring a process hook.
+                        //
+                        // This is dangerous if the track is MONO as that requires
+                        // special case handling due to implicit channel duplication.
+                        // Stereo or Multichannel should actually be fine here.
+                        mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                                t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
+                    }
+                }
+            }
+        }
+    }
+
+    ALOGV("mixer configuration change: %zu "
+        "all16BitsStereoNoResample=%d, resampling=%d, volumeRamp=%d",
+        mEnabled.size(), all16BitsStereoNoResample, resampling, volumeRamp);
+
+    process();
+
+    // Now that the volume ramp has been done, set optimal state and
+    // track hooks for subsequent mixer process
+    if (mEnabled.size() > 0) {
+        bool allMuted = true;
+
+        for (const int name : mEnabled) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            if (!t->doesResample() && t->volumeRL == 0) {
+                t->needs |= NEEDS_MUTE;
+                t->hook = &TrackBase::track__nop;
+            } else {
+                allMuted = false;
+            }
+        }
+        if (allMuted) {
+            mHook = &AudioMixerBase::process__nop;
+        } else if (all16BitsStereoNoResample) {
+            if (mEnabled.size() == 1) {
+                //const int i = 31 - __builtin_clz(enabledTracks);
+                const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+                // Muted single tracks handled by allMuted above.
+                mHook = getProcessHook(PROCESSTYPE_NORESAMPLEONETRACK,
+                        t->mMixerChannelCount, t->mMixerInFormat, t->mMixerFormat);
+            }
+        }
+    }
+}
+
+void AudioMixerBase::TrackBase::track__genericResample(
+        int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
+{
+    ALOGVV("track__genericResample\n");
+    mResampler->setSampleRate(sampleRate);
+
+    // ramp gain - resample to temp buffer and scale/mix in 2nd step
+    if (aux != NULL) {
+        // always resample with unity gain when sending to auxiliary buffer to be able
+        // to apply send level after resampling
+        mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+        memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
+        mResampler->resample(temp, outFrameCount, bufferProvider);
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+            volumeRampStereo(out, outFrameCount, temp, aux);
+        } else {
+            volumeStereo(out, outFrameCount, temp, aux);
+        }
+    } else {
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+            mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+            memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
+            mResampler->resample(temp, outFrameCount, bufferProvider);
+            volumeRampStereo(out, outFrameCount, temp, aux);
+        }
+
+        // constant gain
+        else {
+            mResampler->setVolume(mVolume[0], mVolume[1]);
+            mResampler->resample(out, outFrameCount, bufferProvider);
+        }
+    }
+}
+
+void AudioMixerBase::TrackBase::track__nop(int32_t* out __unused,
+        size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
+{
+}
+
+void AudioMixerBase::TrackBase::volumeRampStereo(
+        int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+{
+    int32_t vl = prevVolume[0];
+    int32_t vr = prevVolume[1];
+    const int32_t vlInc = volumeInc[0];
+    const int32_t vrInc = volumeInc[1];
+
+    //ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+    //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
+    //       (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+    // ramp volume
+    if (CC_UNLIKELY(aux != NULL)) {
+        int32_t va = prevAuxLevel;
+        const int32_t vaInc = auxInc;
+        int32_t l;
+        int32_t r;
+
+        do {
+            l = (*temp++ >> 12);
+            r = (*temp++ >> 12);
+            *out++ += (vl >> 16) * l;
+            *out++ += (vr >> 16) * r;
+            *aux++ += (va >> 17) * (l + r);
+            vl += vlInc;
+            vr += vrInc;
+            va += vaInc;
+        } while (--frameCount);
+        prevAuxLevel = va;
+    } else {
+        do {
+            *out++ += (vl >> 16) * (*temp++ >> 12);
+            *out++ += (vr >> 16) * (*temp++ >> 12);
+            vl += vlInc;
+            vr += vrInc;
+        } while (--frameCount);
+    }
+    prevVolume[0] = vl;
+    prevVolume[1] = vr;
+    adjustVolumeRamp(aux != NULL);
+}
+
+void AudioMixerBase::TrackBase::volumeStereo(
+        int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
+{
+    const int16_t vl = volume[0];
+    const int16_t vr = volume[1];
+
+    if (CC_UNLIKELY(aux != NULL)) {
+        const int16_t va = auxLevel;
+        do {
+            int16_t l = (int16_t)(*temp++ >> 12);
+            int16_t r = (int16_t)(*temp++ >> 12);
+            out[0] = mulAdd(l, vl, out[0]);
+            int16_t a = (int16_t)(((int32_t)l + r) >> 1);
+            out[1] = mulAdd(r, vr, out[1]);
+            out += 2;
+            aux[0] = mulAdd(a, va, aux[0]);
+            aux++;
+        } while (--frameCount);
+    } else {
+        do {
+            int16_t l = (int16_t)(*temp++ >> 12);
+            int16_t r = (int16_t)(*temp++ >> 12);
+            out[0] = mulAdd(l, vl, out[0]);
+            out[1] = mulAdd(r, vr, out[1]);
+            out += 2;
+        } while (--frameCount);
+    }
+}
+
+void AudioMixerBase::TrackBase::track__16BitsStereo(
+        int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
+{
+    ALOGVV("track__16BitsStereo\n");
+    const int16_t *in = static_cast<const int16_t *>(mIn);
+
+    if (CC_UNLIKELY(aux != NULL)) {
+        int32_t l;
+        int32_t r;
+        // ramp gain
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+            int32_t vl = prevVolume[0];
+            int32_t vr = prevVolume[1];
+            int32_t va = prevAuxLevel;
+            const int32_t vlInc = volumeInc[0];
+            const int32_t vrInc = volumeInc[1];
+            const int32_t vaInc = auxInc;
+            // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+            //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
+            //        (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+            do {
+                l = (int32_t)*in++;
+                r = (int32_t)*in++;
+                *out++ += (vl >> 16) * l;
+                *out++ += (vr >> 16) * r;
+                *aux++ += (va >> 17) * (l + r);
+                vl += vlInc;
+                vr += vrInc;
+                va += vaInc;
+            } while (--frameCount);
+
+            prevVolume[0] = vl;
+            prevVolume[1] = vr;
+            prevAuxLevel = va;
+            adjustVolumeRamp(true);
+        }
+
+        // constant gain
+        else {
+            const uint32_t vrl = volumeRL;
+            const int16_t va = (int16_t)auxLevel;
+            do {
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
+                in += 2;
+                out[0] = mulAddRL(1, rl, vrl, out[0]);
+                out[1] = mulAddRL(0, rl, vrl, out[1]);
+                out += 2;
+                aux[0] = mulAdd(a, va, aux[0]);
+                aux++;
+            } while (--frameCount);
+        }
+    } else {
+        // ramp gain
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+            int32_t vl = prevVolume[0];
+            int32_t vr = prevVolume[1];
+            const int32_t vlInc = volumeInc[0];
+            const int32_t vrInc = volumeInc[1];
+
+            // ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+            //        t, vlInc/65536.0f, vl/65536.0f, volume[0],
+            //        (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+            do {
+                *out++ += (vl >> 16) * (int32_t) *in++;
+                *out++ += (vr >> 16) * (int32_t) *in++;
+                vl += vlInc;
+                vr += vrInc;
+            } while (--frameCount);
+
+            prevVolume[0] = vl;
+            prevVolume[1] = vr;
+            adjustVolumeRamp(false);
+        }
+
+        // constant gain
+        else {
+            const uint32_t vrl = volumeRL;
+            do {
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                in += 2;
+                out[0] = mulAddRL(1, rl, vrl, out[0]);
+                out[1] = mulAddRL(0, rl, vrl, out[1]);
+                out += 2;
+            } while (--frameCount);
+        }
+    }
+    mIn = in;
+}
+
+void AudioMixerBase::TrackBase::track__16BitsMono(
+        int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
+{
+    ALOGVV("track__16BitsMono\n");
+    const int16_t *in = static_cast<int16_t const *>(mIn);
+
+    if (CC_UNLIKELY(aux != NULL)) {
+        // ramp gain
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
+            int32_t vl = prevVolume[0];
+            int32_t vr = prevVolume[1];
+            int32_t va = prevAuxLevel;
+            const int32_t vlInc = volumeInc[0];
+            const int32_t vrInc = volumeInc[1];
+            const int32_t vaInc = auxInc;
+
+            // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+            //         t, vlInc/65536.0f, vl/65536.0f, volume[0],
+            //         (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+            do {
+                int32_t l = *in++;
+                *out++ += (vl >> 16) * l;
+                *out++ += (vr >> 16) * l;
+                *aux++ += (va >> 16) * l;
+                vl += vlInc;
+                vr += vrInc;
+                va += vaInc;
+            } while (--frameCount);
+
+            prevVolume[0] = vl;
+            prevVolume[1] = vr;
+            prevAuxLevel = va;
+            adjustVolumeRamp(true);
+        }
+        // constant gain
+        else {
+            const int16_t vl = volume[0];
+            const int16_t vr = volume[1];
+            const int16_t va = (int16_t)auxLevel;
+            do {
+                int16_t l = *in++;
+                out[0] = mulAdd(l, vl, out[0]);
+                out[1] = mulAdd(l, vr, out[1]);
+                out += 2;
+                aux[0] = mulAdd(l, va, aux[0]);
+                aux++;
+            } while (--frameCount);
+        }
+    } else {
+        // ramp gain
+        if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
+            int32_t vl = prevVolume[0];
+            int32_t vr = prevVolume[1];
+            const int32_t vlInc = volumeInc[0];
+            const int32_t vrInc = volumeInc[1];
+
+            // ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
+            //         t, vlInc/65536.0f, vl/65536.0f, volume[0],
+            //         (vl + vlInc*frameCount)/65536.0f, frameCount);
+
+            do {
+                int32_t l = *in++;
+                *out++ += (vl >> 16) * l;
+                *out++ += (vr >> 16) * l;
+                vl += vlInc;
+                vr += vrInc;
+            } while (--frameCount);
+
+            prevVolume[0] = vl;
+            prevVolume[1] = vr;
+            adjustVolumeRamp(false);
+        }
+        // constant gain
+        else {
+            const int16_t vl = volume[0];
+            const int16_t vr = volume[1];
+            do {
+                int16_t l = *in++;
+                out[0] = mulAdd(l, vl, out[0]);
+                out[1] = mulAdd(l, vr, out[1]);
+                out += 2;
+            } while (--frameCount);
+        }
+    }
+    mIn = in;
+}
+
+// no-op case
+void AudioMixerBase::process__nop()
+{
+    ALOGVV("process__nop\n");
+
+    for (const auto &pair : mGroups) {
+        // process by group of tracks with same output buffer to
+        // avoid multiple memset() on same buffer
+        const auto &group = pair.second;
+
+        const std::shared_ptr<TrackBase> &t = mTracks[group[0]];
+        memset(t->mainBuffer, 0,
+                mFrameCount * audio_bytes_per_frame(t->getMixerChannelCount(), t->mMixerFormat));
+
+        // now consume data
+        for (const int name : group) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            size_t outFrames = mFrameCount;
+            while (outFrames) {
+                t->buffer.frameCount = outFrames;
+                t->bufferProvider->getNextBuffer(&t->buffer);
+                if (t->buffer.raw == NULL) break;
+                outFrames -= t->buffer.frameCount;
+                t->bufferProvider->releaseBuffer(&t->buffer);
+            }
+        }
+    }
+}
+
+// generic code without resampling
+void AudioMixerBase::process__genericNoResampling()
+{
+    ALOGVV("process__genericNoResampling\n");
+    int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
+
+    for (const auto &pair : mGroups) {
+        // process by group of tracks with same output main buffer to
+        // avoid multiple memset() on same buffer
+        const auto &group = pair.second;
+
+        // acquire buffer
+        for (const int name : group) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            t->buffer.frameCount = mFrameCount;
+            t->bufferProvider->getNextBuffer(&t->buffer);
+            t->frameCount = t->buffer.frameCount;
+            t->mIn = t->buffer.raw;
+        }
+
+        int32_t *out = (int *)pair.first;
+        size_t numFrames = 0;
+        do {
+            const size_t frameCount = std::min((size_t)BLOCKSIZE, mFrameCount - numFrames);
+            memset(outTemp, 0, sizeof(outTemp));
+            for (const int name : group) {
+                const std::shared_ptr<TrackBase> &t = mTracks[name];
+                int32_t *aux = NULL;
+                if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
+                    aux = t->auxBuffer + numFrames;
+                }
+                for (int outFrames = frameCount; outFrames > 0; ) {
+                    // t->mIn == nullptr can happen if the track was flushed just after having
+                    // been enabled for mixing.
+                    if (t->mIn == nullptr) {
+                        break;
+                    }
+                    size_t inFrames = (t->frameCount > outFrames)?outFrames:t->frameCount;
+                    if (inFrames > 0) {
+                        (t.get()->*t->hook)(
+                                outTemp + (frameCount - outFrames) * t->mMixerChannelCount,
+                                inFrames, mResampleTemp.get() /* naked ptr */, aux);
+                        t->frameCount -= inFrames;
+                        outFrames -= inFrames;
+                        if (CC_UNLIKELY(aux != NULL)) {
+                            aux += inFrames;
+                        }
+                    }
+                    if (t->frameCount == 0 && outFrames) {
+                        t->bufferProvider->releaseBuffer(&t->buffer);
+                        t->buffer.frameCount = (mFrameCount - numFrames) -
+                                (frameCount - outFrames);
+                        t->bufferProvider->getNextBuffer(&t->buffer);
+                        t->mIn = t->buffer.raw;
+                        if (t->mIn == nullptr) {
+                            break;
+                        }
+                        t->frameCount = t->buffer.frameCount;
+                    }
+                }
+            }
+
+            const std::shared_ptr<TrackBase> &t1 = mTracks[group[0]];
+            convertMixerFormat(out, t1->mMixerFormat, outTemp, t1->mMixerInFormat,
+                    frameCount * t1->mMixerChannelCount);
+            // TODO: fix ugly casting due to choice of out pointer type
+            out = reinterpret_cast<int32_t*>((uint8_t*)out
+                    + frameCount * t1->mMixerChannelCount
+                    * audio_bytes_per_sample(t1->mMixerFormat));
+            numFrames += frameCount;
+        } while (numFrames < mFrameCount);
+
+        // release each track's buffer
+        for (const int name : group) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            t->bufferProvider->releaseBuffer(&t->buffer);
+        }
+    }
+}
+
+// generic code with resampling
+void AudioMixerBase::process__genericResampling()
+{
+    ALOGVV("process__genericResampling\n");
+    int32_t * const outTemp = mOutputTemp.get(); // naked ptr
+    size_t numFrames = mFrameCount;
+
+    for (const auto &pair : mGroups) {
+        const auto &group = pair.second;
+        const std::shared_ptr<TrackBase> &t1 = mTracks[group[0]];
+
+        // clear temp buffer
+        memset(outTemp, 0, sizeof(*outTemp) * t1->mMixerChannelCount * mFrameCount);
+        for (const int name : group) {
+            const std::shared_ptr<TrackBase> &t = mTracks[name];
+            int32_t *aux = NULL;
+            if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
+                aux = t->auxBuffer;
+            }
+
+            // This is a little goofy: in the resampling case we don't
+            // acquire/release the buffers here because that is done by
+            // the resampler.
+            if (t->needs & NEEDS_RESAMPLE) {
+                (t.get()->*t->hook)(outTemp, numFrames, mResampleTemp.get() /* naked ptr */, aux);
+            } else {
+
+                size_t outFrames = 0;
+
+                while (outFrames < numFrames) {
+                    t->buffer.frameCount = numFrames - outFrames;
+                    t->bufferProvider->getNextBuffer(&t->buffer);
+                    t->mIn = t->buffer.raw;
+                    // t->mIn == nullptr can happen if the track was flushed just after having
+                    // been enabled for mixing.
+                    if (t->mIn == nullptr) break;
+
+                    (t.get()->*t->hook)(
+                            outTemp + outFrames * t->mMixerChannelCount, t->buffer.frameCount,
+                            mResampleTemp.get() /* naked ptr */,
+                            aux != nullptr ? aux + outFrames : nullptr);
+                    outFrames += t->buffer.frameCount;
+
+                    t->bufferProvider->releaseBuffer(&t->buffer);
+                }
+            }
+        }
+        convertMixerFormat(t1->mainBuffer, t1->mMixerFormat,
+                outTemp, t1->mMixerInFormat, numFrames * t1->mMixerChannelCount);
+    }
+}
+
+// one track, 16 bits stereo without resampling is the most common case
+void AudioMixerBase::process__oneTrack16BitsStereoNoResampling()
+{
+    ALOGVV("process__oneTrack16BitsStereoNoResampling\n");
+    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
+            "%zu != 1 tracks enabled", mEnabled.size());
+    const int name = mEnabled[0];
+    const std::shared_ptr<TrackBase> &t = mTracks[name];
+
+    AudioBufferProvider::Buffer& b(t->buffer);
+
+    int32_t* out = t->mainBuffer;
+    float *fout = reinterpret_cast<float*>(out);
+    size_t numFrames = mFrameCount;
+
+    const int16_t vl = t->volume[0];
+    const int16_t vr = t->volume[1];
+    const uint32_t vrl = t->volumeRL;
+    while (numFrames) {
+        b.frameCount = numFrames;
+        t->bufferProvider->getNextBuffer(&b);
+        const int16_t *in = b.i16;
+
+        // in == NULL can happen if the track was flushed just after having
+        // been enabled for mixing.
+        if (in == NULL || (((uintptr_t)in) & 3)) {
+            if ( AUDIO_FORMAT_PCM_FLOAT == t->mMixerFormat ) {
+                 memset((char*)fout, 0, numFrames
+                         * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
+            } else {
+                 memset((char*)out, 0, numFrames
+                         * t->mMixerChannelCount * audio_bytes_per_sample(t->mMixerFormat));
+            }
+            ALOGE_IF((((uintptr_t)in) & 3),
+                    "process__oneTrack16BitsStereoNoResampling: misaligned buffer"
+                    " %p track %d, channels %d, needs %08x, volume %08x vfl %f vfr %f",
+                    in, name, t->channelCount, t->needs, vrl, t->mVolume[0], t->mVolume[1]);
+            return;
+        }
+        size_t outFrames = b.frameCount;
+
+        switch (t->mMixerFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            do {
+                uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                in += 2;
+                int32_t l = mulRL(1, rl, vrl);
+                int32_t r = mulRL(0, rl, vrl);
+                *fout++ = float_from_q4_27(l);
+                *fout++ = float_from_q4_27(r);
+                // Note: In case of later int16_t sink output,
+                // conversion and clamping is done by memcpy_to_i16_from_float().
+            } while (--outFrames);
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            if (CC_UNLIKELY(uint32_t(vl) > UNITY_GAIN_INT || uint32_t(vr) > UNITY_GAIN_INT)) {
+                // volume is boosted, so we might need to clamp even though
+                // we process only one track.
+                do {
+                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                    in += 2;
+                    int32_t l = mulRL(1, rl, vrl) >> 12;
+                    int32_t r = mulRL(0, rl, vrl) >> 12;
+                    // clamping...
+                    l = clamp16(l);
+                    r = clamp16(r);
+                    *out++ = (r<<16) | (l & 0xFFFF);
+                } while (--outFrames);
+            } else {
+                do {
+                    uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
+                    in += 2;
+                    int32_t l = mulRL(1, rl, vrl) >> 12;
+                    int32_t r = mulRL(0, rl, vrl) >> 12;
+                    *out++ = (r<<16) | (l & 0xFFFF);
+                } while (--outFrames);
+            }
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad mixer format: %d", t->mMixerFormat);
+        }
+        numFrames -= b.frameCount;
+        t->bufferProvider->releaseBuffer(&b);
+    }
+}
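+
+/* Informal note on the fixed-point math in the loop above (inferred from the code):
+ * mulRL() multiplies a Q0.15 sample by a U4.12 volume, producing a Q4.27 intermediate.
+ * For a float sink, float_from_q4_27() scales by 1 / (1 << 27), so a full-scale input at
+ * unity gain maps to roughly +/-1.0f. For a 16-bit sink, >> 12 returns to the 16-bit
+ * sample range and, when the volume is boosted above unity, clamp16() saturates the result.
+ */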
+
+/* TODO: consider whether this level of optimization is necessary.
+ * Perhaps just stick with a single for loop.
+ */
+
+// Needs to derive a compile time constant (constexpr).  Could be targeted to go
+// to a MONOVOL mixtype based on MAX_NUM_VOLUMES, but that's an unnecessary complication.
+#define MIXTYPE_MONOVOL(mixtype) ((mixtype) == MIXTYPE_MULTI ? MIXTYPE_MULTI_MONOVOL : \
+        (mixtype) == MIXTYPE_MULTI_SAVEONLY ? MIXTYPE_MULTI_SAVEONLY_MONOVOL : (mixtype))
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE,
+        typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeRampMulti(uint32_t channels, TO* out, size_t frameCount,
+        const TI* in, TA* aux, TV *vol, const TV *volinc, TAV *vola, TAV volainc)
+{
+    switch (channels) {
+    case 1:
+        volumeRampMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 2:
+        volumeRampMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 3:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 4:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 5:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 6:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 7:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    case 8:
+        volumeRampMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out,
+                frameCount, in, aux, vol, volinc, vola, volainc);
+        break;
+    }
+}
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE,
+        typename TO, typename TI, typename TV, typename TA, typename TAV>
+static void volumeMulti(uint32_t channels, TO* out, size_t frameCount,
+        const TI* in, TA* aux, const TV *vol, TAV vola)
+{
+    switch (channels) {
+    case 1:
+        volumeMulti<MIXTYPE, 1>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 2:
+        volumeMulti<MIXTYPE, 2>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 3:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 3>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 4:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 4>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 5:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 5>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 6:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 6>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 7:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 7>(out, frameCount, in, aux, vol, vola);
+        break;
+    case 8:
+        volumeMulti<MIXTYPE_MONOVOL(MIXTYPE), 8>(out, frameCount, in, aux, vol, vola);
+        break;
+    }
+}
+
+/* MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * USEFLOATVOL (set to true if float volume is used)
+ * ADJUSTVOL   (set to true if volume ramp parameters needs adjustment afterwards)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+    typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::volumeMix(TO *out, size_t outFrames,
+        const TI *in, TA *aux, bool ramp)
+{
+    if (USEFLOATVOL) {
+        if (ramp) {
+            volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+                    mPrevVolume, mVolumeInc,
+#ifdef FLOAT_AUX
+                    &mPrevAuxLevel, mAuxInc
+#else
+                    &prevAuxLevel, auxInc
+#endif
+                );
+            if (ADJUSTVOL) {
+                adjustVolumeRamp(aux != NULL, true);
+            }
+        } else {
+            volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+                    mVolume,
+#ifdef FLOAT_AUX
+                    mAuxLevel
+#else
+                    auxLevel
+#endif
+            );
+        }
+    } else {
+        if (ramp) {
+            volumeRampMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+                    prevVolume, volumeInc, &prevAuxLevel, auxInc);
+            if (ADJUSTVOL) {
+                adjustVolumeRamp(aux != NULL);
+            }
+        } else {
+            volumeMulti<MIXTYPE>(mMixerChannelCount, out, outFrames, in, aux,
+                    volume, auxLevel);
+        }
+    }
+}
+
+/* This process hook is called when there is a single track without
+ * aux buffer, volume ramp, or resampling.
+ * TODO: Update the hook selection: this can properly handle aux and ramp.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27)
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::process__noResampleOneTrack()
+{
+    ALOGVV("process__noResampleOneTrack\n");
+    LOG_ALWAYS_FATAL_IF(mEnabled.size() != 1,
+            "%zu != 1 tracks enabled", mEnabled.size());
+    const std::shared_ptr<TrackBase> &t = mTracks[mEnabled[0]];
+    const uint32_t channels = t->mMixerChannelCount;
+    TO* out = reinterpret_cast<TO*>(t->mainBuffer);
+    TA* aux = reinterpret_cast<TA*>(t->auxBuffer);
+    const bool ramp = t->needsRamp();
+
+    for (size_t numFrames = mFrameCount; numFrames > 0; ) {
+        AudioBufferProvider::Buffer& b(t->buffer);
+        // get input buffer
+        b.frameCount = numFrames;
+        t->bufferProvider->getNextBuffer(&b);
+        const TI *in = reinterpret_cast<TI*>(b.raw);
+
+        // in == NULL can happen if the track was flushed just after having
+        // been enabled for mixing.
+        if (in == NULL || (((uintptr_t)in) & 3)) {
+            memset(out, 0, numFrames
+                    * channels * audio_bytes_per_sample(t->mMixerFormat));
+            ALOGE_IF((((uintptr_t)in) & 3), "process__noResampleOneTrack: bus error: "
+                    "buffer %p track %p, channels %d, needs %#x",
+                    in, &t, t->channelCount, t->needs);
+            return;
+        }
+
+        const size_t outFrames = b.frameCount;
+        t->volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, false /* ADJUSTVOL */> (
+                out, outFrames, in, aux, ramp);
+
+        out += outFrames * channels;
+        if (aux != NULL) {
+            aux += outFrames;
+        }
+        numFrames -= b.frameCount;
+
+        // release buffer
+        t->bufferProvider->releaseBuffer(&b);
+    }
+    if (ramp) {
+        t->adjustVolumeRamp(aux != NULL, is_same<TI, float>::value);
+    }
+}
+
+/* This track hook is called to do resampling then mixing,
+ * pulling from the track's upstream AudioBufferProvider.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::track__Resample(TO* out, size_t outFrameCount, TO* temp, TA* aux)
+{
+    ALOGVV("track__Resample\n");
+    mResampler->setSampleRate(sampleRate);
+    const bool ramp = needsRamp();
+    if (ramp || aux != NULL) {
+        // if ramp:        resample with unity gain to temp buffer and scale/mix in 2nd step.
+        // if aux != NULL: resample with unity gain to temp buffer then apply send level.
+
+        mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
+        memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(TO));
+        mResampler->resample((int32_t*)temp, outFrameCount, bufferProvider);
+
+        volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
+                out, outFrameCount, temp, aux, ramp);
+
+    } else { // constant volume gain
+        mResampler->setVolume(mVolume[0], mVolume[1]);
+        mResampler->resample((int32_t*)out, outFrameCount, bufferProvider);
+    }
+}
+
+/* This track hook is called to mix a track, when no resampling is required.
+ * The input buffer should be present in mIn.
+ *
+ * MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+ * TO: int32_t (Q4.27) or float
+ * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+ * TA: int32_t (Q4.27) or float
+ */
+template <int MIXTYPE, typename TO, typename TI, typename TA>
+void AudioMixerBase::TrackBase::track__NoResample(
+        TO* out, size_t frameCount, TO* temp __unused, TA* aux)
+{
+    ALOGVV("track__NoResample\n");
+    const TI *in = static_cast<const TI *>(mIn);
+
+    volumeMix<MIXTYPE, is_same<TI, float>::value /* USEFLOATVOL */, true /* ADJUSTVOL */>(
+            out, frameCount, in, aux, needsRamp());
+
+    // MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
+    // MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
+    in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * mMixerChannelCount;
+    mIn = in;
+}
+
+/* The Mixer engine generates either int32_t (Q4_27) or float data.
+ * We use this function to convert the engine buffers
+ * to the desired mixer output format, either int16_t (Q.15) or float.
+ */
+/* static */
+void AudioMixerBase::convertMixerFormat(void *out, audio_format_t mixerOutFormat,
+        void *in, audio_format_t mixerInFormat, size_t sampleCount)
+{
+    switch (mixerInFormat) {
+    case AUDIO_FORMAT_PCM_FLOAT:
+        switch (mixerOutFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            memcpy(out, in, sampleCount * sizeof(float)); // MEMCPY. TODO optimize out
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            memcpy_to_i16_from_float((int16_t*)out, (float*)in, sampleCount);
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+            break;
+        }
+        break;
+    case AUDIO_FORMAT_PCM_16_BIT:
+        switch (mixerOutFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            memcpy_to_float_from_q4_27((float*)out, (const int32_t*)in, sampleCount);
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            memcpy_to_i16_from_q4_27((int16_t*)out, (const int32_t*)in, sampleCount);
+            break;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+            break;
+        }
+        break;
+    default:
+        LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+        break;
+    }
+}
+
+/* Returns the proper track hook to use for mixing the track into the output buffer.
+ */
+/* static */
+AudioMixerBase::hook_t AudioMixerBase::TrackBase::getTrackHook(int trackType, uint32_t channelCount,
+        audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
+{
+    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+        switch (trackType) {
+        case TRACKTYPE_NOP:
+            return &TrackBase::track__nop;
+        case TRACKTYPE_RESAMPLE:
+            return &TrackBase::track__genericResample;
+        case TRACKTYPE_NORESAMPLEMONO:
+            return &TrackBase::track__16BitsMono;
+        case TRACKTYPE_NORESAMPLE:
+            return &TrackBase::track__16BitsStereo;
+        default:
+            LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
+            break;
+        }
+    }
+    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
+    switch (trackType) {
+    case TRACKTYPE_NOP:
+        return &TrackBase::track__nop;
+    case TRACKTYPE_RESAMPLE:
+        switch (mixerInFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
+                    MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
+                    MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+            break;
+        }
+        break;
+    case TRACKTYPE_NORESAMPLEMONO:
+        switch (mixerInFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+                            MIXTYPE_MONOEXPAND, float /*TO*/, float /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+                            MIXTYPE_MONOEXPAND, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+            break;
+        }
+        break;
+    case TRACKTYPE_NORESAMPLE:
+        switch (mixerInFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+                    MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
+                    MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+            break;
+        }
+        break;
+    default:
+        LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
+        break;
+    }
+    return NULL;
+}
+
+/* Returns the proper process hook for mixing tracks. Currently works only for
+ * PROCESSTYPE_NORESAMPLEONETRACK, a mix involving one track, no resampling.
+ *
+ * TODO: Due to the special mixing considerations of duplicating to
+ * a stereo output track, the input track cannot be MONO.  This should be
+ * prevented by the caller.
+ */
+/* static */
+AudioMixerBase::process_hook_t AudioMixerBase::getProcessHook(
+        int processType, uint32_t channelCount,
+        audio_format_t mixerInFormat, audio_format_t mixerOutFormat)
+{
+    if (processType != PROCESSTYPE_NORESAMPLEONETRACK) { // Only NORESAMPLEONETRACK
+        LOG_ALWAYS_FATAL("bad processType: %d", processType);
+        return NULL;
+    }
+    if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
+        return &AudioMixerBase::process__oneTrack16BitsStereoNoResampling;
+    }
+    LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
+    switch (mixerInFormat) {
+    case AUDIO_FORMAT_PCM_FLOAT:
+        switch (mixerOutFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return &AudioMixerBase::process__noResampleOneTrack<
+                    MIXTYPE_MULTI_SAVEONLY, float /*TO*/, float /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return &AudioMixerBase::process__noResampleOneTrack<
+                    MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, float /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+            break;
+        }
+        break;
+    case AUDIO_FORMAT_PCM_16_BIT:
+        switch (mixerOutFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return &AudioMixerBase::process__noResampleOneTrack<
+                    MIXTYPE_MULTI_SAVEONLY, float /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            return &AudioMixerBase::process__noResampleOneTrack<
+                    MIXTYPE_MULTI_SAVEONLY, int16_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
+        default:
+            LOG_ALWAYS_FATAL("bad mixerOutFormat: %#x", mixerOutFormat);
+            break;
+        }
+        break;
+    default:
+        LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
+        break;
+    }
+    return NULL;
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
new file mode 100644
index 0000000..3f7cd48
--- /dev/null
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -0,0 +1,238 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_MIXER_H
+#define ANDROID_AUDIO_MIXER_H
+
+#include <pthread.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <android/os/IExternalVibratorService.h>
+#include <media/AudioMixerBase.h>
+#include <media/BufferProviders.h>
+#include <utils/threads.h>
+
+// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
+#define MAX_GAIN_INT AudioMixerBase::UNITY_GAIN_INT
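+// (U4.12 reference: unity gain is 0x1000 == 4096; a gain of 0.5, for example, would be 0x0800.)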
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// AudioMixer extends AudioMixerBase by adding support for down- and up-mixing
+// and time stretch that are implemented via Effects HAL, and adding support
+// for haptic channels which depends on Vibrator service. This is the version
+// that is used by Audioflinger.
+
+class AudioMixer : public AudioMixerBase
+{
+public:
+    // maximum number of channels supported for the content
+    static const uint32_t MAX_NUM_CHANNELS_TO_DOWNMIX = AUDIO_CHANNEL_COUNT_MAX;
+
+    enum { // extension of AudioMixerBase parameters
+        DOWNMIX_TYPE    = 0x4004,
+        // for haptic
+        HAPTIC_ENABLED  = 0x4007, // Set whether haptic data from this track should be played or not.
+        HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
+        // for target TIMESTRETCH
+        PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
+                                  // parameter 'value' is a pointer to the new playback rate.
+    };
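+
+    // Illustrative usage sketch (hypothetical mixer/track variables; the value encodings
+    // are assumptions based on the parameter comments above, not taken from this change):
+    //   AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+    //   rate.mSpeed = 1.5f;
+    //   mixer.setParameter(trackName, TIMESTRETCH, PLAYBACK_RATE, &rate);
+    //   mixer.setParameter(trackName, TRACK, HAPTIC_INTENSITY,
+    //           (void *)(uintptr_t)HAPTIC_SCALE_LOW);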
+
+    typedef enum { // Haptic intensity, should keep consistent with VibratorService
+        HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
+        HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
+        HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
+        HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
+        HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
+        HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
+    } haptic_intensity_t;
+    static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
+    static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
+    static constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
+
+    static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
+        switch (hapticIntensity) {
+        case HAPTIC_SCALE_MUTE:
+        case HAPTIC_SCALE_VERY_LOW:
+        case HAPTIC_SCALE_LOW:
+        case HAPTIC_SCALE_NONE:
+        case HAPTIC_SCALE_HIGH:
+        case HAPTIC_SCALE_VERY_HIGH:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    AudioMixer(size_t frameCount, uint32_t sampleRate)
+            : AudioMixerBase(frameCount, sampleRate) {
+        pthread_once(&sOnceControl, &sInitRoutine);
+    }
+
+    bool isValidChannelMask(audio_channel_mask_t channelMask) const override;
+
+    void setParameter(int name, int target, int param, void *value) override;
+    void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
+
+private:
+
+    struct Track : public TrackBase {
+        Track() : TrackBase() {}
+
+        ~Track()
+        {
+            // mInputBufferProvider need not be deleted.
+            // Ensure the order of destruction of buffer providers as they
+            // release the upstream provider in the destructor.
+            mTimestretchBufferProvider.reset(nullptr);
+            mPostDownmixReformatBufferProvider.reset(nullptr);
+            mDownmixerBufferProvider.reset(nullptr);
+            mReformatBufferProvider.reset(nullptr);
+            mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
+            mAdjustChannelsBufferProvider.reset(nullptr);
+        }
+
+        uint32_t getOutputChannelCount() override {
+            return mDownmixerBufferProvider.get() != nullptr ? mMixerChannelCount : channelCount;
+        }
+        uint32_t getMixerChannelCount() override {
+            return mMixerChannelCount + mMixerHapticChannelCount;
+        }
+
+        status_t    prepareForDownmix();
+        void        unprepareForDownmix();
+        status_t    prepareForReformat();
+        void        unprepareForReformat();
+        status_t    prepareForAdjustChannels();
+        void        unprepareForAdjustChannels();
+        status_t    prepareForAdjustChannelsNonDestructive(size_t frames);
+        void        unprepareForAdjustChannelsNonDestructive();
+        void        clearContractedBuffer();
+        bool        setPlaybackRate(const AudioPlaybackRate &playbackRate);
+        void        reconfigureBufferProviders();
+
+        /* Buffer providers are constructed to translate the track input data as needed.
+         * See DownmixerBufferProvider below for how the Track buffer provider
+         * is wrapped by another one when downmixing is required.
+         *
+         * TODO: perhaps make a single PlaybackConverterProvider class to move
+         * all pre-mixer track buffer conversions outside the AudioMixer class.
+         *
+         * 1) mInputBufferProvider: The AudioTrack buffer provider.
+         * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
+         *    channel format to another. Expanded channels are filled with zeros and put at the end
+         *    of each audio frame. Contracted channels are copied to the end of the buffer.
+         * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
+         *    This is currently used in audio-haptic coupled playback to separate audio and haptic
+         *    data. Contracted channels can be written to a caller-provided buffer.
+         * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
+         *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
+         *    requires reformat. For example, it may convert floating point input to
+         *    PCM_16_bit if that's required by the downmixer.
+         * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+         *    the number of channels required by the mixer sink.
+         * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+         *    the downmixer requirements to the mixer engine input requirements.
+         * 7) mTimestretchBufferProvider: Adds timestretching for playback rate adjustment.
+         *
+         * A wiring sketch follows the provider declarations below.
+         */
+        AudioBufferProvider* mInputBufferProvider;    // externally provided buffer provider.
+        // TODO: combine mAdjustChannelsBufferProvider and
+        // mContractChannelsNonDestructiveBufferProvider
+        std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
+        std::unique_ptr<PassthruBufferProvider> mTimestretchBufferProvider;
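+
+        /* A sketch of how such a chain is typically wired, upstream to downstream
+         * (illustrative only; it mirrors the intent of reconfigureBufferProviders() and
+         * assumes PassthruBufferProvider::setBufferProvider() attaches the upstream source):
+         *
+         *   AudioBufferProvider *chain = mInputBufferProvider;
+         *   if (mAdjustChannelsBufferProvider != nullptr) {
+         *       mAdjustChannelsBufferProvider->setBufferProvider(chain);
+         *       chain = mAdjustChannelsBufferProvider.get();
+         *   }
+         *   // ... repeat for each non-null provider in the order listed above ...
+         *   bufferProvider = chain;  // the mixer engine pulls from the end of the chain
+         */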
+
+        audio_format_t mDownmixRequiresFormat;  // required downmixer format
+                                                // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
+                                                // AUDIO_FORMAT_INVALID if no required format
+
+        AudioPlaybackRate    mPlaybackRate;
+
+        // Haptic
+        bool                 mHapticPlaybackEnabled;
+        haptic_intensity_t   mHapticIntensity;
+        audio_channel_mask_t mHapticChannelMask;
+        uint32_t             mHapticChannelCount;
+        audio_channel_mask_t mMixerHapticChannelMask;
+        uint32_t             mMixerHapticChannelCount;
+        uint32_t             mAdjustInChannelCount;
+        uint32_t             mAdjustOutChannelCount;
+        uint32_t             mAdjustNonDestructiveInChannelCount;
+        uint32_t             mAdjustNonDestructiveOutChannelCount;
+        bool                 mKeepContractedChannels;
+
+        float getHapticScaleGamma() const {
+            // Need to keep consistent with the value in VibratorService.
+            switch (mHapticIntensity) {
+            case HAPTIC_SCALE_VERY_LOW:
+                return 2.0f;
+            case HAPTIC_SCALE_LOW:
+                return 1.5f;
+            case HAPTIC_SCALE_HIGH:
+                return 0.5f;
+            case HAPTIC_SCALE_VERY_HIGH:
+                return 0.25f;
+            default:
+                return 1.0f;
+            }
+        }
+
+        float getHapticMaxAmplitudeRatio() const {
+            // Need to keep consistent with the value in VibratorService.
+            switch (mHapticIntensity) {
+            case HAPTIC_SCALE_VERY_LOW:
+                return HAPTIC_SCALE_VERY_LOW_RATIO;
+            case HAPTIC_SCALE_LOW:
+                return HAPTIC_SCALE_LOW_RATIO;
+            case HAPTIC_SCALE_NONE:
+            case HAPTIC_SCALE_HIGH:
+            case HAPTIC_SCALE_VERY_HIGH:
+                return 1.0f;
+            default:
+                return 0.0f;
+            }
+        }
+    };
+
+    inline std::shared_ptr<Track> getTrack(int name) {
+        return std::static_pointer_cast<Track>(mTracks[name]);
+    }
+
+    std::shared_ptr<TrackBase> preCreateTrack() override;
+    status_t postCreateTrack(TrackBase *track) override;
+
+    void preProcess() override;
+    void postProcess() override;
+
+    bool setChannelMasks(int name,
+            audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) override;
+
+    static void sInitRoutine();
+
+    static pthread_once_t sOnceControl; // initialized in constructor by first new
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_AUDIO_MIXER_H
diff --git a/media/libaudioprocessing/include/media/AudioMixerBase.h b/media/libaudioprocessing/include/media/AudioMixerBase.h
new file mode 100644
index 0000000..805b6d0
--- /dev/null
+++ b/media/libaudioprocessing/include/media/AudioMixerBase.h
@@ -0,0 +1,359 @@
+/*
+**
+** Copyright 2019, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_MIXER_BASE_H
+#define ANDROID_AUDIO_MIXER_BASE_H
+
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <media/AudioBufferProvider.h>
+#include <media/AudioResampler.h>
+#include <media/AudioResamplerPublic.h>
+#include <system/audio.h>
+#include <utils/Compat.h>
+
+// This must match frameworks/av/services/audioflinger/Configuration.h
+// when used with the Audio Framework.
+#define FLOAT_AUX
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// AudioMixerBase is functional on its own if only mixing and resampling
+// is needed.
+
+class AudioMixerBase
+{
+public:
+    // Do not change these unless underlying code changes.
+    // This mixer has a hard-coded upper limit of 8 channels for output.
+    static constexpr uint32_t MAX_NUM_CHANNELS = FCC_8;
+    static constexpr uint32_t MAX_NUM_VOLUMES = FCC_2; // stereo volume only
+
+    static const uint16_t UNITY_GAIN_INT = 0x1000;
+    static const CONSTEXPR float UNITY_GAIN_FLOAT = 1.0f;
+
+    enum { // names
+        // setParameter targets
+        TRACK           = 0x3000,
+        RESAMPLE        = 0x3001,
+        RAMP_VOLUME     = 0x3002, // ramp to new volume
+        VOLUME          = 0x3003, // don't ramp
+        TIMESTRETCH     = 0x3004,
+
+        // set Parameter names
+        // for target TRACK
+        CHANNEL_MASK    = 0x4000,
+        FORMAT          = 0x4001,
+        MAIN_BUFFER     = 0x4002,
+        AUX_BUFFER      = 0x4003,
+        // 0x4004 reserved
+        MIXER_FORMAT    = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+        MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
+        // for target RESAMPLE
+        SAMPLE_RATE     = 0x4100, // Configure sample rate conversion on this track name;
+                                  // parameter 'value' is the new sample rate in Hz.
+                                  // Only creates a sample rate converter the first time that
+                                  // the track sample rate is different from the mix sample rate.
+                                  // If the new sample rate is the same as the mix sample rate,
+                                  // and a sample rate converter already exists,
+                                  // then the sample rate converter remains present but is a no-op.
+        RESET           = 0x4101, // Reset sample rate converter without changing sample rate.
+                                  // This clears out the resampler's input buffer.
+        REMOVE          = 0x4102, // Remove the sample rate converter on this track name;
+                                  // the track is restored to the mix sample rate.
+        // for target RAMP_VOLUME and VOLUME (8 channels max)
+        // FIXME use float for these 3 to improve the dynamic range
+        VOLUME0         = 0x4200,
+        VOLUME1         = 0x4201,
+        AUXLEVEL        = 0x4210,
+    };
+
+    AudioMixerBase(size_t frameCount, uint32_t sampleRate)
+        : mSampleRate(sampleRate)
+        , mFrameCount(frameCount) {
+    }
+
+    virtual ~AudioMixerBase() {}
+
+    virtual bool isValidFormat(audio_format_t format) const;
+    virtual bool isValidChannelMask(audio_channel_mask_t channelMask) const;
+
+    // Create a new track in the mixer.
+    //
+    // \param name        a unique user-provided integer associated with the track.
+    //                    If name already exists, the function will abort.
+    // \param channelMask output channel mask.
+    // \param format      PCM format
+    // \param sessionId   Session id for the track. Tracks with the same
+    //                    session id will be submixed together.
+    //
+    // \return OK        on success.
+    //         BAD_VALUE if the format does not satisfy isValidFormat()
+    //                   or the channelMask does not satisfy isValidChannelMask().
+    status_t    create(
+            int name, audio_channel_mask_t channelMask, audio_format_t format, int sessionId);
+
+    bool        exists(int name) const {
+        return mTracks.count(name) > 0;
+    }
+
+    // Free an allocated track by name.
+    void        destroy(int name);
+
+    // Enable or disable an allocated track by name
+    void        enable(int name);
+    void        disable(int name);
+
+    virtual void setParameter(int name, int target, int param, void *value);
+
+    void        process() {
+        preProcess();
+        (this->*mHook)();
+        postProcess();
+    }
+
+    size_t      getUnreleasedFrames(int name) const;
+
+    std::string trackNames() const;
+
+  protected:
+    // Set kUseNewMixer to true to always use the new mixer engine. Otherwise the
+    // original code will be used for stereo sinks, and the new mixer for everything else.
+    static constexpr bool kUseNewMixer = true;
+
+    // Set kUseFloat to true to allow floating input into the mixer engine.
+    // If kUseNewMixer is false, this is ignored or may be overridden internally
+    static constexpr bool kUseFloat = true;
+
+#ifdef FLOAT_AUX
+    using TYPE_AUX = float;
+    static_assert(kUseNewMixer && kUseFloat,
+            "kUseNewMixer and kUseFloat must be true for FLOAT_AUX option");
+#else
+    using TYPE_AUX = int32_t; // q4.27
+#endif
+
+    /* For multi-format functions (calls template functions
+     * in AudioMixerOps.h).  The template parameters are as follows:
+     *
+     *   MIXTYPE     (see AudioMixerOps.h MIXTYPE_* enumeration)
+     *   USEFLOATVOL (set to true if float volume is used)
+     *   ADJUSTVOL   (set to true if volume ramp parameters need adjustment afterwards)
+     *   TO: int32_t (Q4.27) or float
+     *   TI: int32_t (Q4.27) or int16_t (Q0.15) or float
+     *   TA: int32_t (Q4.27)
+     */
+
+    enum {
+        // FIXME this representation permits up to 8 channels
+        NEEDS_CHANNEL_COUNT__MASK   = 0x00000007,
+    };
+
+    enum {
+        NEEDS_CHANNEL_1             = 0x00000000,   // mono
+        NEEDS_CHANNEL_2             = 0x00000001,   // stereo
+
+        // sample format is not explicitly specified, and is assumed to be AUDIO_FORMAT_PCM_16_BIT
+
+        NEEDS_MUTE                  = 0x00000100,
+        NEEDS_RESAMPLE              = 0x00001000,
+        NEEDS_AUX                   = 0x00010000,
+    };
+
+    // hook types
+    enum {
+        PROCESSTYPE_NORESAMPLEONETRACK, // others set elsewhere
+    };
+
+    enum {
+        TRACKTYPE_NOP,
+        TRACKTYPE_RESAMPLE,
+        TRACKTYPE_NORESAMPLE,
+        TRACKTYPE_NORESAMPLEMONO,
+    };
+
+    // process hook functionality
+    using process_hook_t = void(AudioMixerBase::*)();
+
+    struct TrackBase;
+    using hook_t = void(TrackBase::*)(
+            int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
+
+    struct TrackBase {
+        TrackBase()
+            : bufferProvider(nullptr)
+        {
+            // TODO: move additional initialization here.
+        }
+        virtual ~TrackBase() {}
+
+        virtual uint32_t getOutputChannelCount() { return channelCount; }
+        virtual uint32_t getMixerChannelCount() { return mMixerChannelCount; }
+
+        bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
+        bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
+        bool        doesResample() const { return mResampler.get() != nullptr; }
+        void        recreateResampler(uint32_t devSampleRate);
+        void        resetResampler() { if (mResampler.get() != nullptr) mResampler->reset(); }
+        void        adjustVolumeRamp(bool aux, bool useFloat = false);
+        size_t      getUnreleasedFrames() const { return mResampler.get() != nullptr ?
+                                                    mResampler->getUnreleasedFrames() : 0; };
+
+        static hook_t getTrackHook(int trackType, uint32_t channelCount,
+                audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+
+        void track__nop(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+
+        template <int MIXTYPE, bool USEFLOATVOL, bool ADJUSTVOL,
+            typename TO, typename TI, typename TA>
+        void volumeMix(TO *out, size_t outFrames, const TI *in, TA *aux, bool ramp);
+
+        uint32_t    needs;
+
+        // TODO: Eventually remove legacy integer volume settings
+        union {
+        int16_t     volume[MAX_NUM_VOLUMES]; // U4.12 fixed point (top bit should be zero)
+        int32_t     volumeRL;
+        };
+
+        int32_t     prevVolume[MAX_NUM_VOLUMES];
+        int32_t     volumeInc[MAX_NUM_VOLUMES];
+        int32_t     auxInc;
+        int32_t     prevAuxLevel;
+        int16_t     auxLevel;       // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
+
+        uint16_t    frameCount;
+
+        uint8_t     channelCount;   // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
+        uint8_t     unused_padding; // formerly format, was always 16
+        uint16_t    enabled;        // actually bool
+        audio_channel_mask_t channelMask;
+
+        // actual buffer provider used by the track hooks
+        AudioBufferProvider*                bufferProvider;
+
+        mutable AudioBufferProvider::Buffer buffer; // 8 bytes
+
+        hook_t      hook;
+        const void  *mIn;             // current location in buffer
+
+        std::unique_ptr<AudioResampler> mResampler;
+        uint32_t    sampleRate;
+        int32_t*    mainBuffer;
+        int32_t*    auxBuffer;
+
+        int32_t     sessionId;
+
+        audio_format_t mMixerFormat;     // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+        audio_format_t mFormat;          // input track format
+        audio_format_t mMixerInFormat;   // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
+                                         // each track must be converted to this format.
+
+        float          mVolume[MAX_NUM_VOLUMES];     // floating point set volume
+        float          mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
+        float          mVolumeInc[MAX_NUM_VOLUMES];  // floating point volume increment
+
+        float          mAuxLevel;                     // floating point set aux level
+        float          mPrevAuxLevel;                 // floating point prev aux level
+        float          mAuxInc;                       // floating point aux increment
+
+        audio_channel_mask_t mMixerChannelMask;
+        uint32_t             mMixerChannelCount;
+
+      protected:
+
+        // hooks
+        void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+        void track__16BitsStereo(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+        void track__16BitsMono(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
+
+        void volumeRampStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+        void volumeStereo(int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
+
+        // multi-format track hooks
+        template <int MIXTYPE, typename TO, typename TI, typename TA>
+        void track__Resample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
+        template <int MIXTYPE, typename TO, typename TI, typename TA>
+        void track__NoResample(TO* out, size_t frameCount, TO* temp __unused, TA* aux);
+    };
+
+    // preCreateTrack must create an instance of a proper TrackBase descendant.
+    // postCreateTrack is called after filling out fields of TrackBase. It can
+    // abort track creation by returning non-OK status. See the implementation
+    // of create() for details.
+    virtual std::shared_ptr<TrackBase> preCreateTrack();
+    virtual status_t postCreateTrack(TrackBase *track __unused) { return OK; }
+
+    // preProcess is called before the process hook, postProcess after,
+    // see the implementation of process() method.
+    virtual void preProcess() {}
+    virtual void postProcess() {}
+
+    virtual bool setChannelMasks(int name,
+            audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
+
+    // Called when track info changes and a new process hook should be determined.
+    void invalidate() {
+        mHook = &AudioMixerBase::process__validate;
+    }
+
+    void process__validate();
+    void process__nop();
+    void process__genericNoResampling();
+    void process__genericResampling();
+    void process__oneTrack16BitsStereoNoResampling();
+
+    template <int MIXTYPE, typename TO, typename TI, typename TA>
+    void process__noResampleOneTrack();
+
+    static process_hook_t getProcessHook(int processType, uint32_t channelCount,
+            audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
+
+    static void convertMixerFormat(void *out, audio_format_t mixerOutFormat,
+            void *in, audio_format_t mixerInFormat, size_t sampleCount);
+
+    // initialization constants
+    const uint32_t mSampleRate;
+    const size_t mFrameCount;
+
+    process_hook_t mHook = &AudioMixerBase::process__nop;   // one of process__*, never nullptr
+
+    // the size of the type (int32_t) should be the largest of all types supported
+    // by the mixer.
+    std::unique_ptr<int32_t[]> mOutputTemp;
+    std::unique_ptr<int32_t[]> mResampleTemp;
+
+    // track names grouped by main buffer, in no particular order of main buffer.
+    // However, names for a particular main buffer are in order (by construction).
+    std::unordered_map<void * /* mainBuffer */, std::vector<int /* name */>> mGroups;
+
+    // track names that are enabled, in increasing order (by construction).
+    std::vector<int /* name */> mEnabled;
+
+    // track smart pointers, by name, in increasing order of name.
+    std::map<int /* name */, std::shared_ptr<TrackBase>> mTracks;
+};
+
+}  // namespace android
+
+#endif  // ANDROID_AUDIO_MIXER_BASE_H
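
Illustrative sketch, not part of the patch: how a client might drive the AudioMixerBase API added above. The track name, buffers, and the convention of casting scalar parameters through void* are assumptions based on the declarations in this header; attaching the track's AudioBufferProvider is omitted, so this only shows the lifecycle.

    #include <cstdint>
    #include <media/AudioMixerBase.h>
    #include <system/audio.h>

    using namespace android;

    void mixOneTrack(float *mainBuffer /* kFrameCount stereo float frames */) {
        constexpr size_t kFrameCount = 256;
        constexpr uint32_t kSampleRate = 48000;
        AudioMixerBase mixer(kFrameCount, kSampleRate);

        const int name = 0;  // caller-chosen unique track name
        if (mixer.create(name, AUDIO_CHANNEL_OUT_STEREO, AUDIO_FORMAT_PCM_FLOAT,
                         AUDIO_SESSION_NONE) != OK) {
            return;
        }
        // Route the track to a main buffer and pick the mixer output format.
        mixer.setParameter(name, AudioMixerBase::TRACK, AudioMixerBase::MAIN_BUFFER, mainBuffer);
        mixer.setParameter(name, AudioMixerBase::TRACK, AudioMixerBase::MIXER_FORMAT,
                           (void *)(uintptr_t)AUDIO_FORMAT_PCM_FLOAT);
        // A real client would also attach an AudioBufferProvider to feed the track (not shown).
        mixer.enable(name);
        mixer.process();   // runs preProcess(), the selected process hook, then postProcess()
        mixer.destroy(name);
    }
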
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libaudioprocessing/include/media/BufferProviders.h
similarity index 100%
rename from media/libmedia/include/media/BufferProviders.h
rename to media/libaudioprocessing/include/media/BufferProviders.h
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index 9c82b1d..2a2f36e 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -6,6 +6,7 @@
     srcs: ["EffectDownmix.c"],
 
     shared_libs: [
+        "libaudioutils",
         "libcutils",
         "liblog",
     ],
@@ -23,5 +24,4 @@
         "libaudioeffects",
         "libhardware_headers",
     ],
-    static_libs: ["libaudioutils" ],
 }
diff --git a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
index 7468a90..10eedd9 100644
--- a/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
+++ b/media/libeffects/lvm/lib/Common/lib/VectorArithmetic.h
@@ -53,6 +53,7 @@
                                  LVM_INT16 NrFrames,
                                  LVM_INT32 NrChannels);
 void Copy_Float_Stereo_Mc(       const LVM_FLOAT *src,
+                                 LVM_FLOAT *StereoOut,
                                  LVM_FLOAT *dst,
                                  LVM_INT16 NrFrames,
                                  LVM_INT32 NrChannels);
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.c b/media/libeffects/lvm/lib/Common/src/Copy_16.c
index 3858450..3eb3c14 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.c
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.c
@@ -117,30 +117,31 @@
     }
 }
 
-// Merge a multichannel source with stereo contained in dst, to dst.
+// Merge a multichannel source with stereo contained in StereoOut, to dst.
 void Copy_Float_Stereo_Mc(const LVM_FLOAT *src,
+                 LVM_FLOAT *StereoOut,
                  LVM_FLOAT *dst,
                  LVM_INT16 NrFrames, /* Number of frames*/
                  LVM_INT32 NrChannels)
 {
     LVM_INT16 ii, jj;
-    LVM_FLOAT *src_st = dst + 2 * (NrFrames - 1);
 
-    // repack dst which carries stereo information
+    // pack dst with stereo information of StereoOut
     // together with the upper channels of src.
+    StereoOut += 2 * (NrFrames - 1);
     dst += NrChannels * (NrFrames - 1);
     src += NrChannels * (NrFrames - 1);
     for (ii = NrFrames; ii != 0; ii--)
     {
-        dst[1] = src_st[1];
-        dst[0] = src_st[0]; // copy 1 before 0 is required for NrChannels == 3.
+        dst[1] = StereoOut[1];
+        dst[0] = StereoOut[0]; // copy 1 before 0 is required for NrChannels == 3.
         for (jj = 2; jj < NrChannels; jj++)
         {
             dst[jj] = src[jj];
         }
         dst    -= NrChannels;
         src    -= NrChannels;
-        src_st -= 2;
+        StereoOut -= 2;
     }
 }
 #endif
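
Illustrative reference, not part of the patch: a non-aliasing restatement of what Copy_Float_Stereo_Mc computes, for readers checking the backward in-place loop above. Plain C++ types are used here instead of the LVM_* typedefs.

    // Frame i of dst takes L/R from StereoOut and channels 2..NrChannels-1 from src.
    static void copyFloatStereoMcReference(const float *src, const float *StereoOut, float *dst,
                                           int NrFrames, int NrChannels) {
        for (int i = 0; i < NrFrames; ++i) {
            dst[i * NrChannels + 0] = StereoOut[i * 2 + 0];            // left
            dst[i * NrChannels + 1] = StereoOut[i * 2 + 1];            // right
            for (int ch = 2; ch < NrChannels; ++ch) {
                dst[i * NrChannels + ch] = src[i * NrChannels + ch];   // upper channels
            }
        }
    }
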
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
index ab8ccd1..c8df8e4 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Private.h
@@ -60,7 +60,11 @@
 #define LVCS_COMPGAINFRAME          64          /* Compressor gain update interval */
 
 /* Memory */
+#ifdef SUPPORT_MC
+#define LVCS_SCRATCHBUFFERS              8      /* Number of buffers required for inplace processing */
+#else
 #define LVCS_SCRATCHBUFFERS              6      /* Number of buffers required for inplace processing */
+#endif
 #ifdef SUPPORT_MC
 /*
  * The Concert Surround module applies processing only on the first two
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
index ef1d9eb..56fb04f 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Process.c
@@ -106,7 +106,7 @@
      * The Concert Surround module carries out processing only on L, R.
      */
     pInput = pScratch + (2 * NrFrames);
-    pStIn  = pScratch + (LVCS_SCRATCHBUFFERS * NrFrames);
+    pStIn  = pScratch + ((LVCS_SCRATCHBUFFERS - 2) * NrFrames);
     /* The first two channel data is extracted from the input data and
      * copied into pInput buffer
      */
@@ -303,13 +303,45 @@
      */
     if (pInstance->Params.OperatingMode != LVCS_OFF)
     {
+#ifdef SUPPORT_MC
+        LVM_FLOAT *pStereoOut;
+        /*
+         * LVCS_Process_CS uses the output buffer to store the intermediate outputs of the
+         * StereoEnhancer, Equalizer, ReverbGenerator and BypassMixer.
+         * So, when the input and output buffers are the same (in-place processing), use the
+         * scratch buffer for the intermediate outputs to avoid overwriting the input data.
+         */
+        if (pOutData == pInData)
+        {
+          /*
+           * Scratch memory is used in 4 chunks of (2 * NrFrames) size.
+           * First chunk of memory is used by LVCS_StereoEnhancer and LVCS_ReverbGenerator,
+           * the second and fourth are used as input buffers by pInput and pStIn in LVCS_Process_CS.
+           * Hence, pStereoOut is pointed at the otherwise unused third chunk of scratch memory.
+           */
+            pStereoOut = (LVM_FLOAT *) \
+                          pInstance->MemoryTable. \
+                          Region[LVCS_MEMREGION_TEMPORARY_FAST].pBaseAddress +
+                          ((LVCS_SCRATCHBUFFERS - 4) * NrFrames);
+        }
+        else
+        {
+            pStereoOut = pOutData;
+        }
+
         /*
          * Call CS process function
          */
             err = LVCS_Process_CS(hInstance,
                                   pInData,
+                                  pStereoOut,
+                                  NrFrames);
+#else
+            err = LVCS_Process_CS(hInstance,
+                                  pInData,
                                   pOutData,
                                   NumSamples);
+#endif
 
 
         /*
@@ -329,10 +361,17 @@
 
             if(NumSamples < LVCS_COMPGAINFRAME)
             {
+#ifdef SUPPORT_MC
+                NonLinComp_Float(Gain,                    /* Compressor gain setting */
+                                 pStereoOut,
+                                 pStereoOut,
+                                 (LVM_INT32)(2 * NrFrames));
+#else
                 NonLinComp_Float(Gain,                    /* Compressor gain setting */
                                  pOutData,
                                  pOutData,
                                  (LVM_INT32)(2 * NumSamples));
+#endif
             }
             else
             {
@@ -361,7 +400,11 @@
 
                 FinalGain = Gain;
                 Gain = pInstance->CompressGain;
+#ifdef SUPPORT_MC
+                pOutPtr = pStereoOut;
+#else
                 pOutPtr = pOutData;
+#endif
 
                 while(SampleToProcess > 0)
                 {
@@ -428,6 +471,7 @@
         }
 #ifdef SUPPORT_MC
         Copy_Float_Stereo_Mc(pInData,
+                             pStereoOut,
                              pOutData,
                              NrFrames,
                              channels);
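
Illustrative sketch, not part of the patch: the scratch partitioning implied by the comments above when SUPPORT_MC is defined (LVCS_SCRATCHBUFFERS == 8, i.e. four stereo-sized chunks of 2 * NrFrames samples each). The helper name is hypothetical; pScratch is assumed to be the base of the TEMPORARY_FAST region, as the pointer arithmetic in LVCS_Process suggests.

    // Hypothetical helper just to show the chunk offsets used in LVCS_Process.
    static inline void lvcsScratchLayout(LVM_FLOAT *pScratch, LVM_INT16 NrFrames,
                                         LVM_FLOAT **pInput, LVM_FLOAT **pStereoOut,
                                         LVM_FLOAT **pStIn) {
        // chunk 0: pScratch .. pScratch + 2*NrFrames -> StereoEnhancer / ReverbGenerator work area
        *pInput     = pScratch + 2 * NrFrames;   // chunk 1: stereo input to LVCS_Process_CS
        *pStereoOut = pScratch + 4 * NrFrames;   // chunk 2: intermediate stereo output when in-place
        *pStIn      = pScratch + 6 * NrFrames;   // chunk 3: second stereo input buffer
    }
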
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index c19fcf6..0a2850f 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -302,6 +302,8 @@
         for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
             pContext->pBundledContext->bandGaindB[i] = EQNB_5BandSoftPresets[i];
         }
+        pContext->pBundledContext->effectProcessCalled      = 0;
+        pContext->pBundledContext->effectInDrain            = 0;
 
         ALOGV("\tEffectCreate - Calling LvmBundle_init");
         ret = LvmBundle_init(pContext);
@@ -394,6 +396,8 @@
 
     // Clear the instantiated flag for the effect
     // protect against the case where an effect is un-instantiated without being disabled
+
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
     if(pContext->EffectType == LVM_BASS_BOOST) {
         ALOGV("\tEffectRelease LVM_BASS_BOOST Clearing global intstantiated flag");
         pSessionContext->bBassInstantiated = LVM_FALSE;
@@ -418,12 +422,16 @@
     } else if(pContext->EffectType == LVM_VOLUME) {
         ALOGV("\tEffectRelease LVM_VOLUME Clearing global intstantiated flag");
         pSessionContext->bVolumeInstantiated = LVM_FALSE;
-        if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE){
+        // There is no samplesToExitCount for volume, so we also use the drain flag to check
+        // whether we should decrement the number of enabled effects.
+        if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE
+                || (effectInDrain & 1 << LVM_VOLUME) != 0) {
             pContext->pBundledContext->NumberEffectsEnabled--;
         }
     } else {
         ALOGV("\tLVM_ERROR : EffectRelease : Unsupported effect\n\n\n\n\n\n\n");
     }
+    effectInDrain &= ~(1 << pContext->EffectType); // no need to drain if released
 
     // Disable effect, in this case ignore errors (return codes)
     // if an effect has already been disabled
@@ -3124,8 +3132,9 @@
 
 int Effect_setEnabled(EffectContext *pContext, bool enabled)
 {
-    ALOGV("\tEffect_setEnabled() type %d, enabled %d", pContext->EffectType, enabled);
-
+    ALOGV("%s effectType %d, enabled %d, currently enabled %d", __func__,
+            pContext->EffectType, enabled, pContext->pBundledContext->NumberEffectsEnabled);
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
     if (enabled) {
         // Bass boost or Virtualizer can be temporarily disabled if playing over device speaker due
         // to their nature.
@@ -3139,6 +3148,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountBb <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_BASS_BOOST);
                 pContext->pBundledContext->SamplesToExitCountBb =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bBassEnabled = LVM_TRUE;
@@ -3152,6 +3162,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountEq <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_EQUALIZER);
                 pContext->pBundledContext->SamplesToExitCountEq =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bEqualizerEnabled = LVM_TRUE;
@@ -3164,6 +3175,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountVirt <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_VIRTUALIZER);
                 pContext->pBundledContext->SamplesToExitCountVirt =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bVirtualizerEnabled = LVM_TRUE;
@@ -3174,7 +3186,10 @@
                     ALOGV("\tEffect_setEnabled() LVM_VOLUME is already enabled");
                     return -EINVAL;
                 }
-                pContext->pBundledContext->NumberEffectsEnabled++;
+                if ((effectInDrain & 1 << LVM_VOLUME) == 0) {
+                    pContext->pBundledContext->NumberEffectsEnabled++;
+                }
+                effectInDrain &= ~(1 << LVM_VOLUME);
                 pContext->pBundledContext->bVolumeEnabled = LVM_TRUE;
                 break;
             default:
@@ -3192,6 +3207,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bBassEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_BASS_BOOST;
                 break;
             case LVM_EQUALIZER:
                 if (pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE) {
@@ -3199,6 +3215,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_EQUALIZER;
                 break;
             case LVM_VIRTUALIZER:
                 if (pContext->pBundledContext->bVirtualizerEnabled == LVM_FALSE) {
@@ -3206,6 +3223,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_VIRTUALIZER;
                 break;
             case LVM_VOLUME:
                 if (pContext->pBundledContext->bVolumeEnabled == LVM_FALSE) {
@@ -3213,6 +3231,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_VOLUME;
                 break;
             default:
                 ALOGV("\tEffect_setEnabled() invalid effect type");
@@ -3283,6 +3302,38 @@
         ALOGV("\tLVM_ERROR : Effect_process() ERROR NULL INPUT POINTER OR FRAME COUNT IS WRONG");
         return -EINVAL;
     }
+
+    int &effectProcessCalled = pContext->pBundledContext->effectProcessCalled;
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
+    if ((effectProcessCalled & 1 << pContext->EffectType) != 0) {
+        ALOGW("Effect %d already called", pContext->EffectType);
+        const int undrainedEffects = effectInDrain & ~effectProcessCalled;
+        if ((undrainedEffects & 1 << LVM_BASS_BOOST) != 0) {
+            ALOGW("Draining BASS_BOOST");
+            pContext->pBundledContext->SamplesToExitCountBb = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_BASS_BOOST);
+        }
+        if ((undrainedEffects & 1 << LVM_EQUALIZER) != 0) {
+            ALOGW("Draining EQUALIZER");
+            pContext->pBundledContext->SamplesToExitCountEq = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_EQUALIZER);
+        }
+        if ((undrainedEffects & 1 << LVM_VIRTUALIZER) != 0) {
+            ALOGW("Draining VIRTUALIZER");
+            pContext->pBundledContext->SamplesToExitCountVirt = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_VIRTUALIZER);
+        }
+        if ((undrainedEffects & 1 << LVM_VOLUME) != 0) {
+            ALOGW("Draining VOLUME");
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_VOLUME);
+        }
+    }
+    effectProcessCalled |= 1 << pContext->EffectType;
+
     if ((pContext->pBundledContext->bBassEnabled == LVM_FALSE)&&
         (pContext->EffectType == LVM_BASS_BOOST)){
         //ALOGV("\tEffect_process() LVM_BASS_BOOST Effect is not enabled");
@@ -3291,9 +3342,12 @@
             //ALOGV("\tEffect_process: Waiting to turn off BASS_BOOST, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountBb);
         }
-        if(pContext->pBundledContext->SamplesToExitCountBb <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountBb <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_BASS_BOOST) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_BASS_BOOST);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_BASS_BOOST");
         }
     }
@@ -3301,7 +3355,10 @@
         (pContext->EffectType == LVM_VOLUME)){
         //ALOGV("\tEffect_process() LVM_VOLUME Effect is not enabled");
         status = -ENODATA;
-        pContext->pBundledContext->NumberEffectsEnabled--;
+        if ((effectInDrain & 1 << LVM_VOLUME) != 0) {
+            pContext->pBundledContext->NumberEffectsEnabled--;
+            effectInDrain &= ~(1 << LVM_VOLUME);
+        }
     }
     if ((pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE)&&
         (pContext->EffectType == LVM_EQUALIZER)){
@@ -3311,9 +3368,12 @@
             //ALOGV("\tEffect_process: Waiting to turn off EQUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountEq);
         }
-        if(pContext->pBundledContext->SamplesToExitCountEq <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountEq <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_EQUALIZER) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_EQUALIZER);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_EQUALIZER");
         }
     }
@@ -3326,9 +3386,12 @@
             //ALOGV("\tEffect_process: Waiting for to turn off VIRTUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountVirt);
         }
-        if(pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_VIRTUALIZER) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_VIRTUALIZER);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_VIRTUALIZER");
         }
     }
@@ -3337,8 +3400,18 @@
         pContext->pBundledContext->NumberEffectsCalled++;
     }
 
-    if(pContext->pBundledContext->NumberEffectsCalled ==
-       pContext->pBundledContext->NumberEffectsEnabled){
+    if (pContext->pBundledContext->NumberEffectsCalled >=
+            pContext->pBundledContext->NumberEffectsEnabled) {
+
+        // We expect the # effects called to be equal to # effects enabled in sequence (including
+        // draining effects).  Warn if this is not the case due to inconsistent calls.
+        ALOGW_IF(pContext->pBundledContext->NumberEffectsCalled >
+                pContext->pBundledContext->NumberEffectsEnabled,
+                "%s Number of effects called %d is greater than number of effects enabled %d",
+                __func__, pContext->pBundledContext->NumberEffectsCalled,
+                pContext->pBundledContext->NumberEffectsEnabled);
+        effectProcessCalled = 0; // reset our consistency check.
+
         //ALOGV("\tEffect_process     Calling process with %d effects enabled, %d called: Effect %d",
         //pContext->pBundledContext->NumberEffectsEnabled,
         //pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 6af4554..e4aacd0 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -110,6 +110,14 @@
 #ifdef SUPPORT_MC
     LVM_INT32                       ChMask;
 #endif
+
+    /* Bitmask indicating whether a drain is in progress after disabling an effect.
+       The bit corresponding to an effect is (1 << lvm_effect_en). */
+    int                             effectInDrain;
+
+    /* Bitmask indicating whether process() has been called for a particular effect in the
+       current round. The bit corresponding to an effect is (1 << lvm_effect_en). */
+    int                             effectProcessCalled;
 };
 
 /* SessionContext : One session */
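
Illustrative sketch, not part of the patch: the bitmask convention used by effectInDrain and effectProcessCalled above. The helper names are hypothetical; EffectBundle.cpp manipulates the bits inline.

    // Each effect occupies bit (1 << lvm_effect_en) in the two bitmasks.
    static inline void startDrain(int &effectInDrain, int effectType) {
        effectInDrain |= 1 << effectType;        // set when the effect is disabled but must still drain
    }
    static inline bool finishDrain(int &effectInDrain, int effectType) {
        if ((effectInDrain & (1 << effectType)) == 0) {
            return false;                        // nothing to do; avoids double-decrementing
        }
        effectInDrain &= ~(1 << effectType);     // drain accounted for exactly once
        return true;                             // caller then decrements NumberEffectsEnabled
    }
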
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 1c95e27..9f34035 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -40,6 +40,8 @@
         AUDIO_PARAMETER_KEY_AUDIO_LANGUAGE_PREFERRED;
 const char * const AudioParameter::keyMonoOutput = AUDIO_PARAMETER_MONO_OUTPUT;
 const char * const AudioParameter::keyStreamHwAvSync = AUDIO_PARAMETER_STREAM_HW_AV_SYNC;
+const char * const AudioParameter::keyDeviceConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
+const char * const AudioParameter::keyDeviceDisconnect = AUDIO_PARAMETER_DEVICE_DISCONNECT;
 const char * const AudioParameter::keyStreamConnect = AUDIO_PARAMETER_DEVICE_CONNECT;
 const char * const AudioParameter::keyStreamDisconnect = AUDIO_PARAMETER_DEVICE_DISCONNECT;
 const char * const AudioParameter::keyStreamSupportedFormats = AUDIO_PARAMETER_STREAM_SUP_FORMATS;
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 028bea1..d95bc8e 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -213,15 +213,14 @@
         return interface_cast<IMemory>(reply.readStrongBinder());
     }
 
-    status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly)
+    sp<IMemory> getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly)
     {
-        ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
-                frameIndex, numFrames, colorFormat, metaOnly);
+        ALOGV("getFrameAtIndex: index(%d), colorFormat(%d) metaOnly(%d)",
+                index, colorFormat, metaOnly);
         Parcel data, reply;
         data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
-        data.writeInt32(frameIndex);
-        data.writeInt32(numFrames);
+        data.writeInt32(index);
         data.writeInt32(colorFormat);
         data.writeInt32(metaOnly);
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
@@ -230,16 +229,9 @@
         remote()->transact(GET_FRAME_AT_INDEX, data, &reply);
         status_t ret = reply.readInt32();
         if (ret != NO_ERROR) {
-            return ret;
+            return NULL;
         }
-        int retNumFrames = reply.readInt32();
-        if (retNumFrames < numFrames) {
-            numFrames = retNumFrames;
-        }
-        for (int i = 0; i < numFrames; i++) {
-            frames->push_back(interface_cast<IMemory>(reply.readStrongBinder()));
-        }
-        return OK;
+        return interface_cast<IMemory>(reply.readStrongBinder());
     }
 
     sp<IMemory> extractAlbumArt()
@@ -442,24 +434,20 @@
 
         case GET_FRAME_AT_INDEX: {
             CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
-            int frameIndex = data.readInt32();
-            int numFrames = data.readInt32();
+            int index = data.readInt32();
             int colorFormat = data.readInt32();
             bool metaOnly = (data.readInt32() != 0);
-            ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
-                    frameIndex, numFrames, colorFormat, metaOnly);
+            ALOGV("getFrameAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
+                    index, colorFormat, metaOnly);
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
             setSchedPolicy(data);
 #endif
-            std::vector<sp<IMemory> > frames;
-            status_t err = getFrameAtIndex(
-                    &frames, frameIndex, numFrames, colorFormat, metaOnly);
-            reply->writeInt32(err);
-            if (OK == err) {
-                reply->writeInt32(frames.size());
-                for (size_t i = 0; i < frames.size(); i++) {
-                    reply->writeStrongBinder(IInterface::asBinder(frames[i]));
-                }
+            sp<IMemory> frame = getFrameAtIndex(index, colorFormat, metaOnly);
+            if (frame != nullptr) {  // Don't send NULL across the binder interface
+                reply->writeInt32(NO_ERROR);
+                reply->writeStrongBinder(IInterface::asBinder(frame));
+            } else {
+                reply->writeInt32(UNKNOWN_ERROR);
             }
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
             restoreSchedPolicy();
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index cb8d375..2bf0802 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -77,10 +77,13 @@
     if (t != 0) {
         if (enabled) {
             if (t->exitPending()) {
+                mCaptureLock.unlock();
                 if (t->requestExitAndWait() == WOULD_BLOCK) {
+                    mCaptureLock.lock();
                     ALOGE("Visualizer::enable() called from thread");
                     return INVALID_OPERATION;
                 }
+                mCaptureLock.lock();
             }
         }
         t->mLock.lock();
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index c6f422d..28d2192 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -48,9 +48,8 @@
             int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
     virtual sp<IMemory>     getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom) = 0;
-    virtual status_t        getFrameAtIndex(
-            std::vector<sp<IMemory> > *frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
+    virtual sp<IMemory>     getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly) = 0;
     virtual sp<IMemory>     extractAlbumArt() = 0;
     virtual const char*     extractMetadata(int keyCode) = 0;
 };
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 98d300f..37dc401 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -49,9 +49,8 @@
             int index, int colorFormat, bool metaOnly, bool thumbnail) = 0;
     virtual sp<IMemory> getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom) = 0;
-    virtual status_t getFrameAtIndex(
-            std::vector<sp<IMemory> >* frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
+    virtual sp<IMemory> getFrameAtIndex(
+            int frameIndex, int colorFormat, bool metaOnly) = 0;
     virtual MediaAlbumArt* extractAlbumArt() = 0;
     virtual const char* extractMetadata(int keyCode) = 0;
 };
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index d29e97d..138a014 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -98,9 +98,8 @@
             int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false, bool thumbnail = false);
     sp<IMemory> getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom);
-    status_t getFrameAtIndex(
-            std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
-            int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
+    sp<IMemory>  getFrameAtIndex(
+            int index, int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
     sp<IMemory> extractAlbumArt();
     const char* extractMetadata(int keyCode);
 
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index e61b04d..2ae76b3 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -179,18 +179,16 @@
             index, colorFormat, left, top, right, bottom);
 }
 
-status_t MediaMetadataRetriever::getFrameAtIndex(
-        std::vector<sp<IMemory> > *frames,
-        int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
-    ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
-            frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory>  MediaMetadataRetriever::getFrameAtIndex(
+        int index, int colorFormat, bool metaOnly) {
+    ALOGV("getFrameAtIndex: index(%d), colorFormat(%d) metaOnly(%d)",
+            index, colorFormat, metaOnly);
     Mutex::Autolock _l(mLock);
     if (mRetriever == 0) {
         ALOGE("retriever is not initialized");
-        return INVALID_OPERATION;
+        return NULL;
     }
-    return mRetriever->getFrameAtIndex(
-            frames, frameIndex, numFrames, colorFormat, metaOnly);
+    return mRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
 }
 
 const char* MediaMetadataRetriever::extractMetadata(int keyCode)
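
Illustrative sketch, not part of the patch: with the vector/numFrames variant removed, a caller that previously requested several frames in one call now loops over indices using the single-frame signature (retriever is assumed to be an already-initialized MediaMetadataRetriever; firstIndex and numFrames are illustrative).

    std::vector<sp<IMemory>> frames;
    for (int i = firstIndex; i < firstIndex + numFrames; ++i) {
        // colorFormat defaults to HAL_PIXEL_FORMAT_RGB_565 and metaOnly to false.
        sp<IMemory> frame = retriever->getFrameAtIndex(i);
        if (frame == NULL) {
            break;                 // stop at the first index that fails to decode
        }
        frames.push_back(frame);
    }
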
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 15ea578..9d348ec 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -37,6 +37,15 @@
             "1" ,
         ]
     },
+
+    header_abi_checker: {
+        enabled: true,
+        symbol_file: "libmediametrics.map.txt",
+    },
+
+    visibility: [
+        "//frameworks/av:__subpackages__",
+        "//frameworks/base/core/jni",
+        "//frameworks/base/media/jni",
+    ],
 }
-
-
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 40b17bf..4a3c65e 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -242,31 +242,27 @@
     sp<IMemory> frame = mRetriever->getImageRectAtIndex(
             index, colorFormat, left, top, right, bottom);
     if (frame == NULL) {
-        ALOGE("failed to extract image");
-        return NULL;
+        ALOGE("failed to extract image at index %d", index);
     }
     return frame;
 }
 
-status_t MetadataRetrieverClient::getFrameAtIndex(
-            std::vector<sp<IMemory> > *frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
-    ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
-            frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory> MetadataRetrieverClient::getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly) {
+    ALOGV("getFrameAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
+            index, colorFormat, metaOnly);
     Mutex::Autolock lock(mLock);
     Mutex::Autolock glock(sLock);
     if (mRetriever == NULL) {
         ALOGE("retriever is not initialized");
-        return INVALID_OPERATION;
+        return NULL;
     }
 
-    status_t err = mRetriever->getFrameAtIndex(
-            frames, frameIndex, numFrames, colorFormat, metaOnly);
-    if (err != OK) {
-        frames->clear();
-        return err;
+    sp<IMemory> frame = mRetriever->getFrameAtIndex(index, colorFormat, metaOnly);
+    if (frame == NULL) {
+        ALOGE("failed to extract frame at index %d", index);
     }
-    return OK;
+    return frame;
 }
 
 sp<IMemory> MetadataRetrieverClient::extractAlbumArt()
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index 272d093..8020441 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -56,9 +56,8 @@
             int index, int colorFormat, bool metaOnly, bool thumbnail);
     virtual sp<IMemory>             getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom);
-    virtual status_t getFrameAtIndex(
-                std::vector<sp<IMemory> > *frames,
-                int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+    virtual sp<IMemory>             getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly);
     virtual sp<IMemory>             extractAlbumArt();
     virtual const char*             extractMetadata(int keyCode);
 
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 2cd920a..4653711 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1578,7 +1578,7 @@
         }
 
         if (mPreparing) {
-            notifyPreparedAndCleanup(finalStatus);
+            notifyPreparedAndCleanup(finalStatus == ERROR_END_OF_STREAM ? OK : finalStatus);
             mPreparing = false;
         } else if (mSentPauseOnBuffering) {
             sendCacheStats();
diff --git a/media/libnbaio/Android.bp b/media/libnbaio/Android.bp
index a4df38d..6345742 100644
--- a/media/libnbaio/Android.bp
+++ b/media/libnbaio/Android.bp
@@ -1,4 +1,3 @@
-
 cc_defaults {
     name: "libnbaio_mono_defaults",
     srcs: [
@@ -21,6 +20,9 @@
         "liblog",
         "libutils",
     ],
+    export_shared_lib_headers: [
+        "libaudioutils",
+    ],
 
     export_include_dirs: ["include_mono"],
 }
@@ -66,7 +68,5 @@
         "-Wall",
     ],
 
-    include_dirs: ["system/media/audio_utils/include"],
-
     export_include_dirs: ["include"],
 }
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 3d67c91..d198d39 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -4505,22 +4505,38 @@
 status_t ACodec::configureImageGrid(
         const sp<AMessage> &msg, sp<AMessage> &outputFormat) {
     int32_t tileWidth, tileHeight, gridRows, gridCols;
-    if (!msg->findInt32("tile-width", &tileWidth) ||
-        !msg->findInt32("tile-height", &tileHeight) ||
-        !msg->findInt32("grid-rows", &gridRows) ||
-        !msg->findInt32("grid-cols", &gridCols)) {
+    OMX_BOOL useGrid = OMX_FALSE;
+    if (msg->findInt32("tile-width", &tileWidth) &&
+        msg->findInt32("tile-height", &tileHeight) &&
+        msg->findInt32("grid-rows", &gridRows) &&
+        msg->findInt32("grid-cols", &gridCols)) {
+        useGrid = OMX_TRUE;
+    } else {
+        // when bEnabled is false, the tile info is not used,
+        // but clear out these too.
+        tileWidth = tileHeight = gridRows = gridCols = 0;
+    }
+
+    if (!mIsImage && !useGrid) {
         return OK;
     }
 
     OMX_VIDEO_PARAM_ANDROID_IMAGEGRIDTYPE gridType;
     InitOMXParams(&gridType);
     gridType.nPortIndex = kPortIndexOutput;
-    gridType.bEnabled = OMX_TRUE;
+    gridType.bEnabled = useGrid;
     gridType.nTileWidth = tileWidth;
     gridType.nTileHeight = tileHeight;
     gridType.nGridRows = gridRows;
     gridType.nGridCols = gridCols;
 
+    ALOGV("sending image grid info to component: bEnabled %d, tile %dx%d, grid %dx%d",
+            gridType.bEnabled,
+            gridType.nTileWidth,
+            gridType.nTileHeight,
+            gridType.nGridRows,
+            gridType.nGridCols);
+
     status_t err = mOMXNode->setParameter(
             (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidImageGrid,
             &gridType, sizeof(gridType));
@@ -4541,6 +4557,13 @@
             (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidImageGrid,
             &gridType, sizeof(gridType));
 
+    ALOGV("received image grid info from component: bEnabled %d, tile %dx%d, grid %dx%d",
+            gridType.bEnabled,
+            gridType.nTileWidth,
+            gridType.nTileHeight,
+            gridType.nGridRows,
+            gridType.nGridCols);
+
     if (err == OK && gridType.bEnabled) {
         outputFormat->setInt32("tile-width", gridType.nTileWidth);
         outputFormat->setInt32("tile-height", gridType.nTileHeight);
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 18a6bd8..c6ec6de 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -21,6 +21,7 @@
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
 #include <gui/Surface.h>
+#include <gui/SurfaceComposerClient.h>
 #include <inttypes.h>
 #include <media/ICrypto.h>
 #include <media/IMediaSource.h>
@@ -28,6 +29,7 @@
 #include <media/stagefright/foundation/avc_utils.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/ColorConverter.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
@@ -44,7 +46,7 @@
 
 sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
         int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
-        int32_t dstBpp, bool metaOnly = false) {
+        int32_t dstBpp, bool allocRotated, bool metaOnly) {
     int32_t rotationAngle;
     if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
         rotationAngle = 0;  // By default, no rotation
@@ -74,6 +76,14 @@
         displayHeight = height;
     }
 
+    if (allocRotated && (rotationAngle == 90 || rotationAngle == 270)) {
+        int32_t tmp;
+        tmp = width; width = height; height = tmp;
+        tmp = displayWidth; displayWidth = displayHeight; displayHeight = tmp;
+        tmp = tileWidth; tileWidth = tileHeight; tileHeight = tmp;
+        rotationAngle = 0;
+    }
+
     VideoFrame frame(width, height, displayWidth, displayHeight,
             tileWidth, tileHeight, rotationAngle, dstBpp, !metaOnly, iccSize);
 
@@ -94,6 +104,20 @@
     return frameMem;
 }
 
+sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
+        int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+        int32_t dstBpp, bool allocRotated = false) {
+    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+            allocRotated, false /*metaOnly*/);
+}
+
+sp<IMemory> allocMetaFrame(const sp<MetaData>& trackMeta,
+        int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
+        int32_t dstBpp) {
+    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp,
+            false /*allocRotated*/, true /*metaOnly*/);
+}
+
 bool findThumbnailInfo(
         const sp<MetaData> &trackMeta, int32_t *width, int32_t *height,
         uint32_t *type = NULL, const void **data = NULL, size_t *size = NULL) {
@@ -117,23 +141,27 @@
 bool getDstColorFormat(
         android_pixel_format_t colorFormat,
         OMX_COLOR_FORMATTYPE *dstFormat,
+        ui::PixelFormat *captureFormat,
         int32_t *dstBpp) {
     switch (colorFormat) {
         case HAL_PIXEL_FORMAT_RGB_565:
         {
             *dstFormat = OMX_COLOR_Format16bitRGB565;
+            *captureFormat = ui::PixelFormat::RGB_565;
             *dstBpp = 2;
             return true;
         }
         case HAL_PIXEL_FORMAT_RGBA_8888:
         {
             *dstFormat = OMX_COLOR_Format32BitRGBA8888;
+            *captureFormat = ui::PixelFormat::RGBA_8888;
             *dstBpp = 4;
             return true;
         }
         case HAL_PIXEL_FORMAT_BGRA_8888:
         {
             *dstFormat = OMX_COLOR_Format32bitBGRA8888;
+            *captureFormat = ui::PixelFormat::BGRA_8888;
             *dstBpp = 4;
             return true;
         }
@@ -150,9 +178,10 @@
 sp<IMemory> FrameDecoder::getMetadataOnly(
         const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail) {
     OMX_COLOR_FORMATTYPE dstFormat;
+    ui::PixelFormat captureFormat;
     int32_t dstBpp;
-    if (!getDstColorFormat(
-            (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
+    if (!getDstColorFormat((android_pixel_format_t)colorFormat,
+            &dstFormat, &captureFormat, &dstBpp)) {
         return NULL;
     }
 
@@ -170,8 +199,7 @@
             tileWidth = tileHeight = 0;
         }
     }
-    return allocVideoFrame(trackMeta,
-            width, height, tileWidth, tileHeight, dstBpp, true /*metaOnly*/);
+    return allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp);
 }
 
 FrameDecoder::FrameDecoder(
@@ -194,15 +222,30 @@
     }
 }
 
+bool isHDR(const sp<AMessage> &format) {
+    uint32_t standard, range, transfer;
+    if (!format->findInt32("color-standard", (int32_t*)&standard)) {
+        standard = 0;
+    }
+    if (!format->findInt32("color-range", (int32_t*)&range)) {
+        range = 0;
+    }
+    if (!format->findInt32("color-transfer", (int32_t*)&transfer)) {
+        transfer = 0;
+    }
+    return standard == ColorUtils::kColorStandardBT2020 &&
+            transfer == ColorUtils::kColorTransferST2084;
+}
+
 status_t FrameDecoder::init(
-        int64_t frameTimeUs, size_t numFrames, int option, int colorFormat) {
-    if (!getDstColorFormat(
-            (android_pixel_format_t)colorFormat, &mDstFormat, &mDstBpp)) {
+        int64_t frameTimeUs, int option, int colorFormat) {
+    if (!getDstColorFormat((android_pixel_format_t)colorFormat,
+            &mDstFormat, &mCaptureFormat, &mDstBpp)) {
         return ERROR_UNSUPPORTED;
     }
 
     sp<AMessage> videoFormat = onGetFormatAndSeekOptions(
-            frameTimeUs, numFrames, option, &mReadOptions);
+            frameTimeUs, option, &mReadOptions, &mSurface);
     if (videoFormat == NULL) {
         ALOGE("video format or seek mode not supported");
         return ERROR_UNSUPPORTED;
@@ -219,7 +262,7 @@
     }
 
     err = decoder->configure(
-            videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+            videoFormat, mSurface, NULL /* crypto */, 0 /* flags */);
     if (err != OK) {
         ALOGW("configure returned error %d (%s)", err, asString(err));
         decoder->release();
@@ -253,19 +296,7 @@
         return NULL;
     }
 
-    return mFrames.size() > 0 ? mFrames[0] : NULL;
-}
-
-status_t FrameDecoder::extractFrames(std::vector<sp<IMemory> >* frames) {
-    status_t err = extractInternal();
-    if (err != OK) {
-        return err;
-    }
-
-    for (size_t i = 0; i < mFrames.size(); i++) {
-        frames->push_back(mFrames[i]);
-    }
-    return OK;
+    return mFrameMemory;
 }
 
 status_t FrameDecoder::extractInternal() {
@@ -379,8 +410,13 @@
                         ALOGE("failed to get output buffer %zu", index);
                         break;
                     }
-                    err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
-                    mDecoder->releaseOutputBuffer(index);
+                    if (mSurface != nullptr) {
+                        mDecoder->renderOutputBufferAndRelease(index);
+                        err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+                    } else {
+                        err = onOutputReceived(videoFrameBuffer, mOutputFormat, ptsUs, &done);
+                        mDecoder->releaseOutputBuffer(index);
+                    }
                 } else {
                     ALOGW("Received error %d (%s) instead of output", err, asString(err));
                     done = true;
@@ -404,22 +440,22 @@
         const sp<MetaData> &trackMeta,
         const sp<IMediaSource> &source)
     : FrameDecoder(componentName, trackMeta, source),
+      mFrame(NULL),
       mIsAvcOrHevc(false),
       mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
-      mTargetTimeUs(-1LL),
-      mNumFrames(0),
-      mNumFramesDecoded(0) {
+      mTargetTimeUs(-1LL) {
 }
 
 sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
-        int64_t frameTimeUs, size_t numFrames, int seekMode, MediaSource::ReadOptions *options) {
+        int64_t frameTimeUs, int seekMode,
+        MediaSource::ReadOptions *options,
+        sp<Surface> *window) {
     mSeekMode = static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
     if (mSeekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
             mSeekMode > MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
         ALOGE("Unknown seek mode: %d", mSeekMode);
         return NULL;
     }
-    mNumFrames = numFrames;
 
     const char *mime;
     if (!trackMeta()->findCString(kKeyMIMEType, &mime)) {
@@ -460,6 +496,16 @@
         videoFormat->setInt32("android._num-input-buffers", 1);
         videoFormat->setInt32("android._num-output-buffers", 1);
     }
+
+    if (isHDR(videoFormat)) {
+        *window = initSurfaceControl();
+        if (*window == NULL) {
+            ALOGE("Failed to init surface control for HDR, falling back to non-HDR");
+        } else {
+            videoFormat->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
+        }
+    }
+
     return videoFormat;
 }
 
@@ -495,7 +541,7 @@
         return OK;
     }
 
-    *done = (++mNumFramesDecoded >= mNumFrames);
+    *done = true;
 
     if (outputFormat == NULL) {
         return ERROR_MALFORMED;
@@ -504,13 +550,22 @@
     int32_t width, height, stride, srcFormat;
     if (!outputFormat->findInt32("width", &width) ||
             !outputFormat->findInt32("height", &height) ||
-            !outputFormat->findInt32("stride", &stride) ||
             !outputFormat->findInt32("color-format", &srcFormat)) {
         ALOGE("format missing dimension or color: %s",
                 outputFormat->debugString().c_str());
         return ERROR_MALFORMED;
     }
 
+    if (!outputFormat->findInt32("stride", &stride)) {
+        if (mSurfaceControl == NULL) {
+            ALOGE("format must have stride for byte buffer mode: %s",
+                    outputFormat->debugString().c_str());
+            return ERROR_MALFORMED;
+        }
+        // for surface output, set stride to width; it isn't actually used.
+        stride = width;
+    }
+
     int32_t crop_left, crop_top, crop_right, crop_bottom;
     if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
         crop_left = crop_top = 0;
@@ -518,15 +573,23 @@
         crop_bottom = height - 1;
     }
 
-    sp<IMemory> frameMem = allocVideoFrame(
-            trackMeta(),
-            (crop_right - crop_left + 1),
-            (crop_bottom - crop_top + 1),
-            0,
-            0,
-            dstBpp());
-    addFrame(frameMem);
-    VideoFrame* frame = static_cast<VideoFrame*>(frameMem->pointer());
+    if (mFrame == NULL) {
+        sp<IMemory> frameMem = allocVideoFrame(
+                trackMeta(),
+                (crop_right - crop_left + 1),
+                (crop_bottom - crop_top + 1),
+                0,
+                0,
+                dstBpp(),
+                mSurfaceControl != nullptr /*allocRotated*/);
+        mFrame = static_cast<VideoFrame*>(frameMem->pointer());
+
+        setFrame(frameMem);
+    }
+
+    if (mSurfaceControl != nullptr) {
+        return captureSurfaceControl();
+    }
 
     ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
 
@@ -547,8 +610,8 @@
                 (const uint8_t *)videoFrameBuffer->data(),
                 width, height, stride,
                 crop_left, crop_top, crop_right, crop_bottom,
-                frame->getFlattenedData(),
-                frame->mWidth, frame->mHeight, frame->mRowBytes,
+                mFrame->getFlattenedData(),
+                mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
                 crop_left, crop_top, crop_right, crop_bottom);
         return OK;
     }
@@ -558,6 +621,101 @@
     return ERROR_UNSUPPORTED;
 }
 
+sp<Surface> VideoFrameDecoder::initSurfaceControl() {
+    sp<SurfaceComposerClient> client = new SurfaceComposerClient();
+    if (client->initCheck() != NO_ERROR) {
+        ALOGE("failed to get SurfaceComposerClient");
+        return NULL;
+    }
+
+    // create a container layer to hold the capture layer, so that we can
+    // crop to the full frame. Without the container, the crop would be
+    // limited to the display size.
+    sp<SurfaceControl> parent = client->createSurface(
+            String8("parent"),
+            0 /* width */, 0 /* height */,
+            PIXEL_FORMAT_RGBA_8888,
+            ISurfaceComposerClient::eFXSurfaceContainer);
+
+    if (!parent) {
+        ALOGE("failed to get surface control parent");
+        return NULL;
+    }
+
+    // create the surface with a placeholder size of 1x1 for now; the real
+    // size will be set before the capture once we have output format info.
+    sp<SurfaceControl> surfaceControl = client->createSurface(
+            String8("thumbnail"),
+            1 /* width */, 1 /* height */,
+            PIXEL_FORMAT_RGBA_8888,
+            ISurfaceComposerClient::eFXSurfaceBufferQueue,
+            parent.get());
+
+    if (!surfaceControl) {
+        ALOGE("failed to get surface control");
+        return NULL;
+    }
+
+    SurfaceComposerClient::Transaction t;
+    t.hide(parent)
+            .show(surfaceControl)
+            .apply(true);
+
+    mSurfaceControl = surfaceControl;
+    mParent = parent;
+
+    return surfaceControl->getSurface();
+}
+
+status_t VideoFrameDecoder::captureSurfaceControl() {
+    // set the layer size to the output size before the capture
+    SurfaceComposerClient::Transaction()
+        .setSize(mSurfaceControl, mFrame->mWidth, mFrame->mHeight)
+        .apply(true);
+
+    sp<GraphicBuffer> outBuffer;
+    status_t err = ScreenshotClient::captureChildLayers(
+            mParent->getHandle(),
+            ui::Dataspace::V0_SRGB,
+            captureFormat(),
+            Rect(0, 0, mFrame->mWidth, mFrame->mHeight),
+            {},
+            1.0f /*frameScale*/,
+            &outBuffer);
+
+    if (err != OK) {
+        ALOGE("failed to captureLayers: err %d", err);
+        return err;
+    }
+
+    ALOGV("capture: %dx%d, format %d, stride %d",
+            outBuffer->getWidth(),
+            outBuffer->getHeight(),
+            outBuffer->getPixelFormat(),
+            outBuffer->getStride());
+
+    uint8_t *base;
+    int32_t outBytesPerPixel, outBytesPerStride;
+    err = outBuffer->lock(
+            GraphicBuffer::USAGE_SW_READ_OFTEN,
+            reinterpret_cast<void**>(&base),
+            &outBytesPerPixel,
+            &outBytesPerStride);
+    if (err != OK) {
+        ALOGE("failed to lock graphic buffer: err %d", err);
+        return err;
+    }
+
+    uint8_t *dst = mFrame->getFlattenedData();
+    for (size_t y = 0 ; y < fmin(mFrame->mHeight, outBuffer->getHeight()) ; y++) {
+        memcpy(dst, base, fmin(mFrame->mWidth, outBuffer->getWidth()) * mFrame->mBytesPerPixel);
+        dst += mFrame->mRowBytes;
+        base += outBuffer->getStride() * mFrame->mBytesPerPixel;
+    }
+    outBuffer->unlock();
+    return OK;
+}
+
 ////////////////////////////////////////////////////////////////////////
 
 ImageDecoder::ImageDecoder(
@@ -577,8 +735,8 @@
 }
 
 sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
-        int64_t frameTimeUs, size_t /*numFrames*/,
-        int /*seekMode*/, MediaSource::ReadOptions *options) {
+        int64_t frameTimeUs, int /*seekMode*/,
+        MediaSource::ReadOptions *options, sp<Surface> * /*window*/) {
     sp<MetaData> overrideMeta;
     if (frameTimeUs < 0) {
         uint32_t type;
@@ -705,7 +863,7 @@
                 trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
         mFrame = static_cast<VideoFrame*>(frameMem->pointer());
 
-        addFrame(frameMem);
+        setFrame(frameMem);
     }
 
     int32_t srcFormat;
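
The surface-capture path added to FrameDecoder above finishes with a row-by-row copy from the stride-padded GraphicBuffer into the packed VideoFrame. A minimal, self-contained sketch of that copy pattern, in plain C++ with illustrative parameter names rather than the real GraphicBuffer API:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Copy 'height' rows of 'width' pixels (bytesPerPixel each) from a source
    // whose rows are srcStridePixels apart into a destination with dstRowBytes
    // between rows, the same shape as the loop in captureSurfaceControl().
    static void copyRows(uint8_t* dst, const uint8_t* src,
                         size_t width, size_t height,
                         size_t srcStridePixels, size_t dstRowBytes,
                         size_t bytesPerPixel) {
        const size_t rowBytes = width * bytesPerPixel;
        for (size_t y = 0; y < height; ++y) {
            std::memcpy(dst, src, rowBytes);
            dst += dstRowBytes;                      // destination pitch
            src += srcStridePixels * bytesPerPixel;  // padded source pitch
        }
    }
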
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 3d58d4b..a267f7e 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -170,6 +170,7 @@
 sp<IMediaCodecList> MediaCodecList::sRemoteList;
 
 sp<MediaCodecList::BinderDeathObserver> MediaCodecList::sBinderDeathObserver;
+sp<IBinder> MediaCodecList::sMediaPlayer;  // kept alive because it is linked to a death observer
 
 void MediaCodecList::BinderDeathObserver::binderDied(const wp<IBinder> &who __unused) {
     Mutex::Autolock _l(sRemoteInitMutex);
@@ -181,15 +182,14 @@
 sp<IMediaCodecList> MediaCodecList::getInstance() {
     Mutex::Autolock _l(sRemoteInitMutex);
     if (sRemoteList == nullptr) {
-        sp<IBinder> binder =
-            defaultServiceManager()->getService(String16("media.player"));
+        sMediaPlayer = defaultServiceManager()->getService(String16("media.player"));
         sp<IMediaPlayerService> service =
-            interface_cast<IMediaPlayerService>(binder);
+            interface_cast<IMediaPlayerService>(sMediaPlayer);
         if (service.get() != nullptr) {
             sRemoteList = service->getCodecList();
             if (sRemoteList != nullptr) {
                 sBinderDeathObserver = new BinderDeathObserver();
-                binder->linkToDeath(sBinderDeathObserver.get());
+                sMediaPlayer->linkToDeath(sBinderDeathObserver.get());
             }
         }
         if (sRemoteList == nullptr) {
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index babdc7a..8b6262f 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -36,7 +36,7 @@
 using namespace android;
 
 const int64_t kTimeoutWaitForOutputUs = 500000; // 0.5 seconds
-const int64_t kTimeoutWaitForInputUs = 5000; // 5 milliseconds
+const int64_t kTimeoutWaitForInputUs = 0; // don't wait
 const int kTimeoutMaxRetries = 20;
 
 //static
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index fa3d372..6f536a9 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -44,7 +44,7 @@
 StagefrightMetadataRetriever::StagefrightMetadataRetriever()
     : mParsedMetaData(false),
       mAlbumArt(NULL),
-      mLastImageIndex(-1) {
+      mLastDecodedIndex(-1) {
     ALOGV("StagefrightMetadataRetriever()");
 }
 
@@ -143,8 +143,8 @@
 
     FrameRect rect = {left, top, right, bottom};
 
-    if (mImageDecoder != NULL && index == mLastImageIndex) {
-        return mImageDecoder->extractFrame(&rect);
+    if (mDecoder != NULL && index == mLastDecodedIndex) {
+        return mDecoder->extractFrame(&rect);
     }
 
     return getImageInternal(
@@ -153,6 +153,8 @@
 
 sp<IMemory> StagefrightMetadataRetriever::getImageInternal(
         int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect) {
+    mDecoder.clear();
+    mLastDecodedIndex = -1;
 
     if (mExtractor.get() == NULL) {
         ALOGE("no extractor.");
@@ -227,14 +229,14 @@
         const AString &componentName = matchingCodecs[i];
         sp<ImageDecoder> decoder = new ImageDecoder(componentName, trackMeta, source);
         int64_t frameTimeUs = thumbnail ? -1 : 0;
-        if (decoder->init(frameTimeUs, 1 /*numFrames*/, 0 /*option*/, colorFormat) == OK) {
+        if (decoder->init(frameTimeUs, 0 /*option*/, colorFormat) == OK) {
             sp<IMemory> frame = decoder->extractFrame(rect);
 
             if (frame != NULL) {
                 if (rect != NULL) {
                     // keep the decoder if slice decoding
-                    mImageDecoder = decoder;
-                    mLastImageIndex = index;
+                    mDecoder = decoder;
+                    mLastDecodedIndex = index;
                 }
                 return frame;
             }
@@ -242,6 +244,7 @@
         ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
     }
 
+    ALOGE("all codecs failed to extract frame.");
     return NULL;
 }
 
@@ -250,36 +253,40 @@
     ALOGV("getFrameAtTime: %" PRId64 " us option: %d colorFormat: %d, metaOnly: %d",
             timeUs, option, colorFormat, metaOnly);
 
-    sp<IMemory> frame;
-    status_t err = getFrameInternal(
-            timeUs, 1, option, colorFormat, metaOnly, &frame, NULL /*outFrames*/);
-    return (err == OK) ? frame : NULL;
+    return getFrameInternal(timeUs, option, colorFormat, metaOnly);
 }
 
-status_t StagefrightMetadataRetriever::getFrameAtIndex(
-        std::vector<sp<IMemory> >* frames,
-        int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
-    ALOGV("getFrameAtIndex: frameIndex %d, numFrames %d, colorFormat: %d, metaOnly: %d",
-            frameIndex, numFrames, colorFormat, metaOnly);
+sp<IMemory> StagefrightMetadataRetriever::getFrameAtIndex(
+        int frameIndex, int colorFormat, bool metaOnly) {
+    ALOGV("getFrameAtIndex: frameIndex %d, colorFormat: %d, metaOnly: %d",
+            frameIndex, colorFormat, metaOnly);
+    if (mDecoder != NULL && frameIndex == mLastDecodedIndex + 1) {
+        sp<IMemory> frame = mDecoder->extractFrame();
+        if (frame != nullptr) {
+            mLastDecodedIndex = frameIndex;
+        }
+        return frame;
+    }
 
-    return getFrameInternal(
-            frameIndex, numFrames, MediaSource::ReadOptions::SEEK_FRAME_INDEX,
-            colorFormat, metaOnly, NULL /*outFrame*/, frames);
+    return getFrameInternal(frameIndex,
+            MediaSource::ReadOptions::SEEK_FRAME_INDEX, colorFormat, metaOnly);
 }
 
-status_t StagefrightMetadataRetriever::getFrameInternal(
-        int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
-        sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames) {
+sp<IMemory> StagefrightMetadataRetriever::getFrameInternal(
+        int64_t timeUs, int option, int colorFormat, bool metaOnly) {
+    mDecoder.clear();
+    mLastDecodedIndex = -1;
+
     if (mExtractor.get() == NULL) {
         ALOGE("no extractor.");
-        return NO_INIT;
+        return NULL;
     }
 
     sp<MetaData> fileMeta = mExtractor->getMetaData();
 
     if (fileMeta == NULL) {
         ALOGE("extractor doesn't publish metadata, failed to initialize?");
-        return NO_INIT;
+        return NULL;
     }
 
     size_t n = mExtractor->countTracks();
@@ -300,30 +307,24 @@
 
     if (i == n) {
         ALOGE("no video track found.");
-        return INVALID_OPERATION;
+        return NULL;
     }
 
     sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
             i, MediaExtractor::kIncludeExtensiveMetaData);
     if (!trackMeta) {
-        return UNKNOWN_ERROR;
+        return NULL;
     }
 
     if (metaOnly) {
-        if (outFrame != NULL) {
-            *outFrame = FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
-            if (*outFrame != NULL) {
-                return OK;
-            }
-        }
-        return UNKNOWN_ERROR;
+        return FrameDecoder::getMetadataOnly(trackMeta, colorFormat);
     }
 
     sp<IMediaSource> source = mExtractor->getTrack(i);
 
     if (source.get() == NULL) {
         ALOGV("unable to instantiate video track.");
-        return UNKNOWN_ERROR;
+        return NULL;
     }
 
     const void *data;
@@ -350,24 +351,22 @@
     for (size_t i = 0; i < matchingCodecs.size(); ++i) {
         const AString &componentName = matchingCodecs[i];
         sp<VideoFrameDecoder> decoder = new VideoFrameDecoder(componentName, trackMeta, source);
-        if (decoder->init(timeUs, numFrames, option, colorFormat) == OK) {
-            if (outFrame != NULL) {
-                *outFrame = decoder->extractFrame();
-                if (*outFrame != NULL) {
-                    return OK;
+        if (decoder->init(timeUs, option, colorFormat) == OK) {
+            sp<IMemory> frame = decoder->extractFrame();
+            if (frame != nullptr) {
+                // keep the decoder if seeking by frame index
+                if (option == MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
+                    mDecoder = decoder;
+                    mLastDecodedIndex = timeUs;
                 }
-            } else if (outFrames != NULL) {
-                status_t err = decoder->extractFrames(outFrames);
-                if (err == OK) {
-                    return OK;
-                }
+                return frame;
             }
         }
         ALOGV("%s failed to extract frame, trying next decoder.", componentName.c_str());
     }
 
     ALOGE("all codecs failed to extract frame.");
-    return UNKNOWN_ERROR;
+    return NULL;
 }
 
 MediaAlbumArt *StagefrightMetadataRetriever::extractAlbumArt() {
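
getFrameAtIndex() above now keeps the decoder and the last decoded index, so a strictly sequential request (index == last + 1) can pull the next frame without reinitializing, while any other index rebuilds the decoder. A stand-alone sketch of that caching idea; FakeDecoder is a hypothetical stand-in, not the real VideoFrameDecoder:

    #include <memory>
    #include <optional>

    // Stand-in "decoder" that hands out frame indices sequentially.
    struct FakeDecoder {
        int next;
        explicit FakeDecoder(int start) : next(start) {}
        std::optional<int> nextFrame() { return next++; }
    };

    // Sequential-read cache mirroring the mDecoder/mLastDecodedIndex idea above.
    struct FrameCache {
        std::unique_ptr<FakeDecoder> decoder;
        int lastIndex = -1;

        std::optional<int> frameAt(int index) {
            if (decoder && index == lastIndex + 1) {        // sequential: reuse decoder
                auto frame = decoder->nextFrame();
                if (frame) lastIndex = index;
                return frame;
            }
            decoder = std::make_unique<FakeDecoder>(index); // otherwise: reinitialize
            auto frame = decoder->nextFrame();
            if (frame) lastIndex = index;
            return frame;
        }
    };
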
diff --git a/media/libstagefright/codecs/flac/enc/Android.bp b/media/libstagefright/codecs/flac/enc/Android.bp
index d7d871a..f35bce1 100644
--- a/media/libstagefright/codecs/flac/enc/Android.bp
+++ b/media/libstagefright/codecs/flac/enc/Android.bp
@@ -15,8 +15,10 @@
     },
 
     header_libs: ["libbase_headers"],
-    static_libs: [
+    shared_libs: [
         "libaudioutils",
+    ],
+    static_libs: [
         "libFLAC",
     ],
 }
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/packet_util.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/packet_util.cpp
index 48414d7..5880e32 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/packet_util.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/packet_util.cpp
@@ -52,7 +52,11 @@
         PV_BitstreamByteAlign(stream);
         BitstreamReadBits32(stream, resync_marker_length);
 
-        *next_MB = (int) BitstreamReadBits16(stream, nbits);
+        int mbnum = (int) BitstreamReadBits16(stream, nbits);
+        if (mbnum < 0) {
+            return PV_FAIL;
+        }
+        *next_MB = mbnum;
 //      if (*next_MB <= video->mbnum)   /*  needs more investigation */
 //          *next_MB = video->mbnum+1;
 
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
index f18f789..679b091 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/vop.cpp
@@ -1355,6 +1355,14 @@
             int tmpHeight = (tmpDisplayHeight + 15) & -16;
             int tmpWidth = (tmpDisplayWidth + 15) & -16;
 
+            if (tmpWidth > video->width)
+            {
+                // while allowed by the spec, this decoder does not actually
+                // support an increase in size.
+                ALOGE("width increase not supported");
+                status = PV_FAIL;
+                goto return_point;
+            }
             if (tmpHeight * tmpWidth > video->size)
             {
                 // This is just possibly "b/37079296".
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 8d0ea3a..08e20cc 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -290,7 +290,7 @@
 }
 
 bool SoftVorbis::isConfigured() const {
-    return mInputBufferCount >= 2;
+    return (mState != NULL && mVi != NULL);
 }
 
 static void makeBitReader(
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index da86758..87e8fd4 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -1426,75 +1426,90 @@
     RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
 
     UWORD32 ui_exec_done;
+    WORD32 i_num_preroll = 0;
     /* Checking for end of processing */
     err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DONE_QUERY,
                                 &ui_exec_done);
     RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DONE_QUERY");
 
-#ifdef ENABLE_MPEG_D_DRC
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                              IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES,
+                              &i_num_preroll);
+
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES");
     {
-        if (ui_exec_done != 1) {
-            VOID* p_array;        // ITTIAM:buffer to handle gain payload
-            WORD32 buf_size = 0;  // ITTIAM:gain payload length
-            WORD32 bit_str_fmt = 1;
-            WORD32 gain_stream_flag = 1;
+        int32_t pi_preroll_frame_offset = 0;
+        do {
+#ifdef ENABLE_MPEG_D_DRC
+            if (ui_exec_done != 1) {
+                VOID* p_array;        // ITTIAM:buffer to handle gain payload
+                WORD32 buf_size = 0;  // ITTIAM:gain payload length
+                WORD32 bit_str_fmt = 1;
+                WORD32 gain_stream_flag = 1;
 
-            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
-                                        IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
-            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
+                err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                            IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+                RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
 
-            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
-                                        IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
-            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
+                err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                            IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+                RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
 
-            if (buf_size > 0) {
-                /*Set bitstream_split_format */
-                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
-                                          IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
-                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+                if (buf_size > 0) {
+                    /*Set bitstream_split_format */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                              IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
 
-                memcpy(mDrcInBuf, p_array, buf_size);
-                /* Set number of bytes to be processed */
-                err_code =
-                    ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
-                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+                    memcpy(mDrcInBuf, p_array, buf_size);
+                    /* Set number of bytes to be processed */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS,
+                                              0, &buf_size);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
 
-                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
-                                          IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
-                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                              IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG,
+                                              &gain_stream_flag);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
 
-                /* Execute process */
-                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
-                                          IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
-                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+                    /* Execute process */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                              IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
 
-                mMpegDDRCPresent = 1;
+                    mMpegDDRCPresent = 1;
+                }
             }
-        }
-    }
 #endif
-    /* How much buffer is used in input buffers */
-    err_code =
-        ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CURIDX_INPUT_BUF, 0, bytesConsumed);
-    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+            /* How much buffer is used in input buffers */
+            err_code =
+                ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CURIDX_INPUT_BUF,
+                                 0, bytesConsumed);
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
 
-    /* Get the output bytes */
-    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_OUTPUT_BYTES, 0, outBytes);
-    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
+            /* Get the output bytes */
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+                                        IA_API_CMD_GET_OUTPUT_BYTES, 0, outBytes);
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
 #ifdef ENABLE_MPEG_D_DRC
 
-    if (mMpegDDRCPresent == 1) {
-        memcpy(mDrcInBuf, mOutputBuffer, *outBytes);
-        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
-        RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+            if (mMpegDDRCPresent == 1) {
+                memcpy(mDrcInBuf, mOutputBuffer + pi_preroll_frame_offset, *outBytes);
+                pi_preroll_frame_offset += *outBytes;
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES,
+                                          0, outBytes);
+                RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
 
-        err_code =
-            ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, NULL);
-        RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE,
+                                          IA_CMD_TYPE_DO_EXECUTE, NULL);
+                RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
 
-        memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
-    }
+                memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+            }
 #endif
+            i_num_preroll--;
+        } while (i_num_preroll > 0);
+    }
     return IA_NO_ERROR;
 }
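
The SoftXAAC change above wraps the post-EXECUTE bookkeeping in a do/while loop so that every pre-roll frame reported by IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES is drained, with the DRC input offset advancing on each pass. A reduced sketch of that control flow; decodeOneFrame is hypothetical and stands in for the ixheaacd_dec_api calls, and the caller is assumed to have sized 'out' for the worst case:

    #include <cstdint>
    #include <vector>

    // Decode-and-drain loop: run at least once, then repeat until all
    // pre-roll frames are consumed, accumulating the output write offset.
    template <typename DecodeFn>
    static int32_t drainWithPreroll(std::vector<uint8_t>& out, int32_t numPreroll,
                                    DecodeFn decodeOneFrame) {
        int32_t offset = 0;
        do {
            offset += decodeOneFrame(out.data() + offset);  // bytes produced this pass
            --numPreroll;
        } while (numPreroll > 0);
        return offset;                                      // total bytes written
    }
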
 
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index d685321..c7dc415 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -324,8 +324,8 @@
 }
 
 #define DECLARE_YUV2RGBFUNC(func, rgb) int (*func)(     \
-        const uint8*, int, const uint8*, int,           \
-        const uint8*, int, uint8*, int, int, int)       \
+        const uint8_t*, int, const uint8_t*, int,           \
+        const uint8_t*, int, uint8_t*, int, int, int)       \
         = mSrcColorSpace.isBt709() ? libyuv::H420To##rgb \
         : mSrcColorSpace.isJpeg() ? libyuv::J420To##rgb  \
         : libyuv::I420To##rgb
@@ -350,7 +350,7 @@
     {
         DECLARE_YUV2RGBFUNC(func, RGB565);
         (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
-                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+                (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
     }
 
@@ -358,7 +358,7 @@
     {
         DECLARE_YUV2RGBFUNC(func, ABGR);
         (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
-                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+                (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
     }
 
@@ -366,7 +366,7 @@
     {
         DECLARE_YUV2RGBFUNC(func, ARGB);
         (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
-                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+                (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
     }
 
@@ -391,17 +391,17 @@
 
     switch (mDstFormat) {
     case OMX_COLOR_Format16bitRGB565:
-        libyuv::NV12ToRGB565(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+        libyuv::NV12ToRGB565(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
                 dst.mStride, src.cropWidth(), src.cropHeight());
         break;
 
     case OMX_COLOR_Format32bitBGRA8888:
-        libyuv::NV12ToARGB(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+        libyuv::NV12ToARGB(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
                 dst.mStride, src.cropWidth(), src.cropHeight());
         break;
 
     case OMX_COLOR_Format32BitRGBA8888:
-        libyuv::NV12ToABGR(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+        libyuv::NV12ToABGR(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
                 dst.mStride, src.cropWidth(), src.cropHeight());
         break;
 
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 359df3d..cf91405 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -269,10 +269,21 @@
 
     Rect bounds(mCropWidth, mCropHeight);
 
-    void *dst;
-    CHECK_EQ(0, mapper.lock(buf->handle,
-            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_RARELY,
-            bounds, &dst));
+    void *dst = NULL;
+    struct android_ycbcr ycbcr;
+    if ( !mConverter &&
+         (mColorFormat == OMX_COLOR_FormatYUV420Planar ||
+         mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
+         mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar ||
+         mColorFormat == OMX_COLOR_FormatYUV420Planar16)) {
+        CHECK_EQ(0, mapper.lockYCbCr(buf->handle,
+                GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_RARELY,
+                bounds, &ycbcr));
+    } else {
+        CHECK_EQ(0, mapper.lock(buf->handle,
+                GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_RARELY,
+                bounds, &dst));
+    }
 
     // TODO move the other conversions also into ColorConverter, and
     // fix cropping issues (when mCropLeft/Top != 0 or mWidth != mCropWidth)
@@ -289,12 +300,10 @@
         const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
         const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
 
-        uint8_t *dst_y = (uint8_t *)dst;
-        size_t dst_y_size = buf->stride * buf->height;
+        uint8_t *dst_y = (uint8_t *)ycbcr.y;
+        uint8_t *dst_v = (uint8_t *)ycbcr.cr;
+        uint8_t *dst_u = (uint8_t *)ycbcr.cb;
         size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-        size_t dst_c_size = dst_c_stride * buf->height / 2;
-        uint8_t *dst_v = dst_y + dst_y_size;
-        uint8_t *dst_u = dst_v + dst_c_size;
 
         dst_y += mCropTop * buf->stride + mCropLeft;
         dst_v += (mCropTop/2) * dst_c_stride + mCropLeft/2;
@@ -321,12 +330,10 @@
         const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
         const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
 
-        uint8_t *dst_y = (uint8_t *)dst;
-        size_t dst_y_size = buf->stride * buf->height;
+        uint8_t *dst_y = (uint8_t *)ycbcr.y;
+        uint8_t *dst_v = (uint8_t *)ycbcr.cr;
+        uint8_t *dst_u = (uint8_t *)ycbcr.cb;
         size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-        size_t dst_c_size = dst_c_stride * buf->height / 2;
-        uint8_t *dst_v = dst_y + dst_y_size;
-        uint8_t *dst_u = dst_v + dst_c_size;
 
         dst_y += mCropTop * buf->stride + mCropLeft;
         dst_v += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
@@ -361,13 +368,10 @@
         src_y += mCropLeft + mCropTop * mWidth;
         src_uv += (mCropLeft + mCropTop * mWidth) / 2;
 
-        uint8_t *dst_y = (uint8_t *)dst;
-
-        size_t dst_y_size = buf->stride * buf->height;
+        uint8_t *dst_y = (uint8_t *)ycbcr.y;
+        uint8_t *dst_v = (uint8_t *)ycbcr.cr;
+        uint8_t *dst_u = (uint8_t *)ycbcr.cb;
         size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-        size_t dst_c_size = dst_c_stride * buf->height / 2;
-        uint8_t *dst_v = dst_y + dst_y_size;
-        uint8_t *dst_u = dst_v + dst_c_size;
 
         dst_y += mCropTop * buf->stride + mCropLeft;
         dst_v += (mCropTop/2) * dst_c_stride + mCropLeft/2;
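
SoftwareRenderer above switches from hand-derived chroma offsets to mapper.lockYCbCr(), so the allocator reports each plane's base pointer and stride instead of the renderer computing dst_v/dst_u from buf->stride * buf->height. A loose illustration of the layout involved; the struct below only mirrors android_ycbcr for the planar, chroma_step == 1 case and is not the real type:

    #include <cstddef>
    #include <cstdint>

    // Per-plane addressing as reported by the allocator.
    struct YCbCrLayout {
        uint8_t* y;
        uint8_t* cb;
        uint8_t* cr;
        size_t ystride;   // bytes between luma rows
        size_t cstride;   // bytes between chroma rows
    };

    // Address of the Cb sample covering pixel (x, y), assuming 4:2:0 subsampling.
    static uint8_t* cbAt(const YCbCrLayout& p, size_t x, size_t y) {
        return p.cb + (y / 2) * p.cstride + (x / 2);
    }
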
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index 04041eb..a07eb8c 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -77,7 +77,7 @@
             <Limit name="bitrate" range="1-40000000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.android.av1.decoder" type="video/av01">
+        <MediaCodec name="c2.android.gav1.decoder" type="video/av01">
             <Limit name="size" min="96x96" max="1920x1080" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index 67d3f1a..9532ba6 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -182,7 +182,7 @@
             </Variant>
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.android.av1.decoder" type="video/av01" variant="!slow-cpu">
+        <MediaCodec name="c2.android.gav1.decoder" type="video/av01" variant="!slow-cpu">
             <Limit name="size" min="2x2" max="1920x1080" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/exports.lds b/media/libstagefright/exports.lds
index 06c4f19..f5ddf1e 100644
--- a/media/libstagefright/exports.lds
+++ b/media/libstagefright/exports.lds
@@ -395,7 +395,6 @@
         ScaleFilterCols_NEON*;
         ScaleFilterReduce;
         ScaleFilterRows_NEON*;
-        ScaleOffset;
         ScalePlane;
         ScalePlane_16;
         ScalePlaneBilinearDown;
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index 7a67e55..b1f62c7 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -8,7 +8,7 @@
         "MediaFilter.cpp",
         "RSFilter.cpp",
         "SaturationFilter.cpp",
-        "saturationARGB.rs",
+        "saturationARGB.rscript",
         "SimpleFilter.cpp",
         "ZeroFilter.cpp",
     ],
diff --git a/media/libstagefright/filters/saturation.rs b/media/libstagefright/filters/saturation.rscript
similarity index 100%
rename from media/libstagefright/filters/saturation.rs
rename to media/libstagefright/filters/saturation.rscript
diff --git a/media/libstagefright/filters/saturationARGB.rs b/media/libstagefright/filters/saturationARGB.rscript
similarity index 100%
rename from media/libstagefright/filters/saturationARGB.rs
rename to media/libstagefright/filters/saturationARGB.rscript
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index b494e16..7ebe71f 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -1,4 +1,4 @@
-cc_library {
+cc_library_shared {
     name: "libstagefright_flacdec",
     vendor_available: true,
 
@@ -18,29 +18,20 @@
         cfi: true,
     },
 
-    static: {
-        whole_static_libs: [
-            "libFLAC",
-            "libaudioutils",
-        ],
-    },
-
-    shared: {
-        static_libs: [
-            "libFLAC",
-            "libaudioutils",
-        ],
-        export_static_lib_headers: [
-            "libFLAC",
-        ],
-    },
-
     shared_libs: [
+        "libaudioutils",
         "liblog",
     ],
 
+    static_libs: [
+        "libFLAC",
+    ],
+
+    export_static_lib_headers: [
+        "libFLAC",
+    ],
+
     header_libs: [
         "libmedia_headers",
-        "libFLAC-headers",
     ],
 }
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index dc58c15..1af6276 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -24,15 +24,17 @@
 #include <media/stagefright/foundation/ABase.h>
 #include <media/MediaSource.h>
 #include <media/openmax/OMX_Video.h>
-#include <system/graphics-base.h>
+#include <ui/GraphicTypes.h>
 
 namespace android {
 
 struct AMessage;
-class MediaCodecBuffer;
-class IMediaSource;
-class VideoFrame;
 struct MediaCodec;
+class IMediaSource;
+class MediaCodecBuffer;
+class Surface;
+class SurfaceControl;
+class VideoFrame;
 
 struct FrameRect {
     int32_t left, top, right, bottom;
@@ -44,13 +46,10 @@
             const sp<MetaData> &trackMeta,
             const sp<IMediaSource> &source);
 
-    status_t init(
-            int64_t frameTimeUs, size_t numFrames, int option, int colorFormat);
+    status_t init(int64_t frameTimeUs, int option, int colorFormat);
 
     sp<IMemory> extractFrame(FrameRect *rect = NULL);
 
-    status_t extractFrames(std::vector<sp<IMemory> >* frames);
-
     static sp<IMemory> getMetadataOnly(
             const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail = false);
 
@@ -59,9 +58,9 @@
 
     virtual sp<AMessage> onGetFormatAndSeekOptions(
             int64_t frameTimeUs,
-            size_t numFrames,
             int seekMode,
-            MediaSource::ReadOptions *options) = 0;
+            MediaSource::ReadOptions *options,
+            sp<Surface> *window) = 0;
 
     virtual status_t onExtractRect(FrameRect *rect) = 0;
 
@@ -79,24 +78,24 @@
 
     sp<MetaData> trackMeta()     const      { return mTrackMeta; }
     OMX_COLOR_FORMATTYPE dstFormat() const  { return mDstFormat; }
+    ui::PixelFormat captureFormat() const   { return mCaptureFormat; }
     int32_t dstBpp()             const      { return mDstBpp; }
-
-    void addFrame(const sp<IMemory> &frame) {
-        mFrames.push_back(frame);
-    }
+    void setFrame(const sp<IMemory> &frameMem) { mFrameMemory = frameMem; }
 
 private:
     AString mComponentName;
     sp<MetaData> mTrackMeta;
     sp<IMediaSource> mSource;
     OMX_COLOR_FORMATTYPE mDstFormat;
+    ui::PixelFormat mCaptureFormat;
     int32_t mDstBpp;
-    std::vector<sp<IMemory> > mFrames;
+    sp<IMemory> mFrameMemory;
     MediaSource::ReadOptions mReadOptions;
     sp<MediaCodec> mDecoder;
     sp<AMessage> mOutputFormat;
     bool mHaveMoreInputs;
     bool mFirstSample;
+    sp<Surface> mSurface;
 
     status_t extractInternal();
 
@@ -112,9 +111,9 @@
 protected:
     virtual sp<AMessage> onGetFormatAndSeekOptions(
             int64_t frameTimeUs,
-            size_t numFrames,
             int seekMode,
-            MediaSource::ReadOptions *options) override;
+            MediaSource::ReadOptions *options,
+            sp<Surface> *window) override;
 
     virtual status_t onExtractRect(FrameRect *rect) override {
         // Rect extraction for sequences is not supported for now.
@@ -134,11 +133,15 @@
             bool *done) override;
 
 private:
+    sp<SurfaceControl> mSurfaceControl;
+    sp<SurfaceControl> mParent;
+    VideoFrame *mFrame;
     bool mIsAvcOrHevc;
     MediaSource::ReadOptions::SeekMode mSeekMode;
     int64_t mTargetTimeUs;
-    size_t mNumFrames;
-    size_t mNumFramesDecoded;
+
+    sp<Surface> initSurfaceControl();
+    status_t captureSurfaceControl();
 };
 
 struct ImageDecoder : public FrameDecoder {
@@ -150,9 +153,9 @@
 protected:
     virtual sp<AMessage> onGetFormatAndSeekOptions(
             int64_t frameTimeUs,
-            size_t numFrames,
             int seekMode,
-            MediaSource::ReadOptions *options) override;
+            MediaSource::ReadOptions *options,
+            sp<Surface> *window) override;
 
     virtual status_t onExtractRect(FrameRect *rect) override;
 
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index c50677a..ee51290 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -26,7 +26,7 @@
 namespace android {
 
 class DataSource;
-struct ImageDecoder;
+struct FrameDecoder;
 struct FrameRect;
 
 struct StagefrightMetadataRetriever : public MediaMetadataRetrieverBase {
@@ -47,9 +47,8 @@
             int index, int colorFormat, bool metaOnly, bool thumbnail);
     virtual sp<IMemory> getImageRectAtIndex(
             int index, int colorFormat, int left, int top, int right, int bottom);
-    virtual status_t getFrameAtIndex(
-            std::vector<sp<IMemory> >* frames,
-            int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+    virtual sp<IMemory> getFrameAtIndex(
+            int index, int colorFormat, bool metaOnly);
 
     virtual MediaAlbumArt *extractAlbumArt();
     virtual const char *extractMetadata(int keyCode);
@@ -62,17 +61,17 @@
     KeyedVector<int, String8> mMetaData;
     MediaAlbumArt *mAlbumArt;
 
-    sp<ImageDecoder> mImageDecoder;
-    int mLastImageIndex;
+    sp<FrameDecoder> mDecoder;
+    int mLastDecodedIndex;
     void parseMetaData();
     void parseColorAspects(const sp<MetaData>& meta);
     // Delete album art and clear metadata.
     void clearMetadata();
 
-    status_t getFrameInternal(
-            int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
-            sp<IMemory>* outFrame, std::vector<sp<IMemory> >* outFrames);
-    virtual sp<IMemory> getImageInternal(
+    sp<IMemory> getFrameInternal(
+            int64_t timeUs, int option, int colorFormat, bool metaOnly);
+
+    sp<IMemory> getImageInternal(
             int index, int colorFormat, bool metaOnly, bool thumbnail, FrameRect* rect);
 
     StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecList.h b/media/libstagefright/include/media/stagefright/MediaCodecList.h
index e44b0a4..e681d25 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecList.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecList.h
@@ -83,6 +83,7 @@
     };
 
     static sp<BinderDeathObserver> sBinderDeathObserver;
+    static sp<IBinder> sMediaPlayer;
 
     static sp<IMediaCodecList> sCodecList;
     static sp<IMediaCodecList> sRemoteList;
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index e260cae..7d03d98 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -72,7 +72,6 @@
         cfi: true,
     },
 
-    compile_multilib: "32",
 }
 
 cc_library_shared {
diff --git a/media/mtp/MtpServer.h b/media/mtp/MtpServer.h
index 1f8799f..8cc9a9a 100644
--- a/media/mtp/MtpServer.h
+++ b/media/mtp/MtpServer.h
@@ -34,8 +34,11 @@
 
 class IMtpDatabase;
 class MtpStorage;
+class MtpMockServer;
 
 class MtpServer {
+    // libFuzzer testing
+    friend class MtpMockServer;
 
 private:
     IMtpDatabase*       mDatabase;
diff --git a/media/mtp/MtpStringBuffer.cpp b/media/mtp/MtpStringBuffer.cpp
index cd379bf..d8d425b 100644
--- a/media/mtp/MtpStringBuffer.cpp
+++ b/media/mtp/MtpStringBuffer.cpp
@@ -26,14 +26,31 @@
 
 namespace {
 
-std::wstring_convert<std::codecvt_utf8_utf16<char16_t>,char16_t> gConvert;
+const char * utf16_cerror = "__CONVERSION_ERROR__";
+const char16_t * utf8_cerror = u"__CONVERSION_ERROR__";
+
+std::wstring_convert<std::codecvt_utf8_utf16<char16_t>,char16_t> gConvert(utf16_cerror, utf8_cerror);
 
 static std::string utf16ToUtf8(std::u16string input_str) {
-    return gConvert.to_bytes(input_str);
+    std::string conversion = gConvert.to_bytes(input_str);
+
+    if (conversion == utf16_cerror) {
+        ALOGE("Unable to convert UTF-16 string to UTF-8");
+        return "";
+    } else {
+        return conversion;
+    }
 }
 
 static std::u16string utf8ToUtf16(std::string input_str) {
-    return gConvert.from_bytes(input_str);
+    std::u16string conversion = gConvert.from_bytes(input_str);
+
+    if (conversion == utf8_cerror) {
+        ALOGE("Unable to convert UTF-8 string to UTF-16");
+        return u"";
+    } else {
+        return conversion;
+    }
 }
 
 } // namespace
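
The MtpStringBuffer change above uses the standard wstring_convert constructor that takes error strings: when a conversion fails, the converter returns the supplied sentinel instead of throwing std::range_error. A minimal demonstration of that behavior:

    #include <codecvt>
    #include <locale>
    #include <string>

    int main() {
        const char* err8 = "__ERR__";
        const char16_t* err16 = u"__ERR__";
        std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t>
                conv(err8, err16);

        std::string bad = "\xC3";                  // truncated UTF-8 sequence
        std::u16string out = conv.from_bytes(bad); // no exception thrown
        return out == err16 ? 0 : 1;               // sentinel signals the failure
    }
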
diff --git a/media/mtp/MtpUtils.cpp b/media/mtp/MtpUtils.cpp
index 8564576..84a20d3 100644
--- a/media/mtp/MtpUtils.cpp
+++ b/media/mtp/MtpUtils.cpp
@@ -150,6 +150,7 @@
             ret += copyFile(oldFile.c_str(), newFile.c_str());
         }
     }
+    closedir(dir);
     return ret;
 }
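
The closedir() added above plugs a directory-stream leak in the copy loop. An alternative sketch that makes the cleanup structural, letting RAII close the stream on every return path (illustrative only, error handling omitted):

    #include <dirent.h>
    #include <memory>

    static int countEntries(const char* path) {
        // closedir runs automatically when 'dir' goes out of scope.
        std::unique_ptr<DIR, int (*)(DIR*)> dir(opendir(path), closedir);
        if (!dir) return -1;
        int n = 0;
        while (readdir(dir.get()) != nullptr) {
            ++n;
        }
        return n;
    }
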
 
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 1883f63..1145b7b 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -35,6 +35,7 @@
         int64_t timestamp, int32_t width, int32_t height, int32_t numPlanes) :
         mReader(reader), mFormat(format), mUsage(usage), mBuffer(buffer), mLockedBuffer(nullptr),
         mTimestamp(timestamp), mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
+    LOG_FATAL_IF(reader == nullptr, "AImageReader shouldn't be null while creating AImage");
 }
 
 AImage::~AImage() {
@@ -57,14 +58,9 @@
     if (mIsClosed) {
         return;
     }
-    sp<AImageReader> reader = mReader.promote();
-    if (reader != nullptr) {
-        reader->releaseImageLocked(this, releaseFenceFd);
-    } else if (mBuffer != nullptr) {
-        LOG_ALWAYS_FATAL("%s: parent AImageReader closed without releasing image %p",
-                __FUNCTION__, this);
+    if (!mReader->mIsClosed) {
+        mReader->releaseImageLocked(this, releaseFenceFd);
     }
-
     // Should have been set to nullptr in releaseImageLocked
     // Set to nullptr here for extra safety only
     mBuffer = nullptr;
@@ -83,22 +79,12 @@
 
 void
 AImage::lockReader() const {
-    sp<AImageReader> reader = mReader.promote();
-    if (reader == nullptr) {
-        // Reader has been closed
-        return;
-    }
-    reader->mLock.lock();
+    mReader->mLock.lock();
 }
 
 void
 AImage::unlockReader() const {
-    sp<AImageReader> reader = mReader.promote();
-    if (reader == nullptr) {
-        // Reader has been closed
-        return;
-    }
-    reader->mLock.unlock();
+    mReader->mLock.unlock();
 }
 
 media_status_t
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index e0f16da..0e8cbcb 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -72,7 +72,7 @@
     uint32_t getJpegSize() const;
 
     // When the reader is closed, AImage will only accept the close API call
-    wp<AImageReader>           mReader;
+    const sp<AImageReader>     mReader;
     const int32_t              mFormat;
     const uint64_t             mUsage;  // AHARDWAREBUFFER_USAGE_* flags.
     BufferItem*                mBuffer;
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index baa4fc7..c0ceb3d 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -113,12 +113,12 @@
 
 void
 AImageReader::FrameListener::onFrameAvailable(const BufferItem& /*item*/) {
-    Mutex::Autolock _l(mLock);
     sp<AImageReader> reader = mReader.promote();
     if (reader == nullptr) {
         ALOGW("A frame is available after AImageReader closed!");
         return; // reader has been closed
     }
+    Mutex::Autolock _l(mLock);
     if (mListener.onImageAvailable == nullptr) {
         return; // No callback registered
     }
@@ -143,12 +143,12 @@
 
 void
 AImageReader::BufferRemovedListener::onBufferFreed(const wp<GraphicBuffer>& graphicBuffer) {
-    Mutex::Autolock _l(mLock);
     sp<AImageReader> reader = mReader.promote();
     if (reader == nullptr) {
         ALOGW("A frame is available after AImageReader closed!");
         return; // reader has been closed
     }
+    Mutex::Autolock _l(mLock);
     if (mListener.onBufferRemoved == nullptr) {
         return; // No callback registered
     }
@@ -272,6 +272,11 @@
       mFrameListener(new FrameListener(this)),
       mBufferRemovedListener(new BufferRemovedListener(this)) {}
 
+AImageReader::~AImageReader() {
+    Mutex::Autolock _l(mLock);
+    LOG_FATAL_IF(mIsClosed != true, "AImageReader not closed before destruction");
+}
+
 media_status_t
 AImageReader::init() {
     PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
@@ -347,8 +352,12 @@
     return AMEDIA_OK;
 }
 
-AImageReader::~AImageReader() {
+void AImageReader::close() {
     Mutex::Autolock _l(mLock);
+    if (mIsClosed) {
+        return;
+    }
+    mIsClosed = true;
     AImageReader_ImageListener nullListener = {nullptr, nullptr};
     setImageListenerLocked(&nullListener);
 
@@ -741,6 +750,7 @@
 void AImageReader_delete(AImageReader* reader) {
     ALOGV("%s", __FUNCTION__);
     if (reader != nullptr) {
+        reader->close();
         reader->decStrong((void*) AImageReader_delete);
     }
     return;
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index e328cb1..0779a71 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -76,6 +76,7 @@
     int32_t        getHeight()    const { return mHeight; };
     int32_t        getFormat()    const { return mFormat; };
     int32_t        getMaxImages() const { return mMaxImages; };
+    void           close();
 
   private:
 
@@ -134,7 +135,7 @@
 
       private:
         AImageReader_ImageListener mListener = {nullptr, nullptr};
-        wp<AImageReader>           mReader;
+        const wp<AImageReader>     mReader;
         Mutex                      mLock;
     };
     sp<FrameListener> mFrameListener;
@@ -149,7 +150,7 @@
 
        private:
         AImageReader_BufferRemovedListener mListener = {nullptr, nullptr};
-        wp<AImageReader>           mReader;
+        const wp<AImageReader>     mReader;
         Mutex                      mLock;
     };
     sp<BufferRemovedListener> mBufferRemovedListener;
@@ -165,6 +166,7 @@
     native_handle_t*           mWindowHandle = nullptr;
 
     List<AImage*>              mAcquiredImages;
+    bool                       mIsClosed = false;
 
     Mutex                      mLock;
 };
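
The AImageReader changes above split teardown into an explicit, idempotent close() (invoked from AImageReader_delete before the last reference is dropped) and a destructor that only verifies the object was closed. A generic sketch of that two-phase pattern with illustrative names:

    #include <cassert>

    class Resource {
    public:
        void close() {
            if (mClosed) return;   // idempotent: safe to call more than once
            // release buffers, detach listeners, etc.
            mClosed = true;
        }
        ~Resource() {
            // mirrors the LOG_FATAL_IF check in ~AImageReader()
            assert(mClosed && "Resource not closed before destruction");
        }
    private:
        bool mClosed = false;
    };
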
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index d81cde8..0ed92f7 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -45,12 +45,6 @@
         "-Werror",
     ],
 
-    product_variables: {
-        product_is_iot: {
-            cflags: ["-DTARGET_ANDROID_THINGS"],
-        },
-    },
-
     include_dirs: [
         // For android_mallopt definitions.
         "bionic/libc/private"
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index b824212..db13903 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -176,18 +176,7 @@
     // IMPORTANT: Use PermissionCache - not a runtime permission and may not change.
     bool ok = PermissionCache::checkCallingPermission(sModifyDefaultAudioEffectsAllowed);
 
-#ifdef TARGET_ANDROID_THINGS
-    if (!ok) {
-        // Use a secondary permission on Android Things to allow a more lenient level of protection.
-        static const String16 sModifyDefaultAudioEffectsAndroidThingsAllowed(
-                "com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
-        ok = PermissionCache::checkCallingPermission(
-                sModifyDefaultAudioEffectsAndroidThingsAllowed);
-    }
-    if (!ok) ALOGE("com.google.android.things.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
-#else
     if (!ok) ALOGE("android.permission.MODIFY_DEFAULT_AUDIO_EFFECTS");
-#endif
     return ok;
 }
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0b745ac..355d945 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1357,8 +1357,8 @@
         String8(AudioParameter::keyFrameCount),
         String8(AudioParameter::keyInputSource),
         String8(AudioParameter::keyMonoOutput),
-        String8(AudioParameter::keyStreamConnect),
-        String8(AudioParameter::keyStreamDisconnect),
+        String8(AudioParameter::keyDeviceConnect),
+        String8(AudioParameter::keyDeviceDisconnect),
         String8(AudioParameter::keyStreamSupportedFormats),
         String8(AudioParameter::keyStreamSupportedChannels),
         String8(AudioParameter::keyStreamSupportedSamplingRates),
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 3c4fbba..13152d0 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -24,6 +24,7 @@
 #include "Configuration.h"
 #include <utils/Log.h>
 #include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_dynamicsprocessing.h>
 #include <system/audio_effects/effect_ns.h>
 #include <system/audio_effects/effect_visualizer.h>
 #include <audio_utils/channels.h>
@@ -2569,7 +2570,8 @@
     if ((mSessionId == AUDIO_SESSION_OUTPUT_MIX) &&
         (((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) ||
          (memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) ||
-         (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0))) {
+         (memcmp(&desc.type, SL_IID_VOLUME, sizeof(effect_uuid_t)) == 0) ||
+         (memcmp(&desc.type, SL_IID_DYNAMICSPROCESSING, sizeof(effect_uuid_t)) == 0))) {
         return false;
     }
     return true;
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index c5b9953..3eacc8c 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -105,13 +105,8 @@
     return mSQ.poll();
 }
 
-void FastMixer::setNBLogWriter(NBLog::Writer *logWriter)
+void FastMixer::setNBLogWriter(NBLog::Writer *logWriter __unused)
 {
-    // FIXME If mMixer is set or changed prior to this, we don't inform correctly.
-    //       Should cache logWriter and re-apply it at the assignment to mMixer.
-    if (mMixer != NULL) {
-        mMixer->setNBLogWriter(logWriter);
-    }
 }
 
 void FastMixer::onIdle()
diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp
index 04b32c2..8b7a124 100644
--- a/services/audioflinger/FastThread.cpp
+++ b/services/audioflinger/FastThread.cpp
@@ -124,7 +124,7 @@
             mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
             tlNBLogWriter = next->mNBLogWriter != NULL ?
                     next->mNBLogWriter : mDummyNBLogWriter.get();
-            setNBLogWriter(tlNBLogWriter); // FastMixer informs its AudioMixer, FastCapture ignores
+            setNBLogWriter(tlNBLogWriter); // This is used for debugging only
 
             // We want to always have a valid reference to the previous (non-idle) state.
             // However, the state queue only guarantees access to current and previous states.
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 28ad9dd..cf15045 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3956,6 +3956,32 @@
     return INVALID_OPERATION;
 }
 
+// For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
+// still applied by the mixer.
+// All tracks attached to a mixer with the VOIP_RX flag are tied to the same
+// stream type, STREAM_VOICE_CALL, so this will only change the HAL volume once
+// even if more than one track is active.
+status_t AudioFlinger::PlaybackThread::handleVoipVolume_l(float *volume)
+{
+    status_t result = NO_ERROR;
+    if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
+        if (*volume != mLeftVolFloat) {
+            result = mOutput->stream->setVolume(*volume, *volume);
+            ALOGE_IF(result != OK,
+                     "Error when setting output stream volume: %d", result);
+            if (result == NO_ERROR) {
+                mLeftVolFloat = *volume;
+            }
+        }
+        // if the stream volume was successfully sent to the HAL, mLeftVolFloat == *volume
+        // here and we remove the stream volume contribution from the software volume.
+        if (mLeftVolFloat == *volume) {
+            *volume = 1.0f;
+        }
+    }
+    return result;
+}
+
 status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
                                                           audio_patch_handle_t *handle)
 {
@@ -4758,22 +4784,25 @@
                     // no acknowledgement required for newly active tracks
                 }
                 sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
+                float volume;
+                if (track->isPlaybackRestricted() || mStreamTypes[track->streamType()].mute) {
+                    volume = 0.f;
+                } else {
+                    volume = masterVolume * mStreamTypes[track->streamType()].volume;
+                }
+
+                handleVoipVolume_l(&volume);
+
                 // cache the combined master volume and stream type volume for fast mixer; this
                 // lacks any synchronization or barrier so VolumeProvider may read a stale value
                 const float vh = track->getVolumeHandler()->getVolume(
-                        proxy->framesReleased()).first;
-                float volume;
-                if (track->isPlaybackRestricted()) {
-                    volume = 0.f;
-                } else {
-                    volume = masterVolume
-                        * mStreamTypes[track->streamType()].volume
-                        * vh;
-                }
+                    proxy->framesReleased()).first;
+                volume *= vh;
                 track->mCachedVolume = volume;
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
                 float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
                 float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+
                 track->setFinalVolume((vlf + vrf) / 2.f);
                 ++fastTracks;
             } else {
@@ -4916,20 +4945,22 @@
             uint32_t vl, vr;       // in U8.24 integer format
             float vlf, vrf, vaf;   // in [0.0, 1.0] float format
             // read original volumes with volume control
-            float typeVolume = mStreamTypes[track->streamType()].volume;
-            float v = masterVolume * typeVolume;
+            float v = masterVolume * mStreamTypes[track->streamType()].volume;
             // Always fetch volumeshaper volume to ensure state is updated.
             const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
             const float vh = track->getVolumeHandler()->getVolume(
                     track->mAudioTrackServerProxy->framesReleased()).first;
 
-            if (track->isPausing() || mStreamTypes[track->streamType()].mute
-                    || track->isPlaybackRestricted()) {
+            if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
+                v = 0;
+            }
+
+            handleVoipVolume_l(&v);
+
+            if (track->isPausing()) {
                 vl = vr = 0;
                 vlf = vrf = vaf = 0.;
-                if (track->isPausing()) {
-                    track->setPaused();
-                }
+                track->setPaused();
             } else {
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
                 vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
@@ -4981,25 +5012,6 @@
                 track->mHasVolumeController = false;
             }
 
-            // For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
-            // still applied by the mixer.
-            if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
-                v = mStreamTypes[track->streamType()].mute ? 0.0f : v;
-                if (v != mLeftVolFloat) {
-                    status_t result = mOutput->stream->setVolume(v, v);
-                    ALOGE_IF(result != OK, "Error when setting output stream volume: %d", result);
-                    if (result == OK) {
-                        mLeftVolFloat = v;
-                    }
-                }
-                // if stream volume was successfully sent to the HAL, mLeftVolFloat == v here and we
-                // remove stream volume contribution from software volume.
-                if (v != 0.0f && mLeftVolFloat == v) {
-                   vlf = min(1.0f, vlf / v);
-                   vrf = min(1.0f, vrf / v);
-                   vaf = min(1.0f, vaf / v);
-               }
-            }
             // XXX: these things DON'T need to be done each time
             mAudioMixer->setBufferProvider(trackId, track);
             mAudioMixer->enable(trackId);
@@ -5289,11 +5301,11 @@
         return false;
     }
     // Check validity as we don't call AudioMixer::create() here.
-    if (!AudioMixer::isValidFormat(format)) {
+    if (!mAudioMixer->isValidFormat(format)) {
         ALOGW("%s: invalid format: %#x", __func__, format);
         return false;
     }
-    if (!AudioMixer::isValidChannelMask(channelMask)) {
+    if (!mAudioMixer->isValidChannelMask(channelMask)) {
         ALOGW("%s: invalid channelMask: %#x", __func__, channelMask);
         return false;
     }
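
The prepareTracks_l() changes above consolidate the per-track volume computation: the combined master and stream-type volume is zeroed when the stream is muted or playback is restricted, handleVoipVolume_l() then pushes the stream volume to the HAL on VOIP_RX outputs and neutralizes the software contribution, and only afterwards is the volume-shaper gain applied. A minimal standalone sketch of that ordering, using a hypothetical HalOutput type and plain parameters rather than the real AudioFlinger classes:

#include <cstdio>

// Hypothetical stand-in for the playback thread's output; lastStreamVolume plays the role
// of mLeftVolFloat in the real code.
struct HalOutput {
    unsigned flags;
    float    lastStreamVolume = -1.f;
    bool setVolume(float l, float r) { lastStreamVolume = l; (void)r; return true; }
};

constexpr unsigned kFlagVoipRx = 1u << 0;

// Mirrors handleVoipVolume_l(): on VOIP_RX outputs the stream volume is applied by the HAL,
// and the software (mixer) contribution is neutralized once the HAL accepted it.
void handleVoipVolume(HalOutput& out, float* volume) {
    if ((out.flags & kFlagVoipRx) == 0) return;
    if (*volume != out.lastStreamVolume) {
        out.setVolume(*volume, *volume);     // may fail in the real code; then lastStreamVolume keeps its old value
    }
    if (out.lastStreamVolume == *volume) {
        *volume = 1.0f;                      // HAL already applies it, software volume becomes unity
    }
}

float computeTrackVolume(HalOutput& out, float masterVolume, float streamVolume,
                         bool muted, bool playbackRestricted, float volumeShaperGain) {
    float volume = (muted || playbackRestricted) ? 0.f : masterVolume * streamVolume;
    handleVoipVolume(out, &volume);          // stream volume goes to the HAL on VOIP_RX outputs
    volume *= volumeShaperGain;              // shaper gain is always applied in software
    return volume;
}

int main() {
    HalOutput voipOut{kFlagVoipRx};
    printf("%.2f\n", computeTrackVolume(voipOut, 1.0f, 0.5f, false, false, 0.8f)); // prints 0.80
}
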
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 336c2b4..fc8aa13 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -747,6 +747,7 @@
                 // is safe to do so. That will drop the final ref count and destroy the tracks.
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
                 void        removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
+                status_t    handleVoipVolume_l(float *volume);
 
     // StreamOutHalInterfaceCallback implementation
     virtual     void        onWriteReady();
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index f02f3cf..ebfba83 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -3,7 +3,6 @@
 
     srcs: [
         "src/AudioCollections.cpp",
-        "src/AudioGain.cpp",
         "src/AudioInputDescriptor.cpp",
         "src/AudioOutputDescriptor.cpp",
         "src/AudioPatch.cpp",
@@ -21,6 +20,7 @@
         "src/TypeConverter.cpp",
     ],
     shared_libs: [
+        "libaudiofoundation",
         "libcutils",
         "libhidlbase",
         "liblog",
@@ -28,7 +28,10 @@
         "libutils",
         "libxml2",
     ],
-    export_shared_lib_headers: ["libmedia"],
+    export_shared_lib_headers: [
+        "libaudiofoundation",
+        "libmedia",
+    ],
     static_libs: [
         "libaudioutils",
     ],
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index 0776a8d..31c5041 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -19,7 +19,6 @@
 #include <unordered_map>
 #include <unordered_set>
 
-#include <AudioGain.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
 #include <DeviceDescriptor.h>
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index d906f11..2e9ddf4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -18,8 +18,8 @@
 
 #include "AudioCollections.h"
 #include "AudioProfile.h"
-#include "AudioGain.h"
 #include "HandleGenerator.h"
+#include <media/AudioGain.h>
 #include <utils/String8.h>
 #include <utils/Vector.h>
 #include <utils/RefBase.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index c90a582..e8cf485 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -21,7 +21,6 @@
 #include "AudioPort.h"
 #include "AudioRoute.h"
 #include "HwModule.h"
-#include "AudioGain.h"
 
 namespace android {
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
deleted file mode 100644
index 2725870..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioGain"
-//#define LOG_NDEBUG 0
-
-//#define VERY_VERBOSE_LOGGING
-#ifdef VERY_VERBOSE_LOGGING
-#define ALOGVV ALOGV
-#else
-#define ALOGVV(a...) do { } while(0)
-#endif
-
-#include "AudioGain.h"
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include <math.h>
-
-namespace android {
-
-AudioGain::AudioGain(int index, bool useInChannelMask)
-{
-    mIndex = index;
-    mUseInChannelMask = useInChannelMask;
-    memset(&mGain, 0, sizeof(struct audio_gain));
-}
-
-void AudioGain::getDefaultConfig(struct audio_gain_config *config)
-{
-    config->index = mIndex;
-    config->mode = mGain.mode;
-    config->channel_mask = mGain.channel_mask;
-    if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
-        config->values[0] = mGain.default_value;
-    } else {
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
-        }
-        for (size_t i = 0; i < numValues; i++) {
-            config->values[i] = mGain.default_value;
-        }
-    }
-    if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
-        config->ramp_duration_ms = mGain.min_ramp_ms;
-    }
-}
-
-status_t AudioGain::checkConfig(const struct audio_gain_config *config)
-{
-    if ((config->mode & ~mGain.mode) != 0) {
-        return BAD_VALUE;
-    }
-    if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
-        if ((config->values[0] < mGain.min_value) ||
-                    (config->values[0] > mGain.max_value)) {
-            return BAD_VALUE;
-        }
-    } else {
-        if ((config->channel_mask & ~mGain.channel_mask) != 0) {
-            return BAD_VALUE;
-        }
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(config->channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(config->channel_mask);
-        }
-        for (size_t i = 0; i < numValues; i++) {
-            if ((config->values[i] < mGain.min_value) ||
-                    (config->values[i] > mGain.max_value)) {
-                return BAD_VALUE;
-            }
-        }
-    }
-    if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
-        if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
-                    (config->ramp_duration_ms > mGain.max_ramp_ms)) {
-            return BAD_VALUE;
-        }
-    }
-    return NO_ERROR;
-}
-
-void AudioGain::dump(String8 *dst, int spaces, int index) const
-{
-    dst->appendFormat("%*sGain %d:\n", spaces, "", index+1);
-    dst->appendFormat("%*s- mode: %08x\n", spaces, "", mGain.mode);
-    dst->appendFormat("%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask);
-    dst->appendFormat("%*s- min_value: %d mB\n", spaces, "", mGain.min_value);
-    dst->appendFormat("%*s- max_value: %d mB\n", spaces, "", mGain.max_value);
-    dst->appendFormat("%*s- default_value: %d mB\n", spaces, "", mGain.default_value);
-    dst->appendFormat("%*s- step_value: %d mB\n", spaces, "", mGain.step_value);
-    dst->appendFormat("%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms);
-    dst->appendFormat("%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms);
-}
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index a096e8f..a9b87e3 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -22,7 +22,6 @@
 #include <policy.h>
 #include <AudioPolicyInterface.h>
 #include "AudioInputDescriptor.h"
-#include "AudioGain.h"
 #include "AudioPolicyMix.h"
 #include "HwModule.h"
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 8a60cf2..49524b0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -21,10 +21,10 @@
 #include "AudioOutputDescriptor.h"
 #include "AudioPolicyMix.h"
 #include "IOProfile.h"
-#include "AudioGain.h"
 #include "Volume.h"
 #include "HwModule.h"
 #include "TypeConverter.h"
+#include <media/AudioGain.h>
 #include <media/AudioParameter.h>
 #include <media/AudioPolicy.h>
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index 3a4db90..bf0cc94 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -18,7 +18,6 @@
 //#define LOG_NDEBUG 0
 
 #include "AudioPatch.h"
-#include "AudioGain.h"
 #include "TypeConverter.h"
 
 #include <log/log.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index c42923a..0221348 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -22,7 +22,6 @@
 #include "HwModule.h"
 #include "AudioPort.h"
 #include "IOProfile.h"
-#include "AudioGain.h"
 #include <AudioOutputDescriptor.h>
 
 namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index c11490a..68811e9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -19,7 +19,6 @@
 #include "TypeConverter.h"
 #include "AudioPort.h"
 #include "HwModule.h"
-#include "AudioGain.h"
 #include <policy.h>
 
 #ifndef ARRAY_SIZE
@@ -366,7 +365,9 @@
         if (mGains.size() != 0) {
             dst->appendFormat("%*s- gains:\n", spaces, "");
             for (size_t i = 0; i < mGains.size(); i++) {
-                mGains[i]->dump(dst, spaces + 2, i);
+                std::string gainStr;
+                mGains[i]->dump(&gainStr, spaces + 2, i);
+                dst->append(gainStr.c_str());
             }
         }
     }
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index 69d6b0c..a5fe07b 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -24,7 +24,6 @@
 #include <media/AudioResamplerPublic.h>
 #include <utils/Errors.h>
 
-#include "AudioGain.h"
 #include "AudioPort.h"
 #include "AudioProfile.h"
 #include "HwModule.h"
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 79f0919..92cbe4e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -19,7 +19,6 @@
 
 #include "AudioRoute.h"
 #include "HwModule.h"
-#include "AudioGain.h"
 
 namespace android
 {
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index ad07ab1..1dc7020 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -21,7 +21,6 @@
 #include <utils/Log.h>
 #include <utils/String8.h>
 #include <TypeConverter.h>
-#include "AudioGain.h"
 #include "AudioOutputDescriptor.h"
 #include "AudioPatch.h"
 #include "ClientDescriptor.h"
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index ecd5b34..2b4b508 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -22,7 +22,6 @@
 #include <set>
 #include "DeviceDescriptor.h"
 #include "TypeConverter.h"
-#include "AudioGain.h"
 #include "HwModule.h"
 
 namespace android {
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 96a8337..99e282e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -19,7 +19,6 @@
 
 #include "HwModule.h"
 #include "IOProfile.h"
-#include "AudioGain.h"
 #include <policy.h>
 #include <system/audio.h>
 
@@ -333,9 +332,10 @@
             if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
                 moduleDevice->setEncodedFormat(encodedFormat);
             }
-            moduleDevice->setAddress(devAddress);
             if (allowToCreate) {
                 moduleDevice->attach(hwModule);
+                moduleDevice->setAddress(devAddress);
+                moduleDevice->setName(String8(name));
             }
             return moduleDevice;
         }
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index fe2eaee..5662dcf 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -20,7 +20,6 @@
 #include <system/audio-base.h>
 #include "IOProfile.h"
 #include "HwModule.h"
-#include "AudioGain.h"
 #include "TypeConverter.h"
 
 namespace android {
diff --git a/services/audiopolicy/config/audio_policy_volumes.xml b/services/audiopolicy/config/audio_policy_volumes.xml
index ec64a7c..27bd3ff 100644
--- a/services/audiopolicy/config/audio_policy_volumes.xml
+++ b/services/audiopolicy/config/audio_policy_volumes.xml
@@ -44,7 +44,7 @@
     <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
                                              ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_HEARING_AID"
-                                             ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+                                             ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
     <volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEADSET">
         <point>1,-3000</point>
         <point>33,-2600</point>
diff --git a/services/audiopolicy/engine/config/Android.bp b/services/audiopolicy/engine/config/Android.bp
index 6e72f2a..885b5fa 100644
--- a/services/audiopolicy/engine/config/Android.bp
+++ b/services/audiopolicy/engine/config/Android.bp
@@ -3,7 +3,6 @@
     export_include_dirs: ["include"],
     include_dirs: [
         "external/libxml2/include",
-        "external/icu/icu4c/source/common",
     ],
     srcs: [
         "src/EngineConfig.cpp",
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index ebd82a7..ae3fc79 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,7 +16,6 @@
 
 #pragma once
 
-#include <AudioGain.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
 #include <IOProfile.h>
diff --git a/services/audiopolicy/engineconfigurable/Android.bp b/services/audiopolicy/engineconfigurable/Android.bp
index c27dc88..8f522f0 100644
--- a/services/audiopolicy/engineconfigurable/Android.bp
+++ b/services/audiopolicy/engineconfigurable/Android.bp
@@ -33,6 +33,7 @@
 
     ],
     shared_libs: [
+        "libaudiofoundation",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index 5bfad29..72c8de1 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -16,7 +16,6 @@
 
 #pragma once
 
-#include <AudioGain.h>
 #include <AudioPort.h>
 #include <HwModule.h>
 #include <DeviceDescriptor.h>
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
index 2b9cf09..aaf4158 100644
--- a/services/audiopolicy/enginedefault/Android.bp
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -21,6 +21,7 @@
         "libaudiopolicyengine_config",
     ],
     shared_libs: [
+        "libaudiofoundation",
         "liblog",
         "libcutils",
         "libutils",
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 62938cf..dd1b680 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -18,7 +18,6 @@
 
 #include "EngineBase.h"
 #include "EngineInterface.h"
-#include <AudioGain.h>
 #include <policy.h>
 
 namespace android
diff --git a/services/audiopolicy/manager/AudioPolicyFactory.cpp b/services/audiopolicy/manager/AudioPolicyFactory.cpp
index 7aff6a9..476a1ec 100644
--- a/services/audiopolicy/manager/AudioPolicyFactory.cpp
+++ b/services/audiopolicy/manager/AudioPolicyFactory.cpp
@@ -21,7 +21,13 @@
 extern "C" AudioPolicyInterface* createAudioPolicyManager(
         AudioPolicyClientInterface *clientInterface)
 {
-    return new AudioPolicyManager(clientInterface);
+    AudioPolicyManager *apm = new AudioPolicyManager(clientInterface);
+    status_t status = apm->initialize();
+    if (status != NO_ERROR) {
+        delete apm;
+        apm = nullptr;
+    }
+    return apm;
 }
 
 extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
diff --git a/services/audiopolicy/managerdefault/Android.bp b/services/audiopolicy/managerdefault/Android.bp
index 8fbeff9..1fa0d19 100644
--- a/services/audiopolicy/managerdefault/Android.bp
+++ b/services/audiopolicy/managerdefault/Android.bp
@@ -9,6 +9,7 @@
     export_include_dirs: ["."],
 
     shared_libs: [
+        "libaudiofoundation",
         "libcutils",
         "libdl",
         "libutils",
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 6d25c93..83ae35e 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -94,7 +94,7 @@
 {
     AudioParameter param(device->address());
     const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
-                AudioParameter::keyStreamConnect : AudioParameter::keyStreamDisconnect);
+                AudioParameter::keyDeviceConnect : AudioParameter::keyDeviceDisconnect);
     param.addInt(key, device->type());
     mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
 }
@@ -472,6 +472,10 @@
     std::unordered_set<audio_format_t> formatSet;
     sp<HwModule> primaryModule =
             mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
+    if (primaryModule == nullptr) {
+        ALOGE("%s() unable to get primary module", __func__);
+        return NO_INIT;
+    }
     DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypeMask(
             AUDIO_DEVICE_OUT_ALL_A2DP);
     for (const auto& device : declaredDevices) {
@@ -836,7 +840,7 @@
         // if explicitly requested
         static const uint32_t kRelevantFlags =
                 (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
-                 AUDIO_OUTPUT_FLAG_VOIP_RX);
+                 AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ);
         flags =
             (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
     }
@@ -2236,16 +2240,22 @@
         return status;
     }
 
-  // increment activity count before calling getNewInputDevice() below as only active sessions
+    // increment activity count before calling getNewInputDevice() below as only active sessions
     // are considered for device selection
     inputDesc->setClientActive(client, true);
 
     // indicate active capture to sound trigger service if starting capture from a mic on
     // primary HW module
     sp<DeviceDescriptor> device = getNewInputDevice(inputDesc);
-    setInputDevice(input, device, true /* force */);
+    if (device != nullptr) {
+        status = setInputDevice(input, device, true /* force */);
+    } else {
+        ALOGW("%s no new input device can be found for descriptor %d",
+                __FUNCTION__, inputDesc->getId());
+        status = BAD_VALUE;
+    }
 
-    if (inputDesc->activeCount()  == 1) {
+    if (status == NO_ERROR && inputDesc->activeCount() == 1) {
         sp<AudioPolicyMix> policyMix = inputDesc->mPolicyMix.promote();
         // if input maps to a dynamic policy with an activity listener, notify of state change
         if ((policyMix != NULL)
@@ -2276,11 +2286,16 @@
                         address, "remote-submix", AUDIO_FORMAT_DEFAULT);
             }
         }
+    } else if (status != NO_ERROR) {
+        // Restore client activity state.
+        inputDesc->setClientActive(client, false);
+        inputDesc->stop();
     }
 
-    ALOGV("%s input %d source = %d exit", __FUNCTION__, input, client->source());
+    ALOGV("%s input %d source = %d status = %d exit",
+            __FUNCTION__, input, client->source(), status);
 
-    return NO_ERROR;
+    return status;
 }
 
 status_t AudioPolicyManager::stopInput(audio_port_handle_t portId)
@@ -2393,7 +2408,8 @@
     for (size_t i = 0; i < mInputs.size(); i++) {
         const sp<AudioInputDescriptor> input = mInputs.valueAt(i);
         if (input->clientsList().size() == 0
-                || !mAvailableInputDevices.containsAtLeastOne(input->supportedDevices())) {
+                || !mAvailableInputDevices.containsAtLeastOne(input->supportedDevices())
+                || (input->getAudioPort()->getFlags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
             inputsToClose.push_back(mInputs.keyAt(i));
         } else {
             bool close = false;
@@ -4290,7 +4306,6 @@
         : AudioPolicyManager(clientInterface, false /*forTesting*/)
 {
     loadConfig();
-    initialize();
 }
 
 void AudioPolicyManager::loadConfig() {
@@ -5679,8 +5694,9 @@
     const auto ringVolumeSrc = toVolumeSource(AUDIO_STREAM_RING);
     const auto musicVolumeSrc = toVolumeSource(AUDIO_STREAM_MUSIC);
     const auto alarmVolumeSrc = toVolumeSource(AUDIO_STREAM_ALARM);
+    const auto a11yVolumeSrc = toVolumeSource(AUDIO_STREAM_ACCESSIBILITY);
 
-    if (volumeSource == toVolumeSource(AUDIO_STREAM_ACCESSIBILITY)
+    if (volumeSource == a11yVolumeSrc
             && (AUDIO_MODE_RINGTONE == mEngine->getPhoneState()) &&
             mOutputs.isActive(ringVolumeSrc, 0)) {
         auto &ringCurves = getVolumeCurves(AUDIO_STREAM_RING);
@@ -5697,7 +5713,7 @@
              volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION) ||
              volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE) ||
              volumeSource == toVolumeSource(AUDIO_STREAM_DTMF) ||
-             volumeSource == toVolumeSource(AUDIO_STREAM_ACCESSIBILITY))) {
+             volumeSource == a11yVolumeSrc)) {
         auto &voiceCurves = getVolumeCurves(callVolumeSrc);
         int voiceVolumeIndex = voiceCurves.getVolumeIndex(device);
         const float maxVoiceVolDb =
@@ -5709,7 +5725,9 @@
         // VOICE_CALL stream has minVolumeIndex > 0 : Users cannot set the volume of voice calls to
         // 0. We don't want to cap volume when the system has programmatically muted the voice call
         // stream. See setVolumeCurveIndex() for more information.
-        bool exemptFromCapping = (volumeSource == ringVolumeSrc) && (voiceVolumeIndex == 0);
+        bool exemptFromCapping =
+                ((volumeSource == ringVolumeSrc) || (volumeSource == a11yVolumeSrc))
+                && (voiceVolumeIndex == 0);
         ALOGV_IF(exemptFromCapping, "%s volume source %d at vol=%f not capped", __func__,
                  volumeSource, volumeDb);
         if ((volumeDb > maxVoiceVolDb) && !exemptFromCapping) {
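
The startInput() changes above turn a missing input device into an error and roll back the client activation performed a few lines earlier, so the descriptor's activity count stays consistent. A simplified sketch of that activate-then-roll-back pattern, with hypothetical types standing in for AudioInputDescriptor and device selection:

#include <optional>
#include <string>

// Hypothetical stand-ins for the input descriptor and device selection.
struct InputDesc {
    int activeClients = 0;
    void setClientActive(bool active) { activeClients += active ? 1 : -1; }
    void stop() {}
};

enum Status { OK = 0, BAD_VALUE = -22 };

std::optional<std::string> pickNewInputDevice(const InputDesc&) {
    return std::nullopt;                       // pretend no device is available
}

Status setInputDevice(const std::string&) { return OK; }

// Mirrors the startInput() flow above: activate the client first (device selection only
// considers active clients), then undo the activation if no device could be routed.
Status startInput(InputDesc& desc) {
    desc.setClientActive(true);
    Status status;
    if (auto device = pickNewInputDevice(desc)) {
        status = setInputDevice(*device);
    } else {
        status = BAD_VALUE;
    }
    if (status != OK) {
        desc.setClientActive(false);           // roll back so the descriptor stays consistent
        desc.stop();
    }
    return status;
}
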
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index d88d1ec..d38176b 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -35,7 +35,6 @@
 #include "AudioPolicyInterface.h"
 
 #include <AudioPolicyManagerObserver.h>
-#include <AudioGain.h>
 #include <AudioPolicyConfig.h>
 #include <AudioPort.h>
 #include <AudioPatch.h>
@@ -307,6 +306,8 @@
             return volumeGroup != VOLUME_GROUP_NONE ? NO_ERROR : BAD_VALUE;
         }
 
+        status_t initialize();
+
 protected:
         // A constructor that allows more fine-grained control over initialization process,
         // used in automatic tests.
@@ -321,7 +322,6 @@
         //   - initialize.
         AudioPolicyConfig& getConfig() { return mConfig; }
         void loadConfig();
-        status_t initialize();
 
         // From AudioPolicyManagerObserver
         virtual const AudioPatchCollection &getAudioPatches() const
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 77f7997..d1b59c1 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -29,7 +29,6 @@
 #include <utils/Log.h>
 #include <cutils/properties.h>
 #include <binder/IPCThreadState.h>
-#include <binder/ActivityManager.h>
 #include <binder/PermissionController.h>
 #include <binder/IResultReceiver.h>
 #include <utils/String16.h>
@@ -410,12 +409,17 @@
 //    Another client in the same UID has already been allowed to capture
 //    OR The client is the assistant
 //        AND an accessibility service is on TOP or a RTT call is active
-//               AND the source is VOICE_RECOGNITION or HOTWORD
-//        OR uses VOICE_RECOGNITION AND is on TOP
-//               OR uses HOTWORD
+//                AND the source is VOICE_RECOGNITION or HOTWORD
+//            OR uses VOICE_RECOGNITION AND is on TOP
+//                OR uses HOTWORD
 //            AND there is no active privacy sensitive capture or call
 //                OR client has CAPTURE_AUDIO_OUTPUT privileged permission
 //    OR The client is an accessibility service
+//        AND Is on TOP
+//                AND the source is VOICE_RECOGNITION or HOTWORD
+//            OR The assistant is not on TOP
+//                AND there is no active privacy sensitive capture or call
+//                    OR client has CAPTURE_AUDIO_OUTPUT privileged permission
 //        AND is on TOP
 //        AND the source is VOICE_RECOGNITION or HOTWORD
 //    OR the client source is virtual (remote submix, call audio TX or RX...)
@@ -423,7 +427,7 @@
 //        AND The assistant is not on TOP
 //        AND is on TOP or latest started
 //        AND there is no active privacy sensitive capture or call
-//                OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+//            OR client has CAPTURE_AUDIO_OUTPUT privileged permission
 
     sp<AudioRecordClient> topActive;
     sp<AudioRecordClient> latestActive;
@@ -459,16 +463,24 @@
             continue;
         }
 
-        if (appState == APP_STATE_TOP) {
+        bool isAssistant = mUidPolicy->isAssistantUid(current->uid);
+        bool isAccessibility = mUidPolicy->isA11yUid(current->uid);
+        if (appState == APP_STATE_TOP && !isAccessibility) {
             if (current->startTimeNs > topStartNs) {
                 topActive = current;
                 topStartNs = current->startTimeNs;
             }
-            if (mUidPolicy->isAssistantUid(current->uid)) {
+            if (isAssistant) {
                 isAssistantOnTop = true;
             }
         }
-        if (current->startTimeNs > latestStartNs) {
+        // The assistant capturing for HOTWORD and accessibility services are not considered
+        // for the latest active client, to avoid masking regular clients started before them
+        if (current->startTimeNs > latestStartNs
+                && !((current->attributes.source == AUDIO_SOURCE_HOTWORD
+                        || isA11yOnTop || rttCallActive)
+                    && isAssistant)
+                && !isAccessibility) {
             latestActive = current;
             latestStartNs = current->startTimeNs;
         }
@@ -541,10 +553,20 @@
         } else if (mUidPolicy->isA11yUid(current->uid)) {
             // For accessibility service allow capture if:
             //     Is on TOP
-            //     AND the source is VOICE_RECOGNITION or HOTWORD
-            if (isA11yOnTop &&
-                    (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
-                allowCapture = true;
+            //          AND the source is VOICE_RECOGNITION or HOTWORD
+            //     Or
+            //          The assistant is not on TOP
+            //          AND there is no active privacy sensitive capture or call
+            //             OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+            if (isA11yOnTop) {
+                if (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD) {
+                    allowCapture = true;
+                }
+            } else {
+                if (!isAssistantOnTop
+                        && (!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
+                    allowCapture = true;
+                }
             }
         }
         setAppState_l(current->uid,
@@ -774,28 +796,26 @@
 // -----------  AudioPolicyService::UidPolicy implementation ----------
 
 void AudioPolicyService::UidPolicy::registerSelf() {
-    ActivityManager am;
-    am.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
+    status_t res = mAm.linkToDeath(this);
+    mAm.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
             | ActivityManager::UID_OBSERVER_IDLE
             | ActivityManager::UID_OBSERVER_ACTIVE
             | ActivityManager::UID_OBSERVER_PROCSTATE,
             ActivityManager::PROCESS_STATE_UNKNOWN,
             String16("audioserver"));
-    status_t res = am.linkToDeath(this);
     if (!res) {
         Mutex::Autolock _l(mLock);
         mObserverRegistered = true;
     } else {
         ALOGE("UidPolicy::registerSelf linkToDeath failed: %d", res);
 
-        am.unregisterUidObserver(this);
+        mAm.unregisterUidObserver(this);
     }
 }
 
 void AudioPolicyService::UidPolicy::unregisterSelf() {
-    ActivityManager am;
-    am.unlinkToDeath(this);
-    am.unregisterUidObserver(this);
+    mAm.unlinkToDeath(this);
+    mAm.unregisterUidObserver(this);
     Mutex::Autolock _l(mLock);
     mObserverRegistered = false;
 }
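
The updateUidStates_l() changes let an accessibility service capture even when it is not on top, provided the assistant is not on top and there is no privacy-sensitive capture or call (or the client holds CAPTURE_AUDIO_OUTPUT). A condensed, hypothetical decision function mirroring that branch:

// Condensed, hypothetical form of the accessibility-service branch above.
// All inputs are plain booleans here; the real code derives them from client and UID state.
enum class Source { VoiceRecognition, Hotword, Other };

bool allowAccessibilityCapture(bool a11yOnTop, Source source, bool assistantOnTop,
                               bool sensitiveCaptureActive, bool inCall,
                               bool canCaptureOutput) {
    if (a11yOnTop) {
        // On top: only speech-oriented sources are allowed.
        return source == Source::VoiceRecognition || source == Source::Hotword;
    }
    // Not on top: allowed only when the assistant is not on top and nothing
    // privacy-sensitive is going on (or the client may capture output anyway).
    return !assistantOnTop && (!(sensitiveCaptureActive || inCall) || canCaptureOutput);
}
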
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index e467f70..74aea0d 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -23,6 +23,7 @@
 #include <utils/String8.h>
 #include <utils/Vector.h>
 #include <utils/SortedVector.h>
+#include <binder/ActivityManager.h>
 #include <binder/BinderService.h>
 #include <binder/IUidObserver.h>
 #include <system/audio.h>
@@ -387,6 +388,7 @@
 
         wp<AudioPolicyService> mService;
         Mutex mLock;
+        ActivityManager mAm;
         bool mObserverRegistered;
         std::unordered_map<uid_t, std::pair<bool, int>> mOverrideUids;
         std::unordered_map<uid_t, std::pair<bool, int>> mCachedUids;
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index ab9f78b..c8d1459 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -7,6 +7,7 @@
   $(call include-path-for, audio-utils) \
 
 LOCAL_SHARED_LIBRARIES := \
+  libaudiofoundation \
   libaudiopolicymanagerdefault \
   libbase \
   liblog \
@@ -41,6 +42,7 @@
 include $(CLEAR_VARS)
 
 LOCAL_SHARED_LIBRARIES := \
+  libaudiofoundation \
   libbase \
   liblog \
   libmedia_helper \
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 048d0e6..a4868bf 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -2608,14 +2608,13 @@
 void CameraService::UidPolicy::registerSelf() {
     Mutex::Autolock _l(mUidLock);
 
-    ActivityManager am;
     if (mRegistered) return;
-    am.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
+    status_t res = mAm.linkToDeath(this);
+    mAm.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
             | ActivityManager::UID_OBSERVER_IDLE
             | ActivityManager::UID_OBSERVER_ACTIVE | ActivityManager::UID_OBSERVER_PROCSTATE,
             ActivityManager::PROCESS_STATE_UNKNOWN,
             String16("cameraserver"));
-    status_t res = am.linkToDeath(this);
     if (res == OK) {
         mRegistered = true;
         ALOGV("UidPolicy: Registered with ActivityManager");
@@ -2625,9 +2624,8 @@
 void CameraService::UidPolicy::unregisterSelf() {
     Mutex::Autolock _l(mUidLock);
 
-    ActivityManager am;
-    am.unregisterUidObserver(this);
-    am.unlinkToDeath(this);
+    mAm.unregisterUidObserver(this);
+    mAm.unlinkToDeath(this);
     mRegistered = false;
     mActiveUids.clear();
     ALOGV("UidPolicy: Unregistered with ActivityManager");
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 065157d..cf93a41 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -25,6 +25,7 @@
 #include <cutils/multiuser.h>
 #include <utils/Vector.h>
 #include <utils/KeyedVector.h>
+#include <binder/ActivityManager.h>
 #include <binder/AppOpsManager.h>
 #include <binder/BinderService.h>
 #include <binder/IAppOpsCallback.h>
@@ -564,6 +565,7 @@
 
         Mutex mUidLock;
         bool mRegistered;
+        ActivityManager mAm;
         wp<CameraService> mService;
         std::unordered_set<uid_t> mActiveUids;
         // Monitored uid map to cached procState and refCount pair
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 09638d0..c72029f 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -29,6 +29,7 @@
 #include <future>
 #include <inttypes.h>
 #include <hardware/camera_common.h>
+#include <android/hidl/manager/1.2/IServiceManager.h>
 #include <hidl/ServiceManagement.h>
 #include <functional>
 #include <camera_metadata_hidden.h>
@@ -47,10 +48,6 @@
 using std::literals::chrono_literals::operator""s;
 
 namespace {
-// Hardcoded name for the passthrough HAL implementation, since it can't be discovered via the
-// service manager
-const std::string kLegacyProviderName("legacy/0");
-const std::string kExternalProviderName("external/0");
 const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
 } // anonymous namespace
 
@@ -62,6 +59,19 @@
 CameraProviderManager::~CameraProviderManager() {
 }
 
+hardware::hidl_vec<hardware::hidl_string>
+CameraProviderManager::HardwareServiceInteractionProxy::listServices() {
+    hardware::hidl_vec<hardware::hidl_string> ret;
+    auto manager = hardware::defaultServiceManager1_2();
+    if (manager != nullptr) {
+        manager->listManifestByInterface(provider::V2_4::ICameraProvider::descriptor,
+                [&ret](const hardware::hidl_vec<hardware::hidl_string> &registered) {
+                    ret = registered;
+                });
+    }
+    return ret;
+}
+
 status_t CameraProviderManager::initialize(wp<CameraProviderManager::StatusListener> listener,
         ServiceInteractionProxy* proxy) {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -84,9 +94,10 @@
         return INVALID_OPERATION;
     }
 
-    // See if there's a passthrough HAL, but let's not complain if there's not
-    addProviderLocked(kLegacyProviderName, /*expected*/ false);
-    addProviderLocked(kExternalProviderName, /*expected*/ false);
+
+    for (const auto& instance : mServiceProxy->listServices()) {
+        this->addProviderLocked(instance);
+    }
 
     IPCThreadState::self()->flushCommands();
 
@@ -1087,7 +1098,7 @@
     return false;
 }
 
-status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
+status_t CameraProviderManager::addProviderLocked(const std::string& newProvider) {
     for (const auto& providerInfo : mProviders) {
         if (providerInfo->mProviderName == newProvider) {
             ALOGW("%s: Camera provider HAL with name '%s' already registered", __FUNCTION__,
@@ -1100,13 +1111,9 @@
     interface = mServiceProxy->getService(newProvider);
 
     if (interface == nullptr) {
-        if (expected) {
-            ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
-                    newProvider.c_str());
-            return BAD_VALUE;
-        } else {
-            return OK;
-        }
+        ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+                newProvider.c_str());
+        return BAD_VALUE;
     }
 
     sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, this);
@@ -2058,6 +2065,13 @@
     return OK;
 }
 bool CameraProviderManager::ProviderInfo::DeviceInfo3::isAPI1Compatible() const {
+    // Do not advertise NIR cameras to API1 camera app.
+    camera_metadata_ro_entry cfa = mCameraCharacteristics.find(
+            ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT);
+    if (cfa.count == 1 && cfa.data.u8[0] == ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR) {
+        return false;
+    }
+
     bool isBackwardCompatible = false;
     camera_metadata_ro_entry_t caps = mCameraCharacteristics.find(
             ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
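
Provider discovery above switches from probing hard-coded instance names to enumerating every ICameraProvider instance declared in the device manifest, going through ServiceInteractionProxy so tests can substitute their own list (as the test change below does). A slimmed-down sketch of initialization driven by an injectable listServices(), with hypothetical types:

#include <string>
#include <vector>

// Hypothetical, slimmed-down version of the ServiceInteractionProxy indirection: the
// manager only sees an interface, so tests can return a fixed list such as {"test/0"}.
struct ServiceProxy {
    virtual ~ServiceProxy() = default;
    virtual std::vector<std::string> listServices() = 0;
};

struct ManifestServiceProxy : ServiceProxy {
    std::vector<std::string> listServices() override {
        // The real implementation queries the HIDL service manager for all registered
        // provider instances; hard-coded names like "legacy/0" are no longer probed.
        return {"internal/0", "external/0"};
    }
};

struct ProviderManager {
    std::vector<std::string> providers;
    int addProvider(const std::string& name) {
        providers.push_back(name);
        return 0;
    }
    int initialize(ServiceProxy& proxy) {
        for (const auto& instance : proxy.listServices()) {
            addProvider(instance);            // every advertised instance is expected to exist
        }
        return 0;
    }
};
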
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index a42fb4d..8cdfc24 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -78,6 +78,7 @@
                 &notification) = 0;
         virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
                 const std::string &serviceName) = 0;
+        virtual hardware::hidl_vec<hardware::hidl_string> listServices() = 0;
         virtual ~ServiceInteractionProxy() {}
     };
 
@@ -95,6 +96,8 @@
                 const std::string &serviceName) override {
             return hardware::camera::provider::V2_4::ICameraProvider::getService(serviceName);
         }
+
+        virtual hardware::hidl_vec<hardware::hidl_string> listServices() override;
     };
 
     /**
@@ -567,7 +570,7 @@
             hardware::hidl_version minVersion = hardware::hidl_version{0,0},
             hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
 
-    status_t addProviderLocked(const std::string& newProvider, bool expected = true);
+    status_t addProviderLocked(const std::string& newProvider);
 
     status_t removeProvider(const std::string& provider);
     sp<StatusListener> getStatusListener() const;
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index 3c90de0..94541d8 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -419,7 +419,7 @@
 
     std::vector<std::unique_ptr<Item>> items;
     std::vector<std::unique_ptr<Camera>> cameraList;
-    auto image = Image::FromDataForPrimaryImage("android/mainimage", &items);
+    auto image = Image::FromDataForPrimaryImage("image/jpeg", &items);
     std::unique_ptr<CameraParams> cameraParams(new CameraParams(std::move(image)));
     if (cameraParams == nullptr) {
         ALOGE("%s: Failed to initialize camera parameters", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9771f9e..4227a3b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -29,6 +29,9 @@
 #define CLOGE(fmt, ...) ALOGE("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
             ##__VA_ARGS__)
 
+#define CLOGW(fmt, ...) ALOGW("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
+            ##__VA_ARGS__)
+
 // Convenience macros for transitioning to the error state
 #define SET_ERR(fmt, ...) setErrorState(   \
     "%s: " fmt, __FUNCTION__,              \
@@ -3267,14 +3270,19 @@
         ALOGVV("%s: removed frame %d from InFlightMap", __FUNCTION__, frameNumber);
      }
 
-    // Sanity check - if we have too many in-flight frames, something has
-    // likely gone wrong
-    if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
-        CLOGE("In-flight list too large: %zu", mInFlightMap.size());
-    } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
-            kInFlightWarnLimitHighSpeed) {
-        CLOGE("In-flight list too large for high speed configuration: %zu",
-                mInFlightMap.size());
+    // Sanity check - if we have too many in-flight frames with long total inflight duration,
+    // something has likely gone wrong. This might still be legitimate only if the
+    // application sends in a long burst of long-exposure requests.
+    if (mExpectedInflightDuration > kMinWarnInflightDuration) {
+        if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
+            CLOGW("In-flight list too large: %zu, total inflight duration %" PRIu64,
+                    mInFlightMap.size(), mExpectedInflightDuration);
+        } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
+                kInFlightWarnLimitHighSpeed) {
+            CLOGW("In-flight list too large for high speed configuration: %zu,"
+                    " total inflight duration %" PRIu64,
+                    mInFlightMap.size(), mExpectedInflightDuration);
+        }
     }
 }
 
@@ -4364,7 +4372,7 @@
         int overrideFormat = mapToFrameworkFormat(src.v3_2.overrideFormat);
         android_dataspace overrideDataSpace = mapToFrameworkDataspace(src.overrideDataSpace);
 
-        if (dst->format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+        if (dstStream->getOriginalFormat() != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
             dstStream->setFormatOverride(false);
             dstStream->setDataSpaceOverride(false);
             if (dst->format != overrideFormat) {
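
The in-flight sanity check above is downgraded to a warning and only fires once the expected total in-flight duration passes a threshold, so a long burst of long-exposure requests no longer trips it immediately. A simple sketch of the thresholded check, with illustrative constants:

#include <cstdint>
#include <cstdio>

// Illustrative constants mirroring the thresholds above (nanoseconds / frame count).
constexpr uint64_t kMinWarnInflightDurationNs = 5'000'000'000; // 5 s
constexpr size_t   kInFlightWarnLimit         = 30;

// Warn only when both the in-flight count and the expected total in-flight duration are
// large; a big queue of short frames or a few long exposures alone stays quiet.
void checkInFlight(size_t inFlightCount, uint64_t expectedInflightDurationNs) {
    if (expectedInflightDurationNs <= kMinWarnInflightDurationNs) return;
    if (inFlightCount > kInFlightWarnLimit) {
        fprintf(stderr, "In-flight list too large: %zu, total inflight duration %llu ns\n",
                inFlightCount, (unsigned long long) expectedInflightDurationNs);
    }
}
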
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 6e8ac84..cae34ce 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -227,6 +227,7 @@
     static const size_t        kDumpLockAttempts  = 10;
     static const size_t        kDumpSleepDuration = 100000; // 0.10 sec
     static const nsecs_t       kActiveTimeout     = 500000000;  // 500 ms
+    static const nsecs_t       kMinWarnInflightDuration = 5000000000; // 5 s
     static const size_t        kInFlightWarnLimit = 30;
     static const size_t        kInFlightWarnLimitHighSpeed = 256; // batch size 32 * pipe depth 8
     static const nsecs_t       kDefaultExpectedDuration = 100000000; // 100 ms
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 2df084b..fd9b4b0 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -68,7 +68,7 @@
     mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX),
     mBufferLimitLatency(kBufferLimitLatencyBinSize),
     mFormatOverridden(false),
-    mOriginalFormat(-1),
+    mOriginalFormat(format),
     mDataSpaceOverridden(false),
     mOriginalDataSpace(HAL_DATASPACE_UNKNOWN),
     mPhysicalCameraId(physicalCameraId),
@@ -125,9 +125,6 @@
 
 void Camera3Stream::setFormatOverride(bool formatOverridden) {
     mFormatOverridden = formatOverridden;
-    if (formatOverridden && mOriginalFormat == -1) {
-        mOriginalFormat = camera3_stream::format;
-    }
 }
 
 bool Camera3Stream::isFormatOverridden() const {
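
Caching the original format in the constructor (and making the member const) means getOriginalFormat() is valid even before any override, which the Camera3Device change above relies on. A minimal, illustrative sketch of that construction-time capture (not the real class interface):

// Illustrative only; names do not match the real Camera3Stream interface.
class Stream {
public:
    explicit Stream(int format) : mFormat(format), mOriginalFormat(format) {}

    void setFormatOverride(bool overridden) { mFormatOverridden = overridden; }

    int getFormat() const         { return mFormat; }
    int getOriginalFormat() const { return mOriginalFormat; }  // valid even with no override

private:
    int       mFormat;
    const int mOriginalFormat;   // captured once at construction, never recomputed
    bool      mFormatOverridden = false;
};
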
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 533318f..67afd0f 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -582,9 +582,9 @@
     static const int32_t kBufferLimitLatencyBinSize = 33; //in ms
     CameraLatencyHistogram mBufferLimitLatency;
 
-    //Keep track of original format in case it gets overridden
+    //Keep track of the original format set at stream creation in case it gets overridden
     bool mFormatOverridden;
-    int mOriginalFormat;
+    const int mOriginalFormat;
 
     //Keep track of original dataSpace in case it gets overridden
     bool mDataSpaceOverridden;
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index f47e5a5..78d737d 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -205,6 +205,11 @@
         return mTestCameraProvider;
     }
 
+    virtual hardware::hidl_vec<hardware::hidl_string> listServices() override {
+        hardware::hidl_vec<hardware::hidl_string> ret = {"test/0"};
+        return ret;
+    }
+
 };
 
 struct TestStatusListener : public CameraProviderManager::StatusListener {
@@ -231,37 +236,24 @@
             vendorSection);
     serviceProxy.setProvider(provider);
 
+    int numProviders = static_cast<int>(serviceProxy.listServices().size());
+
     res = providerManager->initialize(statusListener, &serviceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
     // Check that both "legacy" and "external" providers (really the same object) are called
     // once for all the init methods
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::SET_CALLBACK], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::SET_CALLBACK], numProviders) <<
             "Only one call to setCallback per provider expected during init";
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_VENDOR_TAGS], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_VENDOR_TAGS], numProviders) <<
             "Only one call to getVendorTags per provider expected during init";
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::IS_SET_TORCH_MODE_SUPPORTED], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::IS_SET_TORCH_MODE_SUPPORTED],
+            numProviders) <<
             "Only one call to isSetTorchModeSupported per provider expected during init";
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_CAMERA_ID_LIST], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_CAMERA_ID_LIST], numProviders) <<
             "Only one call to getCameraIdList per provider expected during init";
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::NOTIFY_DEVICE_STATE], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::NOTIFY_DEVICE_STATE], numProviders) <<
             "Only one call to notifyDeviceState per provider expected during init";
 
-    std::string legacyInstanceName = "legacy/0";
-    std::string externalInstanceName = "external/0";
-    bool gotLegacy = false;
-    bool gotExternal = false;
-    EXPECT_EQ(2u, serviceProxy.mLastRequestedServiceNames.size()) <<
-            "Only two service queries expected to be seen by hardware service manager";
-
-    for (auto& serviceName : serviceProxy.mLastRequestedServiceNames) {
-        if (serviceName == legacyInstanceName) gotLegacy = true;
-        if (serviceName == externalInstanceName) gotExternal = true;
-    }
-    ASSERT_TRUE(gotLegacy) <<
-            "Legacy instance not requested from service manager";
-    ASSERT_TRUE(gotExternal) <<
-            "External instance not requested from service manager";
-
     hardware::hidl_string testProviderFqInterfaceName =
             "android.hardware.camera.provider@2.4::ICameraProvider";
     hardware::hidl_string testProviderInstanceName = "test/0";
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 655f017..1b7a20c 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -43,7 +43,7 @@
     ],
 
     shared_libs: [
-        "libaaudio",
+        "libaaudio_internal",
         "libaudioclient",
         "libaudioflinger",
         "libbase",