Merge "Audio HAL: Update for the new definition of SinkMetadata"
diff --git a/apex/Android.bp b/apex/Android.bp
index 991696c..05cc2c5 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -24,6 +24,7 @@
         "libmkvextractor",
         "libmp3extractor",
         "libmp4extractor",
+        "libmpeg2extractor",
         "liboggextractor",
         "libwavextractor",
     ],
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 94f9e02..41d1833 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -73,7 +73,6 @@
         // Suppress unused parameter and no error options. These cause problems
         // when using the map type in a proto definition.
         "-Wno-unused-parameter",
-        "-Wno-error",
     ],
 }
 
@@ -106,7 +105,6 @@
         // Suppress unused parameter and no error options. These cause problems
         // when using the map type in a proto definition.
         "-Wno-unused-parameter",
-        "-Wno-error",
     ],
 }
 
diff --git a/drm/libmediadrm/tests/Android.bp b/drm/libmediadrm/tests/Android.bp
index dcd59b7..9e0115e 100644
--- a/drm/libmediadrm/tests/Android.bp
+++ b/drm/libmediadrm/tests/Android.bp
@@ -34,7 +34,6 @@
         // Suppress unused parameter and no error options. These cause problems
         // when using the map type in a proto definition.
         "-Wno-unused-parameter",
-        "-Wno-error",
     ]
 }
 
diff --git a/include/media/MediaExtractorPluginApi.h b/include/media/MediaExtractorPluginApi.h
index b480bbe..854bf83 100644
--- a/include/media/MediaExtractorPluginApi.h
+++ b/include/media/MediaExtractorPluginApi.h
@@ -47,33 +47,11 @@
     NONBLOCKING = 16
 };
 
-struct CMediaTrack {
-    void *data;
-    void (*free)(void *data);
-
-    status_t (*start)(void *data);
-    status_t (*stop)(void *data);
-    status_t (*getFormat)(void *data, MetaDataBase &format);
-    status_t (*read)(void *data, MediaBufferBase **buffer, uint32_t options, int64_t seekPosUs);
-    bool     (*supportsNonBlockingRead)(void *data);
-};
-
-struct CMediaTrackV2 {
-    void *data;
-    void (*free)(void *data);
-
-    media_status_t (*start)(void *data);
-    media_status_t (*stop)(void *data);
-    media_status_t (*getFormat)(void *data, AMediaFormat *format);
-    media_status_t (*read)(void *data, MediaBufferBase **buffer, uint32_t options, int64_t seekPosUs);
-    bool     (*supportsNonBlockingRead)(void *data);
-};
-
 /**
- * only use CMediaBufferV3 allocated from the CMediaBufferGroupV3 that is
+ * only use CMediaBuffer allocated from the CMediaBufferGroup that is
  * provided to CMediaTrack::start()
  */
-struct CMediaBufferV3 {
+struct CMediaBuffer {
     void *handle;
     void (*release)(void *handle);
     void* (*data)(void *handle);
@@ -84,49 +62,32 @@
     AMediaFormat* (*meta_data)(void *handle);
 };
 
-struct CMediaBufferGroupV3 {
+struct CMediaBufferGroup {
     void *handle;
     bool (*init)(void *handle, size_t buffers, size_t buffer_size, size_t growthLimit);
     void (*add_buffer)(void *handle, size_t size);
     media_status_t (*acquire_buffer)(void *handle,
-            CMediaBufferV3 **buffer, bool nonBlocking, size_t requestedSize);
+            CMediaBuffer **buffer, bool nonBlocking, size_t requestedSize);
     bool (*has_buffers)(void *handle);
 };
 
-struct CMediaTrackV3 {
+struct CMediaTrack {
     void *data;
     void (*free)(void *data);
 
-    media_status_t (*start)(void *data, CMediaBufferGroupV3 *bufferGroup);
+    media_status_t (*start)(void *data, CMediaBufferGroup *bufferGroup);
     media_status_t (*stop)(void *data);
     media_status_t (*getFormat)(void *data, AMediaFormat *format);
-    media_status_t (*read)(void *data, CMediaBufferV3 **buffer, uint32_t options, int64_t seekPosUs);
+    media_status_t (*read)(void *data, CMediaBuffer **buffer, uint32_t options, int64_t seekPosUs);
     bool     (*supportsNonBlockingRead)(void *data);
 };
 
-struct CMediaExtractorV1 {
+struct CMediaExtractor {
     void *data;
 
     void (*free)(void *data);
     size_t (*countTracks)(void *data);
     CMediaTrack* (*getTrack)(void *data, size_t index);
-    status_t (*getTrackMetaData)(
-            void *data,
-            MetaDataBase& meta,
-            size_t index, uint32_t flags);
-
-    status_t (*getMetaData)(void *data, MetaDataBase& meta);
-    uint32_t (*flags)(void *data);
-    status_t (*setMediaCas)(void *data, const uint8_t* casToken, size_t size);
-    const char * (*name)(void *data);
-};
-
-struct CMediaExtractorV2 {
-    void *data;
-
-    void (*free)(void *data);
-    size_t (*countTracks)(void *data);
-    CMediaTrackV2* (*getTrack)(void *data, size_t index);
     media_status_t (*getTrackMetaData)(
             void *data,
             AMediaFormat *meta,
@@ -138,48 +99,19 @@
     const char * (*name)(void *data);
 };
 
-struct CMediaExtractorV3 {
-    void *data;
-
-    void (*free)(void *data);
-    size_t (*countTracks)(void *data);
-    CMediaTrackV3* (*getTrack)(void *data, size_t index);
-    media_status_t (*getTrackMetaData)(
-            void *data,
-            AMediaFormat *meta,
-            size_t index, uint32_t flags);
-
-    media_status_t (*getMetaData)(void *data, AMediaFormat *meta);
-    uint32_t (*flags)(void *data);
-    media_status_t (*setMediaCas)(void *data, const uint8_t* casToken, size_t size);
-    const char * (*name)(void *data);
-};
-
-typedef CMediaExtractorV1* (*CreatorFuncV1)(CDataSource *source, void *meta);
+typedef CMediaExtractor* (*CreatorFunc)(CDataSource *source, void *meta);
 typedef void (*FreeMetaFunc)(void *meta);
 
 // The sniffer can optionally fill in an opaque object, "meta", that helps
 // the corresponding extractor initialize its state without duplicating
 // effort already exerted by the sniffer. If "freeMeta" is given, it will be
 // called against the opaque object when it is no longer used.
-typedef CreatorFuncV1 (*SnifferFuncV1)(
+typedef CreatorFunc (*SnifferFunc)(
         CDataSource *source, float *confidence,
         void **meta, FreeMetaFunc *freeMeta);
 
-typedef CMediaExtractorV2* (*CreatorFuncV2)(CDataSource *source, void *meta);
-
-typedef CreatorFuncV2 (*SnifferFuncV2)(
-        CDataSource *source, float *confidence,
-        void **meta, FreeMetaFunc *freeMeta);
-
-typedef CMediaExtractorV3* (*CreatorFuncV3)(CDataSource *source, void *meta);
-
-typedef CreatorFuncV3 (*SnifferFuncV3)(
-        CDataSource *source, float *confidence,
-        void **meta, FreeMetaFunc *freeMeta);
-
-typedef CMediaExtractorV1 CMediaExtractor;
-typedef CreatorFuncV1 CreatorFunc;
 
 
 typedef struct {
@@ -203,16 +135,17 @@
     const char *extractor_name;
 
     union {
-        SnifferFuncV1 v1;
-        SnifferFuncV2 v2;
-        SnifferFuncV3 v3;
+        SnifferFunc v2;
     } sniff;
 };
 
+// the C++ based API which first shipped in P and is no longer supported
 const uint32_t EXTRACTORDEF_VERSION_LEGACY = 1;
-const uint32_t EXTRACTORDEF_VERSION_CURRENT = 2;
 
-const uint32_t EXTRACTORDEF_VERSION = EXTRACTORDEF_VERSION_LEGACY;
+// the first C/NDK based API
+const uint32_t EXTRACTORDEF_VERSION_NDK_V1 = 2;
+
+const uint32_t EXTRACTORDEF_VERSION = EXTRACTORDEF_VERSION_NDK_V1;
 
 // each plugin library exports one function of this type
 typedef ExtractorDef (*GetExtractorDef)();
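
For orientation, a plugin built against the renamed, unversioned API now registers itself roughly as in the sketch below; the extractor class, sniff check, UUID and name are placeholders, not part of this change (the AACExtractor hunk further down shows the real pattern).

    // Illustrative sketch only -- MyExtractor, looksLikeMyFormat, the UUID and
    // the name are hypothetical. It shows the unversioned CreatorFunc/SnifferFunc
    // surface after this rename.
    static CMediaExtractor* CreateMyExtractor(CDataSource *source, void *meta) {
        (void) meta;
        return wrap(new MyExtractor(new DataSourceHelper(source)));
    }

    static CreatorFunc SniffMy(
            CDataSource *source, float *confidence, void **meta,
            FreeMetaFunc *freeMeta) {
        (void) meta; (void) freeMeta;
        if (!looksLikeMyFormat(source)) {   // hypothetical format check
            return NULL;
        }
        *confidence = 0.2f;
        return CreateMyExtractor;
    }

    __attribute__ ((visibility ("default")))
    ExtractorDef GETEXTRACTORDEF() {
        return {
            EXTRACTORDEF_VERSION,   // now EXTRACTORDEF_VERSION_NDK_V1 (2)
            UUID("00000000-0000-0000-0000-000000000000"),   // placeholder
            1, // version
            "My Example Extractor",
            { .v2 = SniffMy }
        };
    }
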
diff --git a/include/media/MediaExtractorPluginHelper.h b/include/media/MediaExtractorPluginHelper.h
index 705aa81..f4d4da6 100644
--- a/include/media/MediaExtractorPluginHelper.h
+++ b/include/media/MediaExtractorPluginHelper.h
@@ -35,155 +35,18 @@
 struct MediaTrack;
 
 
-class MediaTrackHelper {
-public:
-    virtual ~MediaTrackHelper() {};
-    virtual status_t start() = 0;
-    virtual status_t stop() = 0;
-    virtual status_t getFormat(MetaDataBase& format) = 0;
+class MediaTrackHelper;
 
-    class ReadOptions {
-    public:
-        enum SeekMode : int32_t {
-            SEEK_PREVIOUS_SYNC,
-            SEEK_NEXT_SYNC,
-            SEEK_CLOSEST_SYNC,
-            SEEK_CLOSEST,
-            SEEK_FRAME_INDEX,
-        };
-
-        ReadOptions(uint32_t options, int64_t seekPosUs) {
-            mOptions = options;
-            mSeekPosUs = seekPosUs;
-        }
-        bool getSeekTo(int64_t *time_us, SeekMode *mode) const {
-            if ((mOptions & CMediaTrackReadOptions::SEEK) == 0) {
-                return false;
-            }
-            *time_us = mSeekPosUs;
-            *mode = (SeekMode) (mOptions & 7);
-            return true;
-        }
-        bool getNonBlocking() const {
-            return mOptions & CMediaTrackReadOptions::NONBLOCKING;
-        }
-    private:
-        uint32_t mOptions;
-        int64_t mSeekPosUs;
-    };
-
-    virtual status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL) = 0;
-    virtual bool supportsNonBlockingRead() { return false; }
-};
-
-inline CMediaTrack *wrap(MediaTrackHelper *track) {
-    CMediaTrack *wrapper = (CMediaTrack*) malloc(sizeof(CMediaTrack));
-    wrapper->data = track;
-    wrapper->free = [](void *data) -> void {
-        delete (MediaTrackHelper*)(data);
-    };
-    wrapper->start = [](void *data) -> status_t {
-        return ((MediaTrackHelper*)data)->start();
-    };
-    wrapper->stop = [](void *data) -> status_t {
-        return ((MediaTrackHelper*)data)->stop();
-    };
-    wrapper->getFormat = [](void *data, MetaDataBase &meta) -> status_t {
-        return ((MediaTrackHelper*)data)->getFormat(meta);
-    };
-    wrapper->read = [](void *data, MediaBufferBase **buffer,  uint32_t options, int64_t seekPosUs)
-            -> status_t {
-        MediaTrackHelper::ReadOptions opts(options, seekPosUs);
-        return ((MediaTrackHelper*)data)->read(buffer, &opts);
-    };
-    wrapper->supportsNonBlockingRead = [](void *data) -> bool {
-                return ((MediaTrackHelper*)data)->supportsNonBlockingRead();
-    };
-    return wrapper;
-}
-
-
-class MediaTrackHelperV2 {
-public:
-    virtual ~MediaTrackHelperV2() {};
-    virtual media_status_t start() = 0;
-    virtual media_status_t stop() = 0;
-    virtual media_status_t getFormat(AMediaFormat *format) = 0;
-
-    class ReadOptions {
-    public:
-        enum SeekMode : int32_t {
-            SEEK_PREVIOUS_SYNC,
-            SEEK_NEXT_SYNC,
-            SEEK_CLOSEST_SYNC,
-            SEEK_CLOSEST,
-            SEEK_FRAME_INDEX,
-        };
-
-        ReadOptions(uint32_t options, int64_t seekPosUs) {
-            mOptions = options;
-            mSeekPosUs = seekPosUs;
-        }
-        bool getSeekTo(int64_t *time_us, SeekMode *mode) const {
-            if ((mOptions & CMediaTrackReadOptions::SEEK) == 0) {
-                return false;
-            }
-            *time_us = mSeekPosUs;
-            *mode = (SeekMode) (mOptions & 7);
-            return true;
-        }
-        bool getNonBlocking() const {
-            return mOptions & CMediaTrackReadOptions::NONBLOCKING;
-        }
-    private:
-        uint32_t mOptions;
-        int64_t mSeekPosUs;
-    };
-
-    virtual media_status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL) = 0;
-    virtual bool supportsNonBlockingRead() { return false; }
-};
-
-inline CMediaTrackV2 *wrapV2(MediaTrackHelperV2 *track) {
-    CMediaTrackV2 *wrapper = (CMediaTrackV2*) malloc(sizeof(CMediaTrackV2));
-    wrapper->data = track;
-    wrapper->free = [](void *data) -> void {
-        delete (MediaTrackHelperV2*)(data);
-    };
-    wrapper->start = [](void *data) -> media_status_t {
-        return ((MediaTrackHelperV2*)data)->start();
-    };
-    wrapper->stop = [](void *data) -> media_status_t {
-        return ((MediaTrackHelperV2*)data)->stop();
-    };
-    wrapper->getFormat = [](void *data, AMediaFormat *meta) -> media_status_t {
-        return ((MediaTrackHelperV2*)data)->getFormat(meta);
-    };
-    wrapper->read = [](void *data, MediaBufferBase **buffer,  uint32_t options, int64_t seekPosUs)
-            -> media_status_t {
-        MediaTrackHelperV2::ReadOptions opts(options, seekPosUs);
-        return ((MediaTrackHelperV2*)data)->read(buffer, &opts);
-    };
-    wrapper->supportsNonBlockingRead = [](void *data) -> bool {
-                return ((MediaTrackHelperV2*)data)->supportsNonBlockingRead();
-    };
-    return wrapper;
-}
-
-class MediaTrackHelperV3;
-
-class MediaBufferHelperV3 {
+class MediaBufferHelper {
 private:
-    friend CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *);
-    CMediaBufferV3 *mBuffer;
+    friend CMediaTrack *wrap(MediaTrackHelper *);
+    CMediaBuffer *mBuffer;
 public:
-    MediaBufferHelperV3(CMediaBufferV3 *buf) {
+    MediaBufferHelper(CMediaBuffer *buf) {
         mBuffer = buf;
     }
 
-    virtual ~MediaBufferHelperV3() {}
+    virtual ~MediaBufferHelper() {}
 
     virtual void release() {
         mBuffer->release(mBuffer->handle);
@@ -213,15 +76,15 @@
     }
 };
 
-class MediaBufferGroupHelperV3 {
+class MediaBufferGroupHelper {
 private:
-    CMediaBufferGroupV3 *mGroup;
-    std::map<CMediaBufferV3*, MediaBufferHelperV3*> mBufferHelpers;
+    CMediaBufferGroup *mGroup;
+    std::map<CMediaBuffer*, MediaBufferHelper*> mBufferHelpers;
 public:
-    MediaBufferGroupHelperV3(CMediaBufferGroupV3 *group) {
+    MediaBufferGroupHelper(CMediaBufferGroup *group) {
         mGroup = group;
     }
-    ~MediaBufferGroupHelperV3() {
+    ~MediaBufferGroupHelper() {
         // delete all entries in map
         ALOGV("buffergroup %p map has %zu entries", this, mBufferHelpers.size());
         for (auto it = mBufferHelpers.begin(); it != mBufferHelpers.end(); ++it) {
@@ -235,14 +98,14 @@
         mGroup->add_buffer(mGroup->handle, size);
     }
     media_status_t acquire_buffer(
-            MediaBufferHelperV3 **buffer, bool nonBlocking = false, size_t requestedSize = 0) {
-        CMediaBufferV3 *buf = nullptr;
+            MediaBufferHelper **buffer, bool nonBlocking = false, size_t requestedSize = 0) {
+        CMediaBuffer *buf = nullptr;
         media_status_t ret =
                 mGroup->acquire_buffer(mGroup->handle, &buf, nonBlocking, requestedSize);
         if (ret == AMEDIA_OK && buf != nullptr) {
             auto helper = mBufferHelpers.find(buf);
             if (helper == mBufferHelpers.end()) {
-                MediaBufferHelperV3* newHelper = new MediaBufferHelperV3(buf);
+                MediaBufferHelper* newHelper = new MediaBufferHelper(buf);
                 mBufferHelpers.insert(std::make_pair(buf, newHelper));
                 *buffer = newHelper;
             } else {
@@ -258,11 +121,11 @@
     }
 };
 
-class MediaTrackHelperV3 {
+class MediaTrackHelper {
 public:
-    MediaTrackHelperV3() : mBufferGroup(nullptr) {
+    MediaTrackHelper() : mBufferGroup(nullptr) {
     }
-    virtual ~MediaTrackHelperV3() {
+    virtual ~MediaTrackHelper() {
         delete mBufferGroup;
     }
     virtual media_status_t start() = 0;
@@ -300,45 +163,45 @@
     };
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL) = 0;
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL) = 0;
     virtual bool supportsNonBlockingRead() { return false; }
 protected:
-    friend CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *track);
-    MediaBufferGroupHelperV3 *mBufferGroup;
+    friend CMediaTrack *wrap(MediaTrackHelper *track);
+    MediaBufferGroupHelper *mBufferGroup;
 };
 
-inline CMediaTrackV3 *wrapV3(MediaTrackHelperV3 *track) {
-    CMediaTrackV3 *wrapper = (CMediaTrackV3*) malloc(sizeof(CMediaTrackV3));
+inline CMediaTrack *wrap(MediaTrackHelper *track) {
+    CMediaTrack *wrapper = (CMediaTrack*) malloc(sizeof(CMediaTrack));
     wrapper->data = track;
     wrapper->free = [](void *data) -> void {
-        delete (MediaTrackHelperV3*)(data);
+        delete (MediaTrackHelper*)(data);
     };
-    wrapper->start = [](void *data, CMediaBufferGroupV3 *bufferGroup) -> media_status_t {
-        if (((MediaTrackHelperV3*)data)->mBufferGroup) {
+    wrapper->start = [](void *data, CMediaBufferGroup *bufferGroup) -> media_status_t {
+        if (((MediaTrackHelper*)data)->mBufferGroup) {
             // this shouldn't happen, but handle it anyway
-            delete ((MediaTrackHelperV3*)data)->mBufferGroup;
+            delete ((MediaTrackHelper*)data)->mBufferGroup;
         }
-        ((MediaTrackHelperV3*)data)->mBufferGroup = new MediaBufferGroupHelperV3(bufferGroup);
-        return ((MediaTrackHelperV3*)data)->start();
+        ((MediaTrackHelper*)data)->mBufferGroup = new MediaBufferGroupHelper(bufferGroup);
+        return ((MediaTrackHelper*)data)->start();
     };
     wrapper->stop = [](void *data) -> media_status_t {
-        return ((MediaTrackHelperV3*)data)->stop();
+        return ((MediaTrackHelper*)data)->stop();
     };
     wrapper->getFormat = [](void *data, AMediaFormat *meta) -> media_status_t {
-        return ((MediaTrackHelperV3*)data)->getFormat(meta);
+        return ((MediaTrackHelper*)data)->getFormat(meta);
     };
-    wrapper->read = [](void *data, CMediaBufferV3 **buffer,  uint32_t options, int64_t seekPosUs)
+    wrapper->read = [](void *data, CMediaBuffer **buffer,  uint32_t options, int64_t seekPosUs)
             -> media_status_t {
-        MediaTrackHelperV3::ReadOptions opts(options, seekPosUs);
-        MediaBufferHelperV3 *buf = NULL;
-        media_status_t ret = ((MediaTrackHelperV3*)data)->read(&buf, &opts);
+        MediaTrackHelper::ReadOptions opts(options, seekPosUs);
+        MediaBufferHelper *buf = NULL;
+        media_status_t ret = ((MediaTrackHelper*)data)->read(&buf, &opts);
         if (ret == AMEDIA_OK && buf != nullptr) {
             *buffer = buf->mBuffer;
         }
         return ret;
     };
     wrapper->supportsNonBlockingRead = [](void *data) -> bool {
-                return ((MediaTrackHelperV3*)data)->supportsNonBlockingRead();
+                return ((MediaTrackHelper*)data)->supportsNonBlockingRead();
     };
     return wrapper;
 }
@@ -356,13 +219,13 @@
     enum GetTrackMetaDataFlags {
         kIncludeExtensiveMetaData = 1
     };
-    virtual status_t getTrackMetaData(
-            MetaDataBase& meta,
+    virtual media_status_t getTrackMetaData(
+            AMediaFormat *meta,
             size_t index, uint32_t flags = 0) = 0;
 
     // Return container specific meta-data. The default implementation
     // returns an empty metadata object.
-    virtual status_t getMetaData(MetaDataBase& meta) = 0;
+    virtual media_status_t getMetaData(AMediaFormat *meta) = 0;
 
     enum Flags {
         CAN_SEEK_BACKWARD  = 1,  // the "seek 10secs back button"
@@ -377,8 +240,8 @@
         return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE;
     };
 
-    virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
-        return INVALID_OPERATION;
+    virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
+        return AMEDIA_ERROR_INVALID_OPERATION;
     }
 
     virtual const char * name() { return "<unspecified>"; }
@@ -405,13 +268,13 @@
     };
     wrapper->getTrackMetaData = [](
             void *data,
-            MetaDataBase& meta,
-            size_t index, uint32_t flags) -> status_t {
+            AMediaFormat *meta,
+            size_t index, uint32_t flags) -> media_status_t {
         return ((MediaExtractorPluginHelper*)data)->getTrackMetaData(meta, index, flags);
     };
     wrapper->getMetaData = [](
             void *data,
-            MetaDataBase& meta) -> status_t {
+            AMediaFormat *meta) -> media_status_t {
         return ((MediaExtractorPluginHelper*)data)->getMetaData(meta);
     };
     wrapper->flags = [](
@@ -419,7 +282,7 @@
         return ((MediaExtractorPluginHelper*)data)->flags();
     };
     wrapper->setMediaCas = [](
-            void *data, const uint8_t *casToken, size_t size) -> status_t {
+            void *data, const uint8_t *casToken, size_t size) -> media_status_t {
         return ((MediaExtractorPluginHelper*)data)->setMediaCas(casToken, size);
     };
     wrapper->name = [](
@@ -429,172 +292,6 @@
     return wrapper;
 }
 
-class MediaExtractorPluginHelperV2
-{
-public:
-    virtual ~MediaExtractorPluginHelperV2() {}
-    virtual size_t countTracks() = 0;
-    virtual MediaTrackHelperV2 *getTrack(size_t index) = 0;
-
-    enum GetTrackMetaDataFlags {
-        kIncludeExtensiveMetaData = 1
-    };
-    virtual media_status_t getTrackMetaData(
-            AMediaFormat *meta,
-            size_t index, uint32_t flags = 0) = 0;
-
-    // Return container specific meta-data. The default implementation
-    // returns an empty metadata object.
-    virtual media_status_t getMetaData(AMediaFormat *meta) = 0;
-
-    enum Flags {
-        CAN_SEEK_BACKWARD  = 1,  // the "seek 10secs back button"
-        CAN_SEEK_FORWARD   = 2,  // the "seek 10secs forward button"
-        CAN_PAUSE          = 4,
-        CAN_SEEK           = 8,  // the "seek bar"
-    };
-
-    // If subclasses do _not_ override this, the default is
-    // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
-    virtual uint32_t flags() const {
-        return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE;
-    };
-
-    virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
-        return AMEDIA_ERROR_INVALID_OPERATION;
-    }
-
-    virtual const char * name() { return "<unspecified>"; }
-
-protected:
-    MediaExtractorPluginHelperV2() {}
-
-private:
-    MediaExtractorPluginHelperV2(const MediaExtractorPluginHelperV2 &);
-    MediaExtractorPluginHelperV2 &operator=(const MediaExtractorPluginHelperV2 &);
-};
-
-inline CMediaExtractorV2 *wrapV2(MediaExtractorPluginHelperV2 *extractor) {
-    CMediaExtractorV2 *wrapper = (CMediaExtractorV2*) malloc(sizeof(CMediaExtractorV2));
-    wrapper->data = extractor;
-    wrapper->free = [](void *data) -> void {
-        delete (MediaExtractorPluginHelperV2*)(data);
-    };
-    wrapper->countTracks = [](void *data) -> size_t {
-        return ((MediaExtractorPluginHelperV2*)data)->countTracks();
-    };
-    wrapper->getTrack = [](void *data, size_t index) -> CMediaTrackV2* {
-        return wrapV2(((MediaExtractorPluginHelperV2*)data)->getTrack(index));
-    };
-    wrapper->getTrackMetaData = [](
-            void *data,
-            AMediaFormat *meta,
-            size_t index, uint32_t flags) -> media_status_t {
-        return ((MediaExtractorPluginHelperV2*)data)->getTrackMetaData(meta, index, flags);
-    };
-    wrapper->getMetaData = [](
-            void *data,
-            AMediaFormat *meta) -> media_status_t {
-        return ((MediaExtractorPluginHelperV2*)data)->getMetaData(meta);
-    };
-    wrapper->flags = [](
-            void *data) -> uint32_t {
-        return ((MediaExtractorPluginHelperV2*)data)->flags();
-    };
-    wrapper->setMediaCas = [](
-            void *data, const uint8_t *casToken, size_t size) -> media_status_t {
-        return ((MediaExtractorPluginHelperV2*)data)->setMediaCas(casToken, size);
-    };
-    wrapper->name = [](
-            void *data) -> const char * {
-        return ((MediaExtractorPluginHelperV2*)data)->name();
-    };
-    return wrapper;
-}
-
-class MediaExtractorPluginHelperV3
-{
-public:
-    virtual ~MediaExtractorPluginHelperV3() {}
-    virtual size_t countTracks() = 0;
-    virtual MediaTrackHelperV3 *getTrack(size_t index) = 0;
-
-    enum GetTrackMetaDataFlags {
-        kIncludeExtensiveMetaData = 1
-    };
-    virtual media_status_t getTrackMetaData(
-            AMediaFormat *meta,
-            size_t index, uint32_t flags = 0) = 0;
-
-    // Return container specific meta-data. The default implementation
-    // returns an empty metadata object.
-    virtual media_status_t getMetaData(AMediaFormat *meta) = 0;
-
-    enum Flags {
-        CAN_SEEK_BACKWARD  = 1,  // the "seek 10secs back button"
-        CAN_SEEK_FORWARD   = 2,  // the "seek 10secs forward button"
-        CAN_PAUSE          = 4,
-        CAN_SEEK           = 8,  // the "seek bar"
-    };
-
-    // If subclasses do _not_ override this, the default is
-    // CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE
-    virtual uint32_t flags() const {
-        return CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK | CAN_PAUSE;
-    };
-
-    virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) {
-        return AMEDIA_ERROR_INVALID_OPERATION;
-    }
-
-    virtual const char * name() { return "<unspecified>"; }
-
-protected:
-    MediaExtractorPluginHelperV3() {}
-
-private:
-    MediaExtractorPluginHelperV3(const MediaExtractorPluginHelperV2 &);
-    MediaExtractorPluginHelperV3 &operator=(const MediaExtractorPluginHelperV2 &);
-};
-
-inline CMediaExtractorV3 *wrapV3(MediaExtractorPluginHelperV3 *extractor) {
-    CMediaExtractorV3 *wrapper = (CMediaExtractorV3*) malloc(sizeof(CMediaExtractorV3));
-    wrapper->data = extractor;
-    wrapper->free = [](void *data) -> void {
-        delete (MediaExtractorPluginHelperV3*)(data);
-    };
-    wrapper->countTracks = [](void *data) -> size_t {
-        return ((MediaExtractorPluginHelperV3*)data)->countTracks();
-    };
-    wrapper->getTrack = [](void *data, size_t index) -> CMediaTrackV3* {
-        return wrapV3(((MediaExtractorPluginHelperV3*)data)->getTrack(index));
-    };
-    wrapper->getTrackMetaData = [](
-            void *data,
-            AMediaFormat *meta,
-            size_t index, uint32_t flags) -> media_status_t {
-        return ((MediaExtractorPluginHelperV3*)data)->getTrackMetaData(meta, index, flags);
-    };
-    wrapper->getMetaData = [](
-            void *data,
-            AMediaFormat *meta) -> media_status_t {
-        return ((MediaExtractorPluginHelperV3*)data)->getMetaData(meta);
-    };
-    wrapper->flags = [](
-            void *data) -> uint32_t {
-        return ((MediaExtractorPluginHelperV3*)data)->flags();
-    };
-    wrapper->setMediaCas = [](
-            void *data, const uint8_t *casToken, size_t size) -> media_status_t {
-        return ((MediaExtractorPluginHelperV3*)data)->setMediaCas(casToken, size);
-    };
-    wrapper->name = [](
-            void *data) -> const char * {
-        return ((MediaExtractorPluginHelperV3*)data)->name();
-    };
-    return wrapper;
-}
-
 /* adds some convenience methods */
 class DataSourceHelper {
 public:
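
To make the renamed track surface concrete, a subclass now looks roughly like the sketch below; MyTrack and its details are hypothetical, only the MediaTrackHelper/MediaBufferHelper types and the mBufferGroup flow mirror the header above.

    // Illustrative sketch only -- MyTrack is hypothetical. wrap() installs
    // mBufferGroup from the CMediaBufferGroup handed to start(), so the
    // subclass only draws its output buffers from that group.
    class MyTrack : public MediaTrackHelper {
    public:
        media_status_t start() override {
            mBufferGroup->add_buffer(8192);   // pre-populate the group
            return AMEDIA_OK;
        }
        media_status_t stop() override { return AMEDIA_OK; }
        media_status_t getFormat(AMediaFormat *format) override {
            AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "audio/raw");
            return AMEDIA_OK;
        }
        media_status_t read(MediaBufferHelper **out, const ReadOptions *options) override {
            int64_t seekUs;
            ReadOptions::SeekMode mode;
            if (options != NULL && options->getSeekTo(&seekUs, &mode)) {
                // reposition the underlying source here
            }
            MediaBufferHelper *buffer;
            media_status_t err = mBufferGroup->acquire_buffer(&buffer);
            if (err != AMEDIA_OK) {
                return err;
            }
            // fill buffer->data() with one access unit, then hand it back
            *out = buffer;
            return AMEDIA_OK;
        }
    };
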
diff --git a/include/media/MediaTrack.h b/include/media/MediaTrack.h
index baa3410..e828a7f 100644
--- a/include/media/MediaTrack.h
+++ b/include/media/MediaTrack.h
@@ -156,42 +156,6 @@
 
 private:
     CMediaTrack *wrapper;
-};
-
-class MediaTrackCUnwrapperV2 : public MediaTrack {
-public:
-    explicit MediaTrackCUnwrapperV2(CMediaTrackV2 *wrapper);
-
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t getFormat(MetaDataBase& format);
-    virtual status_t read(MediaBufferBase **buffer, const ReadOptions *options = NULL);
-
-    virtual bool supportNonblockingRead();
-
-protected:
-    virtual ~MediaTrackCUnwrapperV2();
-
-private:
-    CMediaTrackV2 *wrapper;
-};
-
-class MediaTrackCUnwrapperV3 : public MediaTrack {
-public:
-    explicit MediaTrackCUnwrapperV3(CMediaTrackV3 *wrapper);
-
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t getFormat(MetaDataBase& format);
-    virtual status_t read(MediaBufferBase **buffer, const ReadOptions *options = NULL);
-
-    virtual bool supportNonblockingRead();
-
-protected:
-    virtual ~MediaTrackCUnwrapperV3();
-
-private:
-    CMediaTrackV3 *wrapper;
     MediaBufferGroup *bufferGroup;
 };
 
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 2c734ac..5260909 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -253,9 +253,21 @@
 }
 
 void Accessor::Impl::handleInvalidateAck() {
-    std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
-    mBufferPool.processStatusMessages();
-    mBufferPool.mInvalidation.onHandleAck();
+    std::map<ConnectionId, const sp<IObserver>> observers;
+    uint32_t invalidationId;
+    {
+        std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
+        mBufferPool.processStatusMessages();
+        mBufferPool.mInvalidation.onHandleAck(&observers, &invalidationId);
+    }
+    // Do not hold the lock while sending invalidations
+    for (auto it = observers.begin(); it != observers.end(); ++it) {
+        const sp<IObserver> observer = it->second;
+        if (observer) {
+            Return<void> transResult = observer->onMessage(it->first, invalidationId);
+            (void) transResult;
+        }
+    }
 }
 
 bool Accessor::Impl::isValid() {
@@ -365,19 +377,21 @@
     sInvalidator->addAccessor(mId, impl);
 }
 
-void Accessor::Impl::BufferPool::Invalidation::onHandleAck() {
+void Accessor::Impl::BufferPool::Invalidation::onHandleAck(
+        std::map<ConnectionId, const sp<IObserver>> *observers,
+        uint32_t *invalidationId) {
     if (mInvalidationId != 0) {
+        *invalidationId = mInvalidationId;
         std::set<int> deads;
         for (auto it = mAcks.begin(); it != mAcks.end(); ++it) {
             if (it->second != mInvalidationId) {
                 const sp<IObserver> observer = mObservers[it->first];
                 if (observer) {
-                    ALOGV("connection %lld call observer (%u: %u)",
+                    observers->emplace(it->first, observer);
+                    ALOGV("connection %lld will call observer (%u: %u)",
                           (long long)it->first, it->second, mInvalidationId);
-                    Return<void> transResult = observer->onMessage(it->first, mInvalidationId);
-                    (void) transResult;
-                    // N.B: ignore possibility of onMessage oneway call being
-                    // lost.
+                    // N.B: onMessage will be called later. ignore possibility of
+                    // onMessage oneway call being lost.
                     it->second = mInvalidationId;
                 } else {
                     ALOGV("bufferpool2 observer died %lld", (long long)it->first);
diff --git a/media/bufferpool/2.0/AccessorImpl.h b/media/bufferpool/2.0/AccessorImpl.h
index b3faa96..eea72b9 100644
--- a/media/bufferpool/2.0/AccessorImpl.h
+++ b/media/bufferpool/2.0/AccessorImpl.h
@@ -158,7 +158,9 @@
                     BufferInvalidationChannel &channel,
                     const std::shared_ptr<Accessor::Impl> &impl);
 
-            void onHandleAck();
+            void onHandleAck(
+                    std::map<ConnectionId, const sp<IObserver>> *observers,
+                    uint32_t *invalidationId);
         } mInvalidation;
         /// Buffer pool statistics which tracks allocation and transfer statistics.
         struct Stats {
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index ee5cf27..cf06623 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -768,7 +768,11 @@
     s_profile_params_ip.e_sub_cmd = IVE_CMD_CTL_SET_PROFILE_PARAMS;
 
     s_profile_params_ip.e_profile = mIntf->getProfile_l();
-    s_profile_params_ip.u4_entropy_coding_mode = mEntropyMode;
+    if (s_profile_params_ip.e_profile == IV_PROFILE_BASE) {
+        s_profile_params_ip.u4_entropy_coding_mode = 0;
+    } else {
+        s_profile_params_ip.u4_entropy_coding_mode = 1;
+    }
     s_profile_params_ip.u4_timestamp_high = -1;
     s_profile_params_ip.u4_timestamp_low = -1;
 
diff --git a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
index 280ae36..48825e4 100644
--- a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
+++ b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
@@ -405,10 +405,7 @@
     int numFrames = 0;
     int ret = vorbis_dsp_synthesis(mState, &pack, 1);
     if (0 != ret) {
-        ALOGE("vorbis_dsp_synthesis returned %d", ret);
-        mSignalledError = true;
-        work->result = C2_CORRUPTED;
-        return;
+        ALOGD("vorbis_dsp_synthesis returned %d; ignored", ret);
     } else {
         numFrames = vorbis_dsp_pcmout(
                 mState,  reinterpret_cast<int16_t *> (wView.data()),
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 8ecbf5d..9ba2362 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -188,6 +188,24 @@
                 .withConstValue(defaultColorInfo)
                 .build());
 
+        addParameter(
+                DefineParam(mDefaultColorAspects, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS)
+                .withDefault(new C2StreamColorAspectsTuning::output(
+                        0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+                        C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+                .withFields({
+                    C2F(mDefaultColorAspects, range).inRange(
+                                C2Color::RANGE_UNSPECIFIED,     C2Color::RANGE_OTHER),
+                    C2F(mDefaultColorAspects, primaries).inRange(
+                                C2Color::PRIMARIES_UNSPECIFIED, C2Color::PRIMARIES_OTHER),
+                    C2F(mDefaultColorAspects, transfer).inRange(
+                                C2Color::TRANSFER_UNSPECIFIED,  C2Color::TRANSFER_OTHER),
+                    C2F(mDefaultColorAspects, matrix).inRange(
+                                C2Color::MATRIX_UNSPECIFIED,    C2Color::MATRIX_OTHER)
+                })
+                .withSetter(DefaultColorAspectsSetter)
+                .build());
+
         // TODO: support more formats?
         addParameter(
                 DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
@@ -228,6 +246,22 @@
         return C2R::Ok();
     }
 
+    static C2R DefaultColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsTuning::output> &me) {
+        (void)mayBlock;
+        if (me.v.range > C2Color::RANGE_OTHER) {
+            me.set().range = C2Color::RANGE_OTHER;
+        }
+        if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
+            me.set().primaries = C2Color::PRIMARIES_OTHER;
+        }
+        if (me.v.transfer > C2Color::TRANSFER_OTHER) {
+            me.set().transfer = C2Color::TRANSFER_OTHER;
+        }
+        if (me.v.matrix > C2Color::MATRIX_OTHER) {
+            me.set().matrix = C2Color::MATRIX_OTHER;
+        }
+        return C2R::Ok();
+    }
 
     static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me,
                                   const C2P<C2StreamPictureSizeInfo::output> &size) {
@@ -236,6 +270,9 @@
         (void)me;  // TODO: validate
         return C2R::Ok();
     }
+    std::shared_ptr<C2StreamColorAspectsTuning::output> getDefaultColorAspects_l() {
+        return mDefaultColorAspects;
+    }
 
     static C2R Hdr10PlusInfoInputSetter(bool mayBlock, C2P<C2StreamHdr10PlusInfo::input> &me) {
         (void)mayBlock;
@@ -256,6 +293,7 @@
     std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mMaxInputSize;
     std::shared_ptr<C2StreamColorInfo::output> mColorInfo;
     std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormat;
+    std::shared_ptr<C2StreamColorAspectsTuning::output> mDefaultColorAspects;
 #ifdef VP9
 #if 0
     std::shared_ptr<C2StreamHdrStaticInfo::output> mHdrStaticInfo;
@@ -524,32 +562,129 @@
 static void copyOutputBufferToYV12Frame(uint8_t *dst,
         const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
         size_t srcYStride, size_t srcUStride, size_t srcVStride,
-        uint32_t width, uint32_t height, int32_t bpp) {
-    size_t dstYStride = align(width, 16) * bpp ;
+        uint32_t width, uint32_t height) {
+    size_t dstYStride = align(width, 16);
     size_t dstUVStride = align(dstYStride / 2, 16);
     uint8_t *dstStart = dst;
 
     for (size_t i = 0; i < height; ++i) {
-         memcpy(dst, srcY, width * bpp);
+         memcpy(dst, srcY, width);
          srcY += srcYStride;
          dst += dstYStride;
     }
 
     dst = dstStart + dstYStride * height;
     for (size_t i = 0; i < height / 2; ++i) {
-         memcpy(dst, srcV, width / 2 * bpp);
+         memcpy(dst, srcV, width / 2);
          srcV += srcVStride;
          dst += dstUVStride;
     }
 
     dst = dstStart + (dstYStride * height) + (dstUVStride * height / 2);
     for (size_t i = 0; i < height / 2; ++i) {
-         memcpy(dst, srcU, width / 2 * bpp);
+         memcpy(dst, srcU, width / 2);
          srcU += srcUStride;
          dst += dstUVStride;
     }
 }
 
+static void convertYUV420Planar16ToY410(uint32_t *dst,
+        const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
+        size_t srcYStride, size_t srcUStride, size_t srcVStride,
+        size_t dstStride, size_t width, size_t height) {
+
+    // Converting two lines at a time, slightly faster
+    for (size_t y = 0; y < height; y += 2) {
+        uint32_t *dstTop = (uint32_t *) dst;
+        uint32_t *dstBot = (uint32_t *) (dst + dstStride);
+        uint16_t *ySrcTop = (uint16_t*) srcY;
+        uint16_t *ySrcBot = (uint16_t*) (srcY + srcYStride);
+        uint16_t *uSrc = (uint16_t*) srcU;
+        uint16_t *vSrc = (uint16_t*) srcV;
+
+        uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
+        size_t x = 0;
+        for (; x < width - 3; x += 4) {
+
+            u01 = *((uint32_t*)uSrc); uSrc += 2;
+            v01 = *((uint32_t*)vSrc); vSrc += 2;
+
+            y01 = *((uint32_t*)ySrcTop); ySrcTop += 2;
+            y23 = *((uint32_t*)ySrcTop); ySrcTop += 2;
+            y45 = *((uint32_t*)ySrcBot); ySrcBot += 2;
+            y67 = *((uint32_t*)ySrcBot); ySrcBot += 2;
+
+            uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+            uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
+
+            *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
+            *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
+            *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
+            *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
+
+            *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
+            *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
+            *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
+            *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
+        }
+
+        // There should be at most 2 more pixels to process. Note that we don't
+        // need to consider odd case as the buffer is always aligned to even.
+        if (x < width) {
+            u01 = *uSrc;
+            v01 = *vSrc;
+            y01 = *((uint32_t*)ySrcTop);
+            y45 = *((uint32_t*)ySrcBot);
+            uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+            *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
+            *dstTop++ = ((y01 >> 16) << 10) | uv0;
+            *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
+            *dstBot++ = ((y45 >> 16) << 10) | uv0;
+        }
+
+        srcY += srcYStride * 2;
+        srcU += srcUStride;
+        srcV += srcVStride;
+        dst += dstStride * 2;
+    }
+
+    return;
+}
+
+static void convertYUV420Planar16ToYUV420Planar(uint8_t *dst,
+        const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
+        size_t srcYStride, size_t srcUStride, size_t srcVStride,
+        size_t dstStride, size_t width, size_t height) {
+
+    uint8_t *dstY = (uint8_t *)dst;
+    size_t dstYSize = dstStride * height;
+    size_t dstUVStride = align(dstStride / 2, 16);
+    size_t dstUVSize = dstUVStride * height / 2;
+    uint8_t *dstV = dstY + dstYSize;
+    uint8_t *dstU = dstV + dstUVSize;
+
+    for (size_t y = 0; y < height; ++y) {
+        for (size_t x = 0; x < width; ++x) {
+            dstY[x] = (uint8_t)(srcY[x] >> 2);
+        }
+
+        srcY += srcYStride;
+        dstY += dstStride;
+    }
+
+    for (size_t y = 0; y < (height + 1) / 2; ++y) {
+        for (size_t x = 0; x < (width + 1) / 2; ++x) {
+            dstU[x] = (uint8_t)(srcU[x] >> 2);
+            dstV[x] = (uint8_t)(srcV[x] >> 2);
+        }
+
+        srcU += srcUStride;
+        srcV += srcVStride;
+        dstU += dstUVStride;
+        dstV += dstUVStride;
+    }
+    return;
+}
 bool C2SoftVpxDec::outputBuffer(
         const std::shared_ptr<C2BlockPool> &pool,
         const std::unique_ptr<C2Work> &work)
@@ -581,15 +716,21 @@
 
     }
     CHECK(img->fmt == VPX_IMG_FMT_I420 || img->fmt == VPX_IMG_FMT_I42016);
-    int32_t bpp = 1;
-    if (img->fmt == VPX_IMG_FMT_I42016) {
-        bpp = 2;
-    }
 
     std::shared_ptr<C2GraphicBlock> block;
     uint32_t format = HAL_PIXEL_FORMAT_YV12;
+    if (img->fmt == VPX_IMG_FMT_I42016) {
+        IntfImpl::Lock lock = mIntf->lock();
+        std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
+
+        if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
+            defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
+            defaultColorAspects->transfer == C2Color::TRANSFER_ST2084) {
+            format = HAL_PIXEL_FORMAT_RGBA_1010102;
+        }
+    }
     C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
-    c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16) * bpp, mHeight, format, usage, &block);
+    c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16), mHeight, format, usage, &block);
     if (err != C2_OK) {
         ALOGE("fetchGraphicBlock for Output failed with status %d", err);
         work->result = err;
@@ -610,12 +751,30 @@
     size_t srcYStride = img->stride[VPX_PLANE_Y];
     size_t srcUStride = img->stride[VPX_PLANE_U];
     size_t srcVStride = img->stride[VPX_PLANE_V];
-    const uint8_t *srcY = (const uint8_t *)img->planes[VPX_PLANE_Y];
-    const uint8_t *srcU = (const uint8_t *)img->planes[VPX_PLANE_U];
-    const uint8_t *srcV = (const uint8_t *)img->planes[VPX_PLANE_V];
-    copyOutputBufferToYV12Frame(dst, srcY, srcU, srcV,
-                                srcYStride, srcUStride, srcVStride, mWidth, mHeight, bpp);
 
+    if (img->fmt == VPX_IMG_FMT_I42016) {
+        const uint16_t *srcY = (const uint16_t *)img->planes[VPX_PLANE_Y];
+        const uint16_t *srcU = (const uint16_t *)img->planes[VPX_PLANE_U];
+        const uint16_t *srcV = (const uint16_t *)img->planes[VPX_PLANE_V];
+
+        if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
+            convertYUV420Planar16ToY410((uint32_t *)dst, srcY, srcU, srcV, srcYStride / 2,
+                                    srcUStride / 2, srcVStride / 2,
+                                    align(mWidth, 16),
+                                    mWidth, mHeight);
+        } else {
+            convertYUV420Planar16ToYUV420Planar(dst, srcY, srcU, srcV, srcYStride / 2,
+                                    srcUStride / 2, srcVStride / 2,
+                                    align(mWidth, 16),
+                                    mWidth, mHeight);
+        }
+    } else {
+        const uint8_t *srcY = (const uint8_t *)img->planes[VPX_PLANE_Y];
+        const uint8_t *srcU = (const uint8_t *)img->planes[VPX_PLANE_U];
+        const uint8_t *srcV = (const uint8_t *)img->planes[VPX_PLANE_V];
+        copyOutputBufferToYV12Frame(dst, srcY, srcU, srcV,
+                                srcYStride, srcUStride, srcVStride, mWidth, mHeight);
+    }
     finishWork(*(int64_t *)img->user_priv, work, std::move(block));
     return true;
 }
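
For reference, the Y410 path above packs each output pixel into one 32-bit word (2-bit alpha plus three 10-bit channels), while the planar path simply drops the two low bits of every 10-bit sample. A worked example of both, with arbitrary sample values:

    // Illustrative arithmetic only, matching the packing/shifts used above.
    uint16_t y = 0x3A7, u = 0x1F0, v = 0x210;   // arbitrary 10-bit samples

    // Y410: bits 0..9 = U, 10..19 = Y, 20..29 = V, 30..31 = alpha (3 = opaque)
    uint32_t uv    = (uint32_t)(u & 0x3FF) | ((uint32_t)(v & 0x3FF) << 20);
    uint32_t pixel = (3u << 30) | (((uint32_t)y & 0x3FF) << 10) | uv;

    // 8-bit planar output: keep the top 8 of the 10 bits
    uint8_t y8 = (uint8_t)(y >> 2);             // 0x3A7 -> 0xE9
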
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 22e8d84..4878974 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -366,16 +366,165 @@
                 generation, igbp_id, igbp_slot);
     }
 
-    // UGLY HACK: assume YCbCr 4:2:0 8-bit format (and lockable via lockYCbCr) if we don't
-    // recognize the format
-    PixelFormat format = mInfo.mapperInfo.format;
-    if (format != PixelFormat::RGBA_8888 && format != PixelFormat::RGBX_8888) {
-        format = PixelFormat::YCBCR_420_888;
-    }
+    switch (mInfo.mapperInfo.format) {
+        case PixelFormat::RGBA_1010102: {
+            // TRICKY: this is used for media as YUV444 in the case when it is queued directly to a
+            // Surface. In all other cases it is RGBA. We don't know which case it is here, so
+            // default to YUV for now.
+            void *pointer = nullptr;
+            mMapper->lock(
+                    const_cast<native_handle_t *>(mBuffer),
+                    grallocUsage,
+                    { (int32_t)rect.left, (int32_t)rect.top, (int32_t)rect.width, (int32_t)rect.height },
+                    // TODO: fence
+                    hidl_handle(),
+                    [&err, &pointer](const auto &maperr, const auto &mapPointer) {
+                        err = maperr2error(maperr);
+                        if (err == C2_OK) {
+                            pointer = mapPointer;
+                        }
+                    });
+            if (err != C2_OK) {
+                ALOGD("lock failed: %d", err);
+                return err;
+            }
+            // treat as 32-bit values
+            addr[C2PlanarLayout::PLANE_Y] = (uint8_t *)pointer;
+            addr[C2PlanarLayout::PLANE_U] = (uint8_t *)pointer;
+            addr[C2PlanarLayout::PLANE_V] = (uint8_t *)pointer;
+            addr[C2PlanarLayout::PLANE_A] = (uint8_t *)pointer;
+            layout->type = C2PlanarLayout::TYPE_YUVA;
+            layout->numPlanes = 4;
+            layout->rootPlanes = 1;
+            layout->planes[C2PlanarLayout::PLANE_Y] = {
+                C2PlaneInfo::CHANNEL_Y,         // channel
+                4,                              // colInc
+                4 * (int32_t)mInfo.stride,      // rowInc
+                1,                              // mColSampling
+                1,                              // mRowSampling
+                32,                             // allocatedDepth
+                10,                             // bitDepth
+                10,                             // rightShift
+                C2PlaneInfo::LITTLE_END,        // endianness
+                C2PlanarLayout::PLANE_Y,        // rootIx
+                0,                              // offset
+            };
+            layout->planes[C2PlanarLayout::PLANE_U] = {
+                C2PlaneInfo::CHANNEL_CB,         // channel
+                4,                              // colInc
+                4 * (int32_t)mInfo.stride,      // rowInc
+                1,                              // mColSampling
+                1,                              // mRowSampling
+                32,                             // allocatedDepth
+                10,                             // bitDepth
+                0,                              // rightShift
+                C2PlaneInfo::LITTLE_END,        // endianness
+                C2PlanarLayout::PLANE_Y,        // rootIx
+                0,                              // offset
+            };
+            layout->planes[C2PlanarLayout::PLANE_V] = {
+                C2PlaneInfo::CHANNEL_CR,         // channel
+                4,                              // colInc
+                4 * (int32_t)mInfo.stride,      // rowInc
+                1,                              // mColSampling
+                1,                              // mRowSampling
+                32,                             // allocatedDepth
+                10,                             // bitDepth
+                20,                             // rightShift
+                C2PlaneInfo::LITTLE_END,        // endianness
+                C2PlanarLayout::PLANE_Y,        // rootIx
+                0,                              // offset
+            };
+            layout->planes[C2PlanarLayout::PLANE_A] = {
+                C2PlaneInfo::CHANNEL_A,         // channel
+                4,                              // colInc
+                4 * (int32_t)mInfo.stride,      // rowInc
+                1,                              // mColSampling
+                1,                              // mRowSampling
+                32,                             // allocatedDepth
+                2,                              // bitDepth
+                30,                             // rightShift
+                C2PlaneInfo::LITTLE_END,        // endianness
+                C2PlanarLayout::PLANE_Y,        // rootIx
+                0,                              // offset
+            };
+            break;
+        }
 
-    switch (format) {
+        case PixelFormat::RGBA_8888:
+            // TODO: alpha channel
+            // fall-through
+        case PixelFormat::RGBX_8888: {
+            void *pointer = nullptr;
+            mMapper->lock(
+                    const_cast<native_handle_t *>(mBuffer),
+                    grallocUsage,
+                    { (int32_t)rect.left, (int32_t)rect.top, (int32_t)rect.width, (int32_t)rect.height },
+                    // TODO: fence
+                    hidl_handle(),
+                    [&err, &pointer](const auto &maperr, const auto &mapPointer) {
+                        err = maperr2error(maperr);
+                        if (err == C2_OK) {
+                            pointer = mapPointer;
+                        }
+                    });
+            if (err != C2_OK) {
+                ALOGD("lock failed: %d", err);
+                return err;
+            }
+            addr[C2PlanarLayout::PLANE_R] = (uint8_t *)pointer;
+            addr[C2PlanarLayout::PLANE_G] = (uint8_t *)pointer + 1;
+            addr[C2PlanarLayout::PLANE_B] = (uint8_t *)pointer + 2;
+            layout->type = C2PlanarLayout::TYPE_RGB;
+            layout->numPlanes = 3;
+            layout->rootPlanes = 1;
+            layout->planes[C2PlanarLayout::PLANE_R] = {
+                C2PlaneInfo::CHANNEL_R,         // channel
+                4,                              // colInc
+                4 * (int32_t)mInfo.stride,      // rowInc
+                1,                              // mColSampling
+                1,                              // mRowSampling
+                8,                              // allocatedDepth
+                8,                              // bitDepth
+                0,                              // rightShift
+                C2PlaneInfo::NATIVE,            // endianness
+                C2PlanarLayout::PLANE_R,        // rootIx
+                0,                              // offset
+            };
+            layout->planes[C2PlanarLayout::PLANE_G] = {
+                C2PlaneInfo::CHANNEL_G,         // channel
+                4,                              // colInc
+                4 * (int32_t)mInfo.stride,      // rowInc
+                1,                              // mColSampling
+                1,                              // mRowSampling
+                8,                              // allocatedDepth
+                8,                              // bitDepth
+                0,                              // rightShift
+                C2PlaneInfo::NATIVE,            // endianness
+                C2PlanarLayout::PLANE_R,        // rootIx
+                1,                              // offset
+            };
+            layout->planes[C2PlanarLayout::PLANE_B] = {
+                C2PlaneInfo::CHANNEL_B,         // channel
+                4,                              // colInc
+                4 * (int32_t)mInfo.stride,      // rowInc
+                1,                              // mColSampling
+                1,                              // mRowSampling
+                8,                              // allocatedDepth
+                8,                              // bitDepth
+                0,                              // rightShift
+                C2PlaneInfo::NATIVE,            // endianness
+                C2PlanarLayout::PLANE_R,        // rootIx
+                2,                              // offset
+            };
+            break;
+        }
+
         case PixelFormat::YCBCR_420_888:
-        case PixelFormat::YV12: {
+            // fall-through
+        case PixelFormat::YV12:
+            // fall-through
+        default: {
             YCbCrLayout ycbcrLayout;
             mMapper->lockYCbCr(
                     const_cast<native_handle_t *>(mBuffer), grallocUsage,
@@ -450,79 +599,6 @@
             }
             break;
         }
-
-        case PixelFormat::RGBA_8888:
-            // TODO: alpha channel
-            // fall-through
-        case PixelFormat::RGBX_8888: {
-            void *pointer = nullptr;
-            mMapper->lock(
-                    const_cast<native_handle_t *>(mBuffer),
-                    grallocUsage,
-                    { (int32_t)rect.left, (int32_t)rect.top, (int32_t)rect.width, (int32_t)rect.height },
-                    // TODO: fence
-                    hidl_handle(),
-                    [&err, &pointer](const auto &maperr, const auto &mapPointer) {
-                        err = maperr2error(maperr);
-                        if (err == C2_OK) {
-                            pointer = mapPointer;
-                        }
-                    });
-            if (err != C2_OK) {
-                ALOGD("lock failed: %d", err);
-                return err;
-            }
-            addr[C2PlanarLayout::PLANE_R] = (uint8_t *)pointer;
-            addr[C2PlanarLayout::PLANE_G] = (uint8_t *)pointer + 1;
-            addr[C2PlanarLayout::PLANE_B] = (uint8_t *)pointer + 2;
-            layout->type = C2PlanarLayout::TYPE_RGB;
-            layout->numPlanes = 3;
-            layout->rootPlanes = 1;
-            layout->planes[C2PlanarLayout::PLANE_R] = {
-                C2PlaneInfo::CHANNEL_R,         // channel
-                4,                              // colInc
-                4 * (int32_t)mInfo.stride,      // rowInc
-                1,                              // mColSampling
-                1,                              // mRowSampling
-                8,                              // allocatedDepth
-                8,                              // bitDepth
-                0,                              // rightShift
-                C2PlaneInfo::NATIVE,            // endianness
-                C2PlanarLayout::PLANE_R,        // rootIx
-                0,                              // offset
-            };
-            layout->planes[C2PlanarLayout::PLANE_G] = {
-                C2PlaneInfo::CHANNEL_G,         // channel
-                4,                              // colInc
-                4 * (int32_t)mInfo.stride,      // rowInc
-                1,                              // mColSampling
-                1,                              // mRowSampling
-                8,                              // allocatedDepth
-                8,                              // bitDepth
-                0,                              // rightShift
-                C2PlaneInfo::NATIVE,            // endianness
-                C2PlanarLayout::PLANE_R,        // rootIx
-                1,                              // offset
-            };
-            layout->planes[C2PlanarLayout::PLANE_B] = {
-                C2PlaneInfo::CHANNEL_B,         // channel
-                4,                              // colInc
-                4 * (int32_t)mInfo.stride,      // rowInc
-                1,                              // mColSampling
-                1,                              // mRowSampling
-                8,                              // allocatedDepth
-                8,                              // bitDepth
-                0,                              // rightShift
-                C2PlaneInfo::NATIVE,            // endianness
-                C2PlanarLayout::PLANE_R,        // rootIx
-                2,                              // offset
-            };
-            break;
-        }
-        default: {
-            ALOGD("unsupported pixel format: %d", mInfo.mapperInfo.format);
-            return C2_OMITTED;
-        }
     }
     mLocked = true;
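
The RGBA_1010102 branch added above exposes the buffer as a single root plane of 32-bit little-endian words and uses bitDepth/rightShift to describe where each channel lives. An illustrative read of one sample through those descriptors (x, y and stride stand in for the mapped block's coordinates and mInfo.stride):

    // Illustrative only: fetch the Cr sample of pixel (x, y) via the PLANE_V
    // descriptor above (colInc = 4, rowInc = 4 * stride, bitDepth = 10,
    // rightShift = 20). All four planes alias the same 32-bit word.
    const uint8_t *base = addr[C2PlanarLayout::PLANE_V];
    uint32_t word;
    memcpy(&word, base + (size_t)y * 4 * stride + (size_t)x * 4, sizeof(word));
    uint32_t cr = (word >> 20) & 0x3FF;   // 10 bits starting at bit 20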
 
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
index 9384ebf..4e9ac6e 100644
--- a/media/extractors/aac/AACExtractor.cpp
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -31,7 +31,7 @@
 
 namespace android {
 
-class AACSource : public MediaTrackHelperV3 {
+class AACSource : public MediaTrackHelper {
 public:
     AACSource(
             DataSourceHelper *source,
@@ -45,7 +45,7 @@
     virtual media_status_t getFormat(AMediaFormat*);
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~AACSource();
@@ -195,7 +195,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV3 *AACExtractor::getTrack(size_t index) {
+MediaTrackHelper *AACExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
@@ -264,7 +264,7 @@
 }
 
 media_status_t AACSource::read(
-        MediaBufferHelperV3 **out, const ReadOptions *options) {
+        MediaBufferHelper **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t seekTimeUs;
@@ -287,7 +287,7 @@
         return AMEDIA_ERROR_END_OF_STREAM;
     }
 
-    MediaBufferHelperV3 *buffer;
+    MediaBufferHelper *buffer;
     status_t err = mBufferGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return AMEDIA_ERROR_UNKNOWN;
@@ -316,14 +316,14 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-static CMediaExtractorV3* CreateExtractor(
+static CMediaExtractor* CreateExtractor(
         CDataSource *source,
         void *meta) {
     off64_t offset = *static_cast<off64_t*>(meta);
-    return wrapV3(new AACExtractor(new DataSourceHelper(source), offset));
+    return wrap(new AACExtractor(new DataSourceHelper(source), offset));
 }
 
-static CreatorFuncV3 Sniff(
+static CreatorFunc Sniff(
         CDataSource *source, float *confidence, void **meta,
         FreeMetaFunc *freeMeta) {
     off64_t pos = 0;
@@ -383,11 +383,11 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT + 1,
+        EXTRACTORDEF_VERSION,
         UUID("4fd80eae-03d2-4d72-9eb9-48fa6bb54613"),
         1, // version
         "AAC Extractor",
-        { .v3 = Sniff }
+        { .v2 = Sniff }
     };
 }
 
diff --git a/media/extractors/aac/AACExtractor.h b/media/extractors/aac/AACExtractor.h
index be33bf5..643d3f4 100644
--- a/media/extractors/aac/AACExtractor.h
+++ b/media/extractors/aac/AACExtractor.h
@@ -29,12 +29,12 @@
 struct AMessage;
 class String8;
 
-class AACExtractor : public MediaExtractorPluginHelperV3 {
+class AACExtractor : public MediaExtractorPluginHelper {
 public:
     AACExtractor(DataSourceHelper *source, off64_t offset);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/amr/AMRExtractor.cpp b/media/extractors/amr/AMRExtractor.cpp
index 7fd2a41..00d2a92 100644
--- a/media/extractors/amr/AMRExtractor.cpp
+++ b/media/extractors/amr/AMRExtractor.cpp
@@ -29,7 +29,7 @@
 
 namespace android {
 
-class AMRSource : public MediaTrackHelperV3 {
+class AMRSource : public MediaTrackHelper {
 public:
     AMRSource(
             DataSourceHelper *source,
@@ -44,7 +44,7 @@
     virtual media_status_t getFormat(AMediaFormat *);
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~AMRSource();
@@ -209,7 +209,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV3 *AMRExtractor::getTrack(size_t index) {
+MediaTrackHelper *AMRExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
@@ -273,7 +273,7 @@
 }
 
 media_status_t AMRSource::read(
-        MediaBufferHelperV3 **out, const ReadOptions *options) {
+        MediaBufferHelper **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t seekTimeUs;
@@ -322,7 +322,7 @@
         return AMEDIA_ERROR_MALFORMED;
     }
 
-    MediaBufferHelperV3 *buffer;
+    MediaBufferHelper *buffer;
     status_t err = mBufferGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return AMEDIA_ERROR_UNKNOWN;
@@ -363,22 +363,22 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT + 1,
+        EXTRACTORDEF_VERSION,
         UUID("c86639c9-2f31-40ac-a715-fa01b4493aaf"),
         1,
         "AMR Extractor",
         {
-           .v3 = [](
+           .v2 = [](
                     CDataSource *source,
                     float *confidence,
                     void **,
-                    FreeMetaFunc *) -> CreatorFuncV3 {
+                    FreeMetaFunc *) -> CreatorFunc {
                 DataSourceHelper helper(source);
                 if (SniffAMR(&helper, nullptr, confidence)) {
                     return [](
                             CDataSource *source,
-                            void *) -> CMediaExtractorV3* {
-                        return wrapV3(new AMRExtractor(new DataSourceHelper(source)));};
+                            void *) -> CMediaExtractor* {
+                        return wrap(new AMRExtractor(new DataSourceHelper(source)));};
                 }
                 return NULL;
             }
diff --git a/media/extractors/amr/AMRExtractor.h b/media/extractors/amr/AMRExtractor.h
index b50ce81..b76ee9c 100644
--- a/media/extractors/amr/AMRExtractor.h
+++ b/media/extractors/amr/AMRExtractor.h
@@ -29,12 +29,12 @@
 class String8;
 #define OFFSET_TABLE_LEN    300
 
-class AMRExtractor : public MediaExtractorPluginHelperV3 {
+class AMRExtractor : public MediaExtractorPluginHelper {
 public:
     explicit AMRExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
index 4e04605..b5eaf9b 100644
--- a/media/extractors/flac/FLACExtractor.cpp
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -52,7 +52,7 @@
 
 class FLACParser;
 
-class FLACSource : public MediaTrackHelperV3 {
+class FLACSource : public MediaTrackHelper {
 
 public:
     FLACSource(
@@ -65,7 +65,7 @@
     virtual media_status_t getFormat(AMediaFormat *meta);
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~FLACSource();
@@ -124,12 +124,12 @@
     }
 
     // media buffers
-    void allocateBuffers(MediaBufferGroupHelperV3 *group);
+    void allocateBuffers(MediaBufferGroupHelper *group);
     void releaseBuffers();
-    MediaBufferHelperV3 *readBuffer() {
+    MediaBufferHelper *readBuffer() {
         return readBuffer(false, 0LL);
     }
-    MediaBufferHelperV3 *readBuffer(FLAC__uint64 sample) {
+    MediaBufferHelper *readBuffer(FLAC__uint64 sample) {
         return readBuffer(true, sample);
     }
 
@@ -142,7 +142,7 @@
 
     // media buffers
     size_t mMaxBufferSize;
-    MediaBufferGroupHelperV3 *mGroup;
+    MediaBufferGroupHelper *mGroup;
     void (*mCopy)(int16_t *dst, const int * src[kMaxChannels], unsigned nSamples, unsigned nChannels);
 
     // handle to underlying libFLAC parser
@@ -166,7 +166,7 @@
     FLAC__StreamDecoderErrorStatus mErrorStatus;
 
     status_t init();
-    MediaBufferHelperV3 *readBuffer(bool doSeek, FLAC__uint64 sample);
+    MediaBufferHelper *readBuffer(bool doSeek, FLAC__uint64 sample);
 
     // no copy constructor or assignment
     FLACParser(const FLACParser &);
@@ -576,7 +576,7 @@
     return OK;
 }
 
-void FLACParser::allocateBuffers(MediaBufferGroupHelperV3 *group)
+void FLACParser::allocateBuffers(MediaBufferGroupHelper *group)
 {
     CHECK(mGroup == NULL);
     mGroup = group;
@@ -588,7 +588,7 @@
 {
 }
 
-MediaBufferHelperV3 *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
+MediaBufferHelper *FLACParser::readBuffer(bool doSeek, FLAC__uint64 sample)
 {
     mWriteRequested = true;
     mWriteCompleted = false;
@@ -625,7 +625,7 @@
     }
     // acquire a media buffer
     CHECK(mGroup != NULL);
-    MediaBufferHelperV3 *buffer;
+    MediaBufferHelper *buffer;
     status_t err = mGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return NULL;
@@ -716,9 +716,9 @@
 }
 
 media_status_t FLACSource::read(
-        MediaBufferHelperV3 **outBuffer, const ReadOptions *options)
+        MediaBufferHelper **outBuffer, const ReadOptions *options)
 {
-    MediaBufferHelperV3 *buffer;
+    MediaBufferHelper *buffer;
     // process an optional seek request
     int64_t seekTimeUs;
     ReadOptions::SeekMode mode;
@@ -772,7 +772,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV3 *FLACExtractor::getTrack(size_t index)
+MediaTrackHelper *FLACExtractor::getTrack(size_t index)
 {
     if (mInitCheck != OK || index > 0) {
         return NULL;
@@ -828,22 +828,22 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-            EXTRACTORDEF_VERSION_CURRENT + 1,
+            EXTRACTORDEF_VERSION,
             UUID("1364b048-cc45-4fda-9934-327d0ebf9829"),
             1,
             "FLAC Extractor",
             {
-                .v3 = [](
+                .v2 = [](
                         CDataSource *source,
                         float *confidence,
                         void **,
-                        FreeMetaFunc *) -> CreatorFuncV3 {
+                        FreeMetaFunc *) -> CreatorFunc {
                     DataSourceHelper helper(source);
                     if (SniffFLAC(&helper, confidence)) {
                         return [](
                                 CDataSource *source,
-                                void *) -> CMediaExtractorV3* {
-                            return wrapV3(new FLACExtractor(new DataSourceHelper(source)));};
+                                void *) -> CMediaExtractor* {
+                            return wrap(new FLACExtractor(new DataSourceHelper(source)));};
                     }
                     return NULL;
                 }
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/FLACExtractor.h
index 9604e4a..5a73d20 100644
--- a/media/extractors/flac/FLACExtractor.h
+++ b/media/extractors/flac/FLACExtractor.h
@@ -27,13 +27,13 @@
 
 class FLACParser;
 
-class FLACExtractor : public MediaExtractorPluginHelperV3 {
+class FLACExtractor : public MediaExtractorPluginHelper {
 
 public:
     explicit FLACExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/midi/MidiExtractor.cpp b/media/extractors/midi/MidiExtractor.cpp
index 43f394c..0c74376 100644
--- a/media/extractors/midi/MidiExtractor.cpp
+++ b/media/extractors/midi/MidiExtractor.cpp
@@ -32,7 +32,7 @@
 // how many Sonivox output buffers to aggregate into one MediaBuffer
 static const int NUM_COMBINE_BUFFERS = 4;
 
-class MidiSource : public MediaTrackHelperV3 {
+class MidiSource : public MediaTrackHelper {
 
 public:
     MidiSource(
@@ -44,7 +44,7 @@
     virtual media_status_t getFormat(AMediaFormat *);
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~MidiSource();
@@ -113,10 +113,10 @@
 }
 
 media_status_t MidiSource::read(
-        MediaBufferHelperV3 **outBuffer, const ReadOptions *options)
+        MediaBufferHelper **outBuffer, const ReadOptions *options)
 {
     ALOGV("MidiSource::read");
-    MediaBufferHelperV3 *buffer;
+    MediaBufferHelper *buffer;
     // process an optional seek request
     int64_t seekTimeUs;
     ReadOptions::SeekMode mode;
@@ -199,7 +199,7 @@
     return mIsInitialized ? OK : UNKNOWN_ERROR;
 }
 
-status_t MidiEngine::allocateBuffers(MediaBufferGroupHelperV3 *group) {
+status_t MidiEngine::allocateBuffers(MediaBufferGroupHelper *group) {
     // select reverb preset and enable
     EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_PRESET, EAS_PARAM_REVERB_CHAMBER);
     EAS_SetParameter(mEasData, EAS_MODULE_REVERB, EAS_PARAM_REVERB_BYPASS, EAS_FALSE);
@@ -222,13 +222,13 @@
     return result == EAS_SUCCESS ? OK : UNKNOWN_ERROR;
 }
 
-MediaBufferHelperV3* MidiEngine::readBuffer() {
+MediaBufferHelper* MidiEngine::readBuffer() {
     EAS_STATE state;
     EAS_State(mEasData, mEasHandle, &state);
     if ((state == EAS_STATE_STOPPED) || (state == EAS_STATE_ERROR)) {
         return NULL;
     }
-    MediaBufferHelperV3 *buffer;
+    MediaBufferHelper *buffer;
     status_t err = mGroup->acquire_buffer(&buffer);
     if (err != OK) {
         ALOGE("readBuffer: no buffer");
@@ -279,6 +279,7 @@
     ALOGV("MidiExtractor dtor");
     AMediaFormat_delete(mFileMetadata);
     AMediaFormat_delete(mTrackMetadata);
+    delete mEngine;
 }
 
 size_t MidiExtractor::countTracks()
@@ -286,7 +287,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV3 *MidiExtractor::getTrack(size_t index)
+MediaTrackHelper *MidiExtractor::getTrack(size_t index)
 {
     if (mInitCheck != OK || index > 0) {
         return NULL;
@@ -331,21 +332,21 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT + 1,
+        EXTRACTORDEF_VERSION,
         UUID("ef6cca0a-f8a2-43e6-ba5f-dfcd7c9a7ef2"),
         1,
         "MIDI Extractor",
         {
-            .v3 = [](
+            .v2 = [](
                 CDataSource *source,
                 float *confidence,
                 void **,
-                FreeMetaFunc *) -> CreatorFuncV3 {
+                FreeMetaFunc *) -> CreatorFunc {
                 if (SniffMidi(source, confidence)) {
                     return [](
                             CDataSource *source,
-                            void *) -> CMediaExtractorV3* {
-                        return wrapV3(new MidiExtractor(source));};
+                            void *) -> CMediaExtractor* {
+                        return wrap(new MidiExtractor(source));};
                 }
                 return NULL;
             }
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/MidiExtractor.h
index ad345b8..2e78086 100644
--- a/media/extractors/midi/MidiExtractor.h
+++ b/media/extractors/midi/MidiExtractor.h
@@ -38,26 +38,26 @@
 
     status_t initCheck();
 
-    status_t allocateBuffers(MediaBufferGroupHelperV3 *group);
+    status_t allocateBuffers(MediaBufferGroupHelper *group);
     status_t releaseBuffers();
     status_t seekTo(int64_t positionUs);
-    MediaBufferHelperV3* readBuffer();
+    MediaBufferHelper* readBuffer();
 private:
     MidiIoWrapper *mIoWrapper;
-    MediaBufferGroupHelperV3 *mGroup;
+    MediaBufferGroupHelper *mGroup;
     EAS_DATA_HANDLE mEasData;
     EAS_HANDLE mEasHandle;
     const S_EAS_LIB_CONFIG* mEasConfig;
     bool mIsInitialized;
 };
 
-class MidiExtractor : public MediaExtractorPluginHelperV3 {
+class MidiExtractor : public MediaExtractorPluginHelper {
 
 public:
     explicit MidiExtractor(CDataSource *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 4a30740..42a9c42 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -125,7 +125,7 @@
     BlockIterator &operator=(const BlockIterator &);
 };
 
-struct MatroskaSource : public MediaTrackHelperV3 {
+struct MatroskaSource : public MediaTrackHelper {
     MatroskaSource(MatroskaExtractor *extractor, size_t index);
 
     virtual media_status_t start();
@@ -134,7 +134,7 @@
     virtual media_status_t getFormat(AMediaFormat *);
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options);
+            MediaBufferHelper **buffer, const ReadOptions *options);
 
 protected:
     virtual ~MatroskaSource();
@@ -154,11 +154,11 @@
     BlockIterator mBlockIter;
     ssize_t mNALSizeLen;  // for type AVC or HEVC
 
-    List<MediaBufferHelperV3 *> mPendingFrames;
+    List<MediaBufferHelper *> mPendingFrames;
 
     status_t advance();
 
-    status_t setWebmBlockCryptoInfo(MediaBufferHelperV3 *mbuf);
+    status_t setWebmBlockCryptoInfo(MediaBufferHelper *mbuf);
     media_status_t readBlock();
     void clearPendingFrames();
 
@@ -569,7 +569,7 @@
 
 void MatroskaSource::clearPendingFrames() {
     while (!mPendingFrames.empty()) {
-        MediaBufferHelperV3 *frame = *mPendingFrames.begin();
+        MediaBufferHelper *frame = *mPendingFrames.begin();
         mPendingFrames.erase(mPendingFrames.begin());
 
         frame->release();
@@ -577,7 +577,7 @@
     }
 }
 
-status_t MatroskaSource::setWebmBlockCryptoInfo(MediaBufferHelperV3 *mbuf) {
+status_t MatroskaSource::setWebmBlockCryptoInfo(MediaBufferHelper *mbuf) {
     if (mbuf->range_length() < 1 || mbuf->range_length() - 1 > INT32_MAX) {
         // 1-byte signal
         return ERROR_MALFORMED;
@@ -727,7 +727,7 @@
         }
 
         len += trackInfo->mHeaderLen;
-        MediaBufferHelperV3 *mbuf;
+        MediaBufferHelper *mbuf;
         mBufferGroup->acquire_buffer(&mbuf, false /* nonblocking */, len /* requested size */);
         mbuf->set_range(0, len);
         uint8_t *data = static_cast<uint8_t *>(mbuf->data());
@@ -763,7 +763,7 @@
 }
 
 media_status_t MatroskaSource::read(
-        MediaBufferHelperV3 **out, const ReadOptions *options) {
+        MediaBufferHelper **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t targetSampleTimeUs = -1ll;
@@ -799,7 +799,7 @@
         }
     }
 
-    MediaBufferHelperV3 *frame = *mPendingFrames.begin();
+    MediaBufferHelper *frame = *mPendingFrames.begin();
     mPendingFrames.erase(mPendingFrames.begin());
 
     if ((mType != AVC && mType != HEVC) || mNALSizeLen == 0) {
@@ -828,7 +828,7 @@
     size_t srcSize = frame->range_length();
 
     size_t dstSize = 0;
-    MediaBufferHelperV3 *buffer = NULL;
+    MediaBufferHelper *buffer = NULL;
     uint8_t *dstPtr = NULL;
 
     for (int32_t pass = 0; pass < 2; ++pass) {
@@ -1005,7 +1005,7 @@
     return mTracks.size();
 }
 
-MediaTrackHelperV3 *MatroskaExtractor::getTrack(size_t index) {
+MediaTrackHelper *MatroskaExtractor::getTrack(size_t index) {
     if (index >= mTracks.size()) {
         return NULL;
     }
@@ -1673,22 +1673,22 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT + 1,
+        EXTRACTORDEF_VERSION,
         UUID("abbedd92-38c4-4904-a4c1-b3f45f899980"),
         1,
         "Matroska Extractor",
         {
-            .v3 = [](
+            .v2 = [](
                     CDataSource *source,
                     float *confidence,
                     void **,
-                    FreeMetaFunc *) -> CreatorFuncV3 {
+                    FreeMetaFunc *) -> CreatorFunc {
                 DataSourceHelper helper(source);
                 if (SniffMatroska(&helper, confidence)) {
                     return [](
                             CDataSource *source,
-                            void *) -> CMediaExtractorV3* {
-                        return wrapV3(new MatroskaExtractor(new DataSourceHelper(source)));};
+                            void *) -> CMediaExtractor* {
+                        return wrap(new MatroskaExtractor(new DataSourceHelper(source)));};
                 }
                 return NULL;
             }
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/MatroskaExtractor.h
index a09256a..3871bdf 100644
--- a/media/extractors/mkv/MatroskaExtractor.h
+++ b/media/extractors/mkv/MatroskaExtractor.h
@@ -35,12 +35,12 @@
 struct DataSourceBaseReader;
 struct MatroskaSource;
 
-struct MatroskaExtractor : public MediaExtractorPluginHelperV3 {
+struct MatroskaExtractor : public MediaExtractorPluginHelper {
     explicit MatroskaExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
 
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
 
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 7abec54..20bcda8 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -207,7 +207,7 @@
     return valid;
 }
 
-class MP3Source : public MediaTrackHelperV3 {
+class MP3Source : public MediaTrackHelper {
 public:
     MP3Source(
             AMediaFormat *meta, DataSourceHelper *source,
@@ -220,7 +220,7 @@
     virtual media_status_t getFormat(AMediaFormat *meta);
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~MP3Source();
@@ -413,7 +413,7 @@
     return mInitCheck != OK ? 0 : 1;
 }
 
-MediaTrackHelperV3 *MP3Extractor::getTrack(size_t index) {
+MediaTrackHelper *MP3Extractor::getTrack(size_t index) {
     if (mInitCheck != OK || index != 0) {
         return NULL;
     }
@@ -493,7 +493,7 @@
 }
 
 media_status_t MP3Source::read(
-        MediaBufferHelperV3 **out, const ReadOptions *options) {
+        MediaBufferHelper **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t seekTimeUs;
@@ -523,7 +523,7 @@
         mSamplesRead = 0;
     }
 
-    MediaBufferHelperV3 *buffer;
+    MediaBufferHelper *buffer;
     status_t err = mBufferGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return AMEDIA_ERROR_UNKNOWN;
@@ -668,14 +668,14 @@
     return AMEDIA_OK;
 }
 
-static CMediaExtractorV3* CreateExtractor(
+static CMediaExtractor* CreateExtractor(
         CDataSource *source,
         void *meta) {
     Mp3Meta *metaData = static_cast<Mp3Meta *>(meta);
-    return wrapV3(new MP3Extractor(new DataSourceHelper(source), metaData));
+    return wrap(new MP3Extractor(new DataSourceHelper(source), metaData));
 }
 
-static CreatorFuncV3 Sniff(
+static CreatorFunc Sniff(
         CDataSource *source, float *confidence, void **meta,
         FreeMetaFunc *freeMeta) {
     off64_t pos = 0;
@@ -712,11 +712,11 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT + 1,
+        EXTRACTORDEF_VERSION,
         UUID("812a3f6c-c8cf-46de-b529-3774b14103d4"),
         1, // version
         "MP3 Extractor",
-        { .v3 = Sniff }
+        { .v2 = Sniff }
     };
 }
 
diff --git a/media/extractors/mp3/MP3Extractor.h b/media/extractors/mp3/MP3Extractor.h
index fe72cff..1e38ab7 100644
--- a/media/extractors/mp3/MP3Extractor.h
+++ b/media/extractors/mp3/MP3Extractor.h
@@ -32,13 +32,13 @@
 class String8;
 struct Mp3Meta;
 
-class MP3Extractor : public MediaExtractorPluginHelperV3 {
+class MP3Extractor : public MediaExtractorPluginHelper {
 public:
     MP3Extractor(DataSourceHelper *source, Mp3Meta *meta);
     ~MP3Extractor();
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index c4b539d..2909a50 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -69,7 +69,7 @@
     kMaxAtomSize = 64 * 1024 * 1024,
 };
 
-class MPEG4Source : public MediaTrackHelperV3 {
+class MPEG4Source : public MediaTrackHelper {
 static const size_t  kMaxPcmFrameSize = 8192;
 public:
     // Caller retains ownership of both "dataSource" and "sampleTable".
@@ -88,10 +88,10 @@
 
     virtual media_status_t getFormat(AMediaFormat *);
 
-    virtual media_status_t read(MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+    virtual media_status_t read(MediaBufferHelper **buffer, const ReadOptions *options = NULL);
     virtual bool supportNonblockingRead() { return true; }
     virtual media_status_t fragmentedRead(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
     virtual ~MPEG4Source();
 
@@ -136,7 +136,7 @@
 
     bool mStarted;
 
-    MediaBufferHelperV3 *mBuffer;
+    MediaBufferHelper *mBuffer;
 
     uint8_t *mSrcBuffer;
 
@@ -1553,9 +1553,40 @@
                 return ERROR_IO;
             }
 
-            String8 mimeFormat((const char *)(buffer.get()), chunk_data_size);
-            AMediaFormat_setString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
+            // Prior to API 29, the metadata track was not compliant with ISO/IEC
+            // 14496-12-2015. This led to some ISO-compliant parsers failing to read the
+            // metadata track. From API 29 onwards, the metadata track was changed to
+            // make it compliant with the standard. The workaround is to write the
+            // null-terminated mime_format string twice. This allows compliant parsers to
+            // read the missing reserved, data_reference_index, and content_encoding fields
+            // from the first mime_format string. The actual mime_format field would then be
+            // read correctly from the second string. The non-compliant Android frameworks
+            // from API 28 and earlier would still be able to read the mime_format correctly
+            // as it would only read the first null-terminated mime_format string. To enable
+            // reading metadata tracks generated from both the non-compliant and compliant
+            // formats, a check needs to be done to see which format is used.
+            int null_pos = 0;
+            const unsigned char *str = buffer.get();
+            while (null_pos < chunk_data_size) {
+              if (*(str + null_pos) == '\0') {
+                break;
+              }
+              ++null_pos;
+            }
 
+            if (null_pos == chunk_data_size - 1) {
+              // This is not a standard-compliant metadata track.
+              String8 mimeFormat((const char *)(buffer.get()), chunk_data_size);
+              AMediaFormat_setString(mLastTrack->meta,
+                  AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
+            } else {
+              // This is a standard-compliant metadata track.
+              String8 contentEncoding((const char *)(buffer.get() + 8));
+              String8 mimeFormat((const char *)(buffer.get() + 8 + contentEncoding.size() + 1),
+                  chunk_data_size - 8 - contentEncoding.size() - 1);
+              AMediaFormat_setString(mLastTrack->meta,
+                  AMEDIAFORMAT_KEY_MIME, mimeFormat.string());
+            }
             break;
         }
 
@@ -3824,7 +3855,7 @@
     }
 }
 
-MediaTrackHelperV3 *MPEG4Extractor::getTrack(size_t index) {
+MediaTrackHelper *MPEG4Extractor::getTrack(size_t index) {
     status_t err;
     if ((err = readMetaData()) != OK) {
         return NULL;
@@ -5175,7 +5206,7 @@
 }
 
 media_status_t MPEG4Source::read(
-        MediaBufferHelperV3 **out, const ReadOptions *options) {
+        MediaBufferHelper **out, const ReadOptions *options) {
     Mutex::Autolock autoLock(mLock);
 
     CHECK(mStarted);
@@ -5578,7 +5609,7 @@
 }
 
 media_status_t MPEG4Source::fragmentedRead(
-        MediaBufferHelperV3 **out, const ReadOptions *options) {
+        MediaBufferHelper **out, const ReadOptions *options) {
 
     ALOGV("MPEG4Source::fragmentedRead");
 
@@ -6071,11 +6102,11 @@
     return true;
 }
 
-static CMediaExtractorV3* CreateExtractor(CDataSource *source, void *) {
-    return wrapV3(new MPEG4Extractor(new DataSourceHelper(source)));
+static CMediaExtractor* CreateExtractor(CDataSource *source, void *) {
+    return wrap(new MPEG4Extractor(new DataSourceHelper(source)));
 }
 
-static CreatorFuncV3 Sniff(
+static CreatorFunc Sniff(
         CDataSource *source, float *confidence, void **,
         FreeMetaFunc *) {
     DataSourceHelper helper(source);
@@ -6096,11 +6127,11 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT + 1,
+        EXTRACTORDEF_VERSION,
         UUID("27575c67-4417-4c54-8d3d-8e626985a164"),
         2, // version
         "MP4 Extractor",
-        { .v3 = Sniff }
+        { .v2 = Sniff }
     };
 }
 
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index a9a4635..79d5ff6dd 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -53,12 +53,12 @@
     uint32_t default_sample_flags;
 };
 
-class MPEG4Extractor : public MediaExtractorPluginHelperV3 {
+class MPEG4Extractor : public MediaExtractorPluginHelper {
 public:
     explicit MPEG4Extractor(DataSourceHelper *source, const char *mime = NULL);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 38c86eb..b816093 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -15,14 +15,11 @@
         "android.hardware.cas@1.0",
         "android.hardware.cas.native@1.0",
         "android.hidl.token@1.0-utils",
-        "libbinder",
-        "libcrypto",
-        "libcutils",
-        "libhidlallocatorutils",
+        "android.hidl.allocator@1.0",
+        "libhidlmemory",
         "libhidlbase",
         "liblog",
-        "libmediaextractor",
-        "libstagefright_foundation",
+        "libmediandk",
     ],
 
     header_libs: [
@@ -30,8 +27,13 @@
     ],
 
     static_libs: [
+        "libcrypto",
+        "libstagefright_foundation",
         "libstagefright_mpeg2support",
+        "libmediaextractor",
         "libutils",
+        "libstagefright",
+        "libstagefright_esds",
     ],
 
     name: "libmpeg2extractor",
diff --git a/media/extractors/mpeg2/ExtractorBundle.cpp b/media/extractors/mpeg2/ExtractorBundle.cpp
index 366aa59..2f4196c 100644
--- a/media/extractors/mpeg2/ExtractorBundle.cpp
+++ b/media/extractors/mpeg2/ExtractorBundle.cpp
@@ -36,7 +36,7 @@
         1,
         "MPEG2-PS/TS Extractor",
         {
-            [](
+            .v2 = [](
                     CDataSource *source,
                     float *confidence,
                     void **,
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.cpp b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
index fc13d2c..554d252 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
@@ -33,22 +33,23 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 #include <utils/String8.h>
 
 #include <inttypes.h>
 
 namespace android {
 
-struct MPEG2PSExtractor::Track : public MediaTrackHelper, public RefBase {
+struct MPEG2PSExtractor::Track : public MediaTrackHelper {
     Track(MPEG2PSExtractor *extractor,
           unsigned stream_id, unsigned stream_type);
 
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t getFormat(MetaDataBase &);
+    virtual media_status_t start();
+    virtual media_status_t stop();
+    virtual media_status_t getFormat(AMediaFormat *);
 
-    virtual status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options);
+    virtual media_status_t read(
+            MediaBufferHelper **buffer, const ReadOptions *options);
 
 protected:
     virtual ~Track();
@@ -72,21 +73,21 @@
 };
 
 struct MPEG2PSExtractor::WrappedTrack : public MediaTrackHelper {
-    WrappedTrack(MPEG2PSExtractor *extractor, const sp<Track> &track);
+    WrappedTrack(MPEG2PSExtractor *extractor, Track *track);
 
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t getFormat(MetaDataBase &);
+    virtual media_status_t start();
+    virtual media_status_t stop();
+    virtual media_status_t getFormat(AMediaFormat *);
 
-    virtual status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options);
+    virtual media_status_t read(
+            MediaBufferHelper **buffer, const ReadOptions *options);
 
 protected:
     virtual ~WrappedTrack();
 
 private:
     MPEG2PSExtractor *mExtractor;
-    sp<MPEG2PSExtractor::Track> mTrack;
+    MPEG2PSExtractor::Track *mTrack;
 
     DISALLOW_EVIL_CONSTRUCTORS(WrappedTrack);
 };
@@ -107,13 +108,14 @@
     }
 
     // Remove all tracks that were unable to determine their format.
-    MetaDataBase meta;
+    AMediaFormat *meta = AMediaFormat_new();
     for (size_t i = mTracks.size(); i > 0;) {
         i--;
-        if (mTracks.valueAt(i)->getFormat(meta) != OK) {
+        if (mTracks.valueAt(i)->getFormat(meta) != AMEDIA_OK) {
             mTracks.removeItemsAt(i);
         }
     }
+    AMediaFormat_delete(meta);
 
     mScanning = false;
 }
@@ -134,20 +136,20 @@
     return new WrappedTrack(this, mTracks.valueAt(index));
 }
 
-status_t MPEG2PSExtractor::getTrackMetaData(
-        MetaDataBase &meta,
+media_status_t MPEG2PSExtractor::getTrackMetaData(
+        AMediaFormat *meta,
         size_t index, uint32_t /* flags */) {
     if (index >= mTracks.size()) {
-        return UNKNOWN_ERROR;
+        return AMEDIA_ERROR_UNKNOWN;
     }
 
     return mTracks.valueAt(index)->getFormat(meta);
 }
 
-status_t MPEG2PSExtractor::getMetaData(MetaDataBase &meta) {
-    meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2PS);
+media_status_t MPEG2PSExtractor::getMetaData(AMediaFormat *meta) {
+    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_CONTAINER_MPEG2PS);
 
-    return OK;
+    return AMEDIA_OK;
 }
 
 uint32_t MPEG2PSExtractor::flags() const {
@@ -635,42 +637,55 @@
     mQueue = NULL;
 }
 
-status_t MPEG2PSExtractor::Track::start() {
+media_status_t MPEG2PSExtractor::Track::start() {
     if (mSource == NULL) {
-        return NO_INIT;
+        return AMEDIA_ERROR_UNKNOWN;
     }
 
-    return mSource->start(NULL); // AnotherPacketSource::start doesn't use its argument
+    // initialize with one small buffer, but allow growth
+    mBufferGroup->init(1 /* one buffer */, 256 /* buffer size */, 64 /* max number of buffers */);
+
+    if (mSource->start(NULL) == OK) { // AnotherPacketSource::start doesn't use its argument
+        return AMEDIA_OK;
+    }
+    return AMEDIA_ERROR_UNKNOWN;
 }
 
-status_t MPEG2PSExtractor::Track::stop() {
+media_status_t MPEG2PSExtractor::Track::stop() {
     if (mSource == NULL) {
-        return NO_INIT;
+        return AMEDIA_ERROR_UNKNOWN;
     }
 
-    return mSource->stop();
+    if (mSource->stop() == OK) {
+        return AMEDIA_OK;
+    }
+    return AMEDIA_ERROR_UNKNOWN;
 }
 
-status_t MPEG2PSExtractor::Track::getFormat(MetaDataBase &meta) {
+void copyAMessageToAMediaFormat(AMediaFormat *format, sp<AMessage> msg);
+
+media_status_t MPEG2PSExtractor::Track::getFormat(AMediaFormat *meta) {
     if (mSource == NULL) {
-        return NO_INIT;
+        return AMEDIA_ERROR_UNKNOWN;
     }
 
     sp<MetaData> sourceMeta = mSource->getFormat();
-    meta = *sourceMeta;
-    return OK;
+    sp<AMessage> msg;
+    convertMetaDataToMessage(sourceMeta, &msg);
+    copyAMessageToAMediaFormat(meta, msg);
+    return AMEDIA_OK;
 }
 
-status_t MPEG2PSExtractor::Track::read(
-        MediaBufferBase **buffer, const ReadOptions *options) {
+media_status_t MPEG2PSExtractor::Track::read(
+        MediaBufferHelper **buffer, const ReadOptions *options) {
     if (mSource == NULL) {
-        return NO_INIT;
+        return AMEDIA_ERROR_UNKNOWN;
     }
 
     status_t finalResult;
     while (!mSource->hasBufferAvailable(&finalResult)) {
         if (finalResult != OK) {
-            return ERROR_END_OF_STREAM;
+            return AMEDIA_ERROR_END_OF_STREAM;
         }
 
         status_t err = mExtractor->feedMore();
@@ -680,7 +695,47 @@
         }
     }
 
-    return mSource->read(buffer, (MediaSource::ReadOptions*)options);
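+    // The legacy source still hands out MediaBufferBase; copy its payload into a
+    // buffer acquired from the plugin-owned group and translate the metadata keys
+    // the framework consumes (timestamp, sync flag, crypto parameters, SEI).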
+    MediaBufferBase *mbuf;
+    mSource->read(&mbuf, (MediaTrack::ReadOptions*) options);
+    size_t length = mbuf->range_length();
+    MediaBufferHelper *outbuf;
+    mBufferGroup->acquire_buffer(&outbuf, false, length);
+    memcpy(outbuf->data(), mbuf->data(), length);
+    outbuf->set_range(0, length);
+    *buffer = outbuf;
+    MetaDataBase &inMeta = mbuf->meta_data();
+    AMediaFormat *outMeta = outbuf->meta_data();
+    int64_t val64;
+    if (inMeta.findInt64(kKeyTime, &val64)) {
+        AMediaFormat_setInt64(outMeta, AMEDIAFORMAT_KEY_TIME_US, val64);
+    }
+    int32_t val32;
+    if (inMeta.findInt32(kKeyIsSyncFrame, &val32)) {
+        AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, val32);
+    }
+    if (inMeta.findInt32(kKeyCryptoMode, &val32)) {
+        AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_CRYPTO_MODE, val32);
+    }
+    uint32_t bufType;
+    const void *bufData;
+    size_t bufSize;
+    if (inMeta.findData(kKeyCryptoIV, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_IV, bufData, bufSize);
+    }
+    if (inMeta.findData(kKeyCryptoKey, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_KEY, bufData, bufSize);
+    }
+    if (inMeta.findData(kKeyPlainSizes, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES, bufData, bufSize);
+    }
+    if (inMeta.findData(kKeyEncryptedSizes, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES, bufData, bufSize);
+    }
+    if (inMeta.findData(kKeySEI, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_SEI, bufData, bufSize);
+    }
+    mbuf->release();
+    return AMEDIA_OK;
 }
 
 status_t MPEG2PSExtractor::Track::appendPESData(
@@ -726,7 +781,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 MPEG2PSExtractor::WrappedTrack::WrappedTrack(
-        MPEG2PSExtractor *extractor, const sp<Track> &track)
+        MPEG2PSExtractor *extractor, Track *track)
     : mExtractor(extractor),
       mTrack(track) {
 }
@@ -734,20 +789,20 @@
 MPEG2PSExtractor::WrappedTrack::~WrappedTrack() {
 }
 
-status_t MPEG2PSExtractor::WrappedTrack::start() {
+media_status_t MPEG2PSExtractor::WrappedTrack::start() {
     return mTrack->start();
 }
 
-status_t MPEG2PSExtractor::WrappedTrack::stop() {
+media_status_t MPEG2PSExtractor::WrappedTrack::stop() {
     return mTrack->stop();
 }
 
-status_t MPEG2PSExtractor::WrappedTrack::getFormat(MetaDataBase &meta) {
+media_status_t MPEG2PSExtractor::WrappedTrack::getFormat(AMediaFormat *meta) {
     return mTrack->getFormat(meta);
 }
 
-status_t MPEG2PSExtractor::WrappedTrack::read(
-        MediaBufferBase **buffer, const ReadOptions *options) {
+media_status_t MPEG2PSExtractor::WrappedTrack::read(
+        MediaBufferHelper **buffer, const ReadOptions *options) {
     return mTrack->read(buffer, options);
 }
 
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.h b/media/extractors/mpeg2/MPEG2PSExtractor.h
index c4082ef..e5d591f 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.h
@@ -37,9 +37,9 @@
 
     virtual size_t countTracks();
     virtual MediaTrackHelper *getTrack(size_t index);
-    virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags);
+    virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
-    virtual status_t getMetaData(MetaDataBase& meta);
+    virtual media_status_t getMetaData(AMediaFormat *meta);
 
     virtual uint32_t flags() const;
     virtual const char * name() { return "MPEG2PSExtractor"; }
@@ -57,7 +57,7 @@
     off64_t mOffset;
     status_t mFinalResult;
     sp<ABuffer> mBuffer;
-    KeyedVector<unsigned, sp<Track> > mTracks;
+    KeyedVector<unsigned, Track* > mTracks;
     bool mScanning;
 
     bool mProgramStreamMapValid;
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index 605b13a..3bb2af7 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -34,6 +34,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 #include <utils/String8.h>
 
 #include "mpeg2ts/AnotherPacketSource.h"
@@ -57,12 +58,12 @@
             bool doesSeek);
     virtual ~MPEG2TSSource();
 
-    virtual status_t start();
-    virtual status_t stop();
-    virtual status_t getFormat(MetaDataBase &);
+    virtual media_status_t start();
+    virtual media_status_t stop();
+    virtual media_status_t getFormat(AMediaFormat *);
 
-    virtual status_t read(
-            MediaBufferBase **buffer, const ReadOptions *options = NULL);
+    virtual media_status_t read(
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
 private:
     MPEG2TSExtractor *mExtractor;
@@ -87,22 +88,84 @@
 MPEG2TSSource::~MPEG2TSSource() {
 }
 
-status_t MPEG2TSSource::start() {
-    return mImpl->start(NULL); // AnotherPacketSource::start() doesn't use its argument
+media_status_t MPEG2TSSource::start() {
+    // initialize with one small buffer, but allow growth
+    mBufferGroup->init(1 /* one buffer */, 256 /* buffer size */, 64 /* max number of buffers */);
+
+    if (!mImpl->start(NULL)) { // AnotherPacketSource::start() doesn't use its argument
+        return AMEDIA_OK;
+    }
+    return AMEDIA_ERROR_UNKNOWN;
 }
 
-status_t MPEG2TSSource::stop() {
-    return mImpl->stop();
+media_status_t MPEG2TSSource::stop() {
+    if (!mImpl->stop()) {
+        return AMEDIA_OK;
+    }
+    return AMEDIA_ERROR_UNKNOWN;
 }
 
-status_t MPEG2TSSource::getFormat(MetaDataBase &meta) {
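+// Shared with MPEG2PSExtractor: copies each entry of an AMessage into the given
+// AMediaFormat, dispatching on the entry type (int32/int64/float/double/string/
+// buffer) and logging a warning for any unsupported type.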
+void copyAMessageToAMediaFormat(AMediaFormat *format, sp<AMessage> msg) {
+    size_t numEntries = msg->countEntries();
+    for (size_t i = 0; i < numEntries; i++) {
+        AMessage::Type type;
+        const char *name = msg->getEntryNameAt(i, &type);
+        AMessage::ItemData id = msg->getEntryAt(i);
+
+        switch (type) {
+            case AMessage::kTypeInt32:
+                int32_t val32;
+                if (id.find(&val32)) {
+                    AMediaFormat_setInt32(format, name, val32);
+                }
+                break;
+            case AMessage::kTypeInt64:
+                int64_t val64;
+                if (id.find(&val64)) {
+                    AMediaFormat_setInt64(format, name, val64);
+                }
+                break;
+            case AMessage::kTypeFloat:
+                float valfloat;
+                if (id.find(&valfloat)) {
+                    AMediaFormat_setFloat(format, name, valfloat);
+                }
+                break;
+            case AMessage::kTypeDouble:
+                double valdouble;
+                if (id.find(&valdouble)) {
+                    AMediaFormat_setDouble(format, name, valdouble);
+                }
+                break;
+            case AMessage::kTypeString:
+                if (AString s; id.find(&s)) {
+                    AMediaFormat_setString(format, name, s.c_str());
+                }
+                break;
+            case AMessage::kTypeBuffer:
+            {
+                sp<ABuffer> buffer;
+                if (id.find(&buffer)) {
+                    AMediaFormat_setBuffer(format, name, buffer->data(), buffer->size());
+                }
+                break;
+            }
+            default:
+                ALOGW("ignoring unsupported type %d '%s'", type, name);
+        }
+    }
+}
+
+media_status_t MPEG2TSSource::getFormat(AMediaFormat *meta) {
     sp<MetaData> implMeta = mImpl->getFormat();
-    meta = *implMeta;
-    return OK;
+    sp<AMessage> msg;
+    convertMetaDataToMessage(implMeta, &msg);
+    copyAMessageToAMediaFormat(meta, msg);
+    return AMEDIA_OK;
 }
 
-status_t MPEG2TSSource::read(
-        MediaBufferBase **out, const ReadOptions *options) {
+media_status_t MPEG2TSSource::read(
+        MediaBufferHelper **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t seekTimeUs;
@@ -110,16 +173,59 @@
     if (mDoesSeek && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
         // seek is needed
         status_t err = mExtractor->seek(seekTimeUs, (ReadOptions::SeekMode)seekMode);
-        if (err != OK) {
-            return err;
+        if (err == ERROR_END_OF_STREAM) {
+            return AMEDIA_ERROR_END_OF_STREAM;
+        } else if (err != OK) {
+            return AMEDIA_ERROR_UNKNOWN;
         }
     }
 
     if (mExtractor->feedUntilBufferAvailable(mImpl) != OK) {
-        return ERROR_END_OF_STREAM;
+        return AMEDIA_ERROR_END_OF_STREAM;
     }
 
-    return mImpl->read(out, (MediaSource::ReadOptions*) options);
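+    // Bridge the legacy AnotherPacketSource buffer into a MediaBufferHelper: copy
+    // the payload and carry over the timestamp, sync-frame, and crypto/SEI keys.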
+    MediaBufferBase *mbuf;
+    mImpl->read(&mbuf, (MediaTrack::ReadOptions*) options);
+    size_t length = mbuf->range_length();
+    MediaBufferHelper *outbuf;
+    mBufferGroup->acquire_buffer(&outbuf, false, length);
+    memcpy(outbuf->data(), mbuf->data(), length);
+    outbuf->set_range(0, length);
+    *out = outbuf;
+    MetaDataBase &inMeta = mbuf->meta_data();
+    AMediaFormat *outMeta = outbuf->meta_data();
+    AMediaFormat_clear(outMeta);
+    int64_t val64;
+    if (inMeta.findInt64(kKeyTime, &val64)) {
+        AMediaFormat_setInt64(outMeta, AMEDIAFORMAT_KEY_TIME_US, val64);
+    }
+    int32_t val32;
+    if (inMeta.findInt32(kKeyIsSyncFrame, &val32)) {
+        AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, val32);
+    }
+    if (inMeta.findInt32(kKeyCryptoMode, &val32)) {
+        AMediaFormat_setInt32(outMeta, AMEDIAFORMAT_KEY_CRYPTO_MODE, val32);
+    }
+    uint32_t bufType;
+    const void *bufData;
+    size_t bufSize;
+    if (inMeta.findData(kKeyCryptoIV, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_IV, bufData, bufSize);
+    }
+    if (inMeta.findData(kKeyCryptoKey, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_KEY, bufData, bufSize);
+    }
+    if (inMeta.findData(kKeyPlainSizes, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_PLAIN_SIZES, bufData, bufSize);
+    }
+    if (inMeta.findData(kKeyEncryptedSizes, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES, bufData, bufSize);
+    }
+    if (inMeta.findData(kKeySEI, &bufType, &bufData, &bufSize)) {
+        AMediaFormat_setBuffer(outMeta, AMEDIAFORMAT_KEY_SEI, bufData, bufSize);
+    }
+    mbuf->release();
+    return AMEDIA_OK;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -151,22 +257,23 @@
             (mSeekSyncPoints == &mSyncPoints.editItemAt(index)));
 }
 
-status_t MPEG2TSExtractor::getTrackMetaData(
-        MetaDataBase &meta,
+media_status_t MPEG2TSExtractor::getTrackMetaData(
+        AMediaFormat *meta,
         size_t index, uint32_t /* flags */) {
     sp<MetaData> implMeta = index < mSourceImpls.size()
         ? mSourceImpls.editItemAt(index)->getFormat() : NULL;
     if (implMeta == NULL) {
-        return UNKNOWN_ERROR;
+        return AMEDIA_ERROR_UNKNOWN;
     }
-    meta = *implMeta;
-    return OK;
+    sp<AMessage> msg = new AMessage;
+    convertMetaDataToMessage(implMeta, &msg);
+    copyAMessageToAMediaFormat(meta, msg);
+    return AMEDIA_OK;
 }
 
-status_t MPEG2TSExtractor::getMetaData(MetaDataBase &meta) {
-    meta.setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
-
-    return OK;
+media_status_t MPEG2TSExtractor::getMetaData(AMediaFormat *meta) {
+    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
+    return AMEDIA_OK;
 }
 
 //static
@@ -177,7 +284,7 @@
                     || !strcasecmp(MEDIA_MIMETYPE_AUDIO_SCRAMBLED, mime));
 }
 
-status_t MPEG2TSExtractor::setMediaCas(const uint8_t* casToken, size_t size) {
+media_status_t MPEG2TSExtractor::setMediaCas(const uint8_t* casToken, size_t size) {
     HalToken halToken;
     halToken.setToExternal((uint8_t*)casToken, size);
     sp<ICas> cas = ICas::castFrom(retrieveHalInterface(halToken));
@@ -187,8 +294,9 @@
     if (err == OK) {
         ALOGI("All tracks now have descramblers");
         init();
+        return AMEDIA_OK;
     }
-    return err;
+    return AMEDIA_ERROR_UNKNOWN;
 }
 
 void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/MPEG2TSExtractor.h
index 4013442..e425d23 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.h
@@ -43,11 +43,11 @@
 
     virtual size_t countTracks();
     virtual MediaTrackHelper *getTrack(size_t index);
-    virtual status_t getTrackMetaData(MetaDataBase &meta, size_t index, uint32_t flags);
+    virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
-    virtual status_t getMetaData(MetaDataBase& meta);
+    virtual media_status_t getMetaData(AMediaFormat *meta);
 
-    virtual status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) override;
+    virtual media_status_t setMediaCas(const uint8_t* /*casToken*/, size_t /*size*/) override;
 
     virtual uint32_t flags() const;
     virtual const char * name() { return "MPEG2TSExtractor"; }
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index 29fe2b1..c3914f1 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -45,7 +45,7 @@
 
 namespace android {
 
-struct OggSource : public MediaTrackHelperV3 {
+struct OggSource : public MediaTrackHelper {
     explicit OggSource(OggExtractor *extractor);
 
     virtual media_status_t getFormat(AMediaFormat *);
@@ -54,7 +54,7 @@
     virtual media_status_t stop();
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
 protected:
     virtual ~OggSource();
@@ -82,7 +82,7 @@
 
     status_t seekToTime(int64_t timeUs);
     status_t seekToOffset(off64_t offset);
-    virtual media_status_t readNextPacket(MediaBufferHelperV3 **buffer) = 0;
+    virtual media_status_t readNextPacket(MediaBufferHelper **buffer) = 0;
 
     status_t init();
 
@@ -90,7 +90,7 @@
         return AMediaFormat_copy(meta, mFileMeta);
     }
 
-    void setBufferGroup(MediaBufferGroupHelperV3 *group) {
+    void setBufferGroup(MediaBufferGroupHelper *group) {
         mBufferGroup = group;
     }
 protected:
@@ -110,7 +110,7 @@
         int64_t mTimeUs;
     };
 
-    MediaBufferGroupHelperV3 *mBufferGroup;
+    MediaBufferGroupHelper *mBufferGroup;
     DataSourceHelper *mSource;
     off64_t mOffset;
     Page mCurrentPage;
@@ -149,7 +149,7 @@
     // 1 - bitstream identification header
     // 3 - comment header
     // 5 - codec setup header (Vorbis only)
-    virtual media_status_t verifyHeader(MediaBufferHelperV3 *buffer, uint8_t type) = 0;
+    virtual media_status_t verifyHeader(MediaBufferHelper *buffer, uint8_t type) = 0;
 
     // Read the next ogg packet from the underlying data source; optionally
     // calculate the timestamp for the output packet whilst pretending
@@ -157,9 +157,9 @@
     //
     // *buffer is NULL'ed out immediately upon entry, and if successful a new buffer is allocated;
     // clients are responsible for releasing the original buffer.
-    media_status_t _readNextPacket(MediaBufferHelperV3 **buffer, bool calcVorbisTimestamp);
+    media_status_t _readNextPacket(MediaBufferHelper **buffer, bool calcVorbisTimestamp);
 
-    int32_t getPacketBlockSize(MediaBufferHelperV3 *buffer);
+    int32_t getPacketBlockSize(MediaBufferHelper *buffer);
 
     void parseFileMetaData();
 
@@ -183,7 +183,7 @@
 
     virtual uint64_t approxBitrate() const;
 
-    virtual media_status_t readNextPacket(MediaBufferHelperV3 **buffer) {
+    virtual media_status_t readNextPacket(MediaBufferHelper **buffer) {
         return _readNextPacket(buffer, /* calcVorbisTimestamp = */ true);
     }
 
@@ -195,7 +195,7 @@
         return granulePos * 1000000ll / mVi.rate;
     }
 
-    virtual media_status_t verifyHeader(MediaBufferHelperV3 *buffer, uint8_t type);
+    virtual media_status_t verifyHeader(MediaBufferHelper *buffer, uint8_t type);
 };
 
 struct MyOpusExtractor : public MyOggExtractor {
@@ -213,16 +213,16 @@
         return 0;
     }
 
-    virtual media_status_t readNextPacket(MediaBufferHelperV3 **buffer);
+    virtual media_status_t readNextPacket(MediaBufferHelper **buffer);
 
 protected:
     virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const;
-    virtual media_status_t verifyHeader(MediaBufferHelperV3 *buffer, uint8_t type);
+    virtual media_status_t verifyHeader(MediaBufferHelper *buffer, uint8_t type);
 
 private:
-    media_status_t verifyOpusHeader(MediaBufferHelperV3 *buffer);
-    media_status_t verifyOpusComments(MediaBufferHelperV3 *buffer);
-    uint32_t getNumSamplesInPacket(MediaBufferHelperV3 *buffer) const;
+    media_status_t verifyOpusHeader(MediaBufferHelper *buffer);
+    media_status_t verifyOpusComments(MediaBufferHelper *buffer);
+    uint32_t getNumSamplesInPacket(MediaBufferHelper *buffer) const;
 
     uint8_t mChannelCount;
     uint16_t mCodecDelay;
@@ -265,7 +265,7 @@
 }
 
 media_status_t OggSource::read(
-        MediaBufferHelperV3 **out, const ReadOptions *options) {
+        MediaBufferHelper **out, const ReadOptions *options) {
     *out = NULL;
 
     int64_t seekTimeUs;
@@ -277,7 +277,7 @@
         }
     }
 
-    MediaBufferHelperV3 *packet;
+    MediaBufferHelper *packet;
     media_status_t err = mExtractor->mImpl->readNextPacket(&packet);
 
     if (err != AMEDIA_OK) {
@@ -578,13 +578,13 @@
     return sizeof(header) + page->mNumSegments + totalSize;
 }
 
-media_status_t MyOpusExtractor::readNextPacket(MediaBufferHelperV3 **out) {
+media_status_t MyOpusExtractor::readNextPacket(MediaBufferHelper **out) {
     if (mOffset <= mFirstDataOffset && mStartGranulePosition < 0) {
         // The first sample might not start at time 0; find out where by subtracting
         // the number of samples on the first page from the granule position
         // (position of last complete sample) of the first page. This happens
         // the first time before we attempt to read a packet from the first page.
-        MediaBufferHelperV3 *mBuf;
+        MediaBufferHelper *mBuf;
         uint32_t numSamples = 0;
         uint64_t curGranulePosition = 0;
         while (true) {
@@ -640,7 +640,7 @@
     return AMEDIA_OK;
 }
 
-uint32_t MyOpusExtractor::getNumSamplesInPacket(MediaBufferHelperV3 *buffer) const {
+uint32_t MyOpusExtractor::getNumSamplesInPacket(MediaBufferHelper *buffer) const {
     if (buffer == NULL || buffer->range_length() < 1) {
         return 0;
     }
@@ -690,7 +690,7 @@
  * basic mediabuffer implementation used during initial parsing of the
  * header packets, which happens before we have a buffer group
  */
-class StandAloneMediaBuffer : public MediaBufferHelperV3 {
+class StandAloneMediaBuffer : public MediaBufferHelper {
 private:
     void *mData;
     size_t mSize;
@@ -698,7 +698,7 @@
     size_t mLength;
     AMediaFormat *mFormat;
 public:
-    StandAloneMediaBuffer(size_t size) : MediaBufferHelperV3(NULL) {
+    StandAloneMediaBuffer(size_t size) : MediaBufferHelper(NULL) {
         mSize = size;
         mData = malloc(mSize);
         mOffset = 0;
@@ -742,10 +742,10 @@
     }
 };
 
-media_status_t MyOggExtractor::_readNextPacket(MediaBufferHelperV3 **out, bool calcVorbisTimestamp) {
+media_status_t MyOggExtractor::_readNextPacket(MediaBufferHelper **out, bool calcVorbisTimestamp) {
     *out = NULL;
 
-    MediaBufferHelperV3 *buffer = NULL;
+    MediaBufferHelper *buffer = NULL;
     int64_t timeUs = -1;
 
     for (;;) {
@@ -781,7 +781,7 @@
                 ALOGE("b/36592202");
                 return AMEDIA_ERROR_MALFORMED;
             }
-            MediaBufferHelperV3 *tmp;
+            MediaBufferHelper *tmp;
             if (mBufferGroup) {
                 mBufferGroup->acquire_buffer(&tmp, false, fullSize);
                 ALOGV("acquired buffer %p from group", tmp);
@@ -916,7 +916,7 @@
     AMediaFormat_setString(mMeta, AMEDIAFORMAT_KEY_MIME, mMimeType);
 
     media_status_t err;
-    MediaBufferHelperV3 *packet;
+    MediaBufferHelper *packet;
     for (size_t i = 0; i < mNumHeaders; ++i) {
         // ignore timestamp for configuration packets
         if ((err = _readNextPacket(&packet, /* calcVorbisTimestamp = */ false)) != AMEDIA_OK) {
@@ -993,7 +993,7 @@
     }
 }
 
-int32_t MyOggExtractor::getPacketBlockSize(MediaBufferHelperV3 *buffer) {
+int32_t MyOggExtractor::getPacketBlockSize(MediaBufferHelper *buffer) {
     const uint8_t *data =
         (const uint8_t *)buffer->data() + buffer->range_offset();
 
@@ -1033,7 +1033,7 @@
     return pcmSamplePosition * 1000000ll / kOpusSampleRate;
 }
 
-media_status_t MyOpusExtractor::verifyHeader(MediaBufferHelperV3 *buffer, uint8_t type) {
+media_status_t MyOpusExtractor::verifyHeader(MediaBufferHelper *buffer, uint8_t type) {
     switch (type) {
         // there are actually no header types defined in the Opus spec; we choose 1 and 3 to mean
         // header and comments such that we can share code with MyVorbisExtractor.
@@ -1046,7 +1046,7 @@
     }
 }
 
-media_status_t MyOpusExtractor::verifyOpusHeader(MediaBufferHelperV3 *buffer) {
+media_status_t MyOpusExtractor::verifyOpusHeader(MediaBufferHelper *buffer) {
     const size_t kOpusHeaderSize = 19;
     const uint8_t *data =
         (const uint8_t *)buffer->data() + buffer->range_offset();
@@ -1074,7 +1074,7 @@
     return AMEDIA_OK;
 }
 
-media_status_t MyOpusExtractor::verifyOpusComments(MediaBufferHelperV3 *buffer) {
+media_status_t MyOpusExtractor::verifyOpusComments(MediaBufferHelper *buffer) {
     // add artificial framing bit so we can reuse _vorbis_unpack_comment
     int32_t commentSize = buffer->range_length() + 1;
     auto tmp = heapbuffer<uint8_t>(commentSize);
@@ -1167,7 +1167,7 @@
 }
 
 media_status_t MyVorbisExtractor::verifyHeader(
-        MediaBufferHelperV3 *buffer, uint8_t type) {
+        MediaBufferHelper *buffer, uint8_t type) {
     const uint8_t *data =
         (const uint8_t *)buffer->data() + buffer->range_offset();
 
@@ -1335,7 +1335,7 @@
     return mInitCheck != OK ? 0 : 1;
 }
 
-MediaTrackHelperV3 *OggExtractor::getTrack(size_t index) {
+MediaTrackHelper *OggExtractor::getTrack(size_t index) {
     if (index >= 1) {
         return NULL;
     }
@@ -1357,13 +1357,13 @@
     return mImpl->getFileMetaData(meta);
 }
 
-static CMediaExtractorV3* CreateExtractor(
+static CMediaExtractor* CreateExtractor(
         CDataSource *source,
         void *) {
-    return wrapV3(new OggExtractor(new DataSourceHelper(source)));
+    return wrap(new OggExtractor(new DataSourceHelper(source)));
 }
 
-static CreatorFuncV3 Sniff(
+static CreatorFunc Sniff(
         CDataSource *source,
         float *confidence,
         void **,
@@ -1384,11 +1384,11 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT + 1,
+        EXTRACTORDEF_VERSION,
         UUID("8cc5cd06-f772-495e-8a62-cba9649374e9"),
         1, // version
         "Ogg Extractor",
-        { .v3 = Sniff }
+        { .v2 = Sniff }
     };
 }
 
diff --git a/media/extractors/ogg/OggExtractor.h b/media/extractors/ogg/OggExtractor.h
index 97506ad..c75dfa9 100644
--- a/media/extractors/ogg/OggExtractor.h
+++ b/media/extractors/ogg/OggExtractor.h
@@ -31,11 +31,11 @@
 struct MyOggExtractor;
 struct OggSource;
 
-struct OggExtractor : public MediaExtractorPluginHelperV3 {
+struct OggExtractor : public MediaExtractorPluginHelper {
     explicit OggExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/extractors/wav/WAVExtractor.cpp b/media/extractors/wav/WAVExtractor.cpp
index 1f0aae5..6f9f689 100644
--- a/media/extractors/wav/WAVExtractor.cpp
+++ b/media/extractors/wav/WAVExtractor.cpp
@@ -66,7 +66,7 @@
     return ptr[1] << 8 | ptr[0];
 }
 
-struct WAVSource : public MediaTrackHelperV3 {
+struct WAVSource : public MediaTrackHelper {
     WAVSource(
             DataSourceHelper *dataSource,
             AMediaFormat *meta,
@@ -79,7 +79,7 @@
     virtual media_status_t getFormat(AMediaFormat *meta);
 
     virtual media_status_t read(
-            MediaBufferHelperV3 **buffer, const ReadOptions *options = NULL);
+            MediaBufferHelper **buffer, const ReadOptions *options = NULL);
 
     virtual bool supportNonblockingRead() { return true; }
 
@@ -131,7 +131,7 @@
     return mInitCheck == OK ? 1 : 0;
 }
 
-MediaTrackHelperV3 *WAVExtractor::getTrack(size_t index) {
+MediaTrackHelper *WAVExtractor::getTrack(size_t index) {
     if (mInitCheck != OK || index > 0) {
         return NULL;
     }
@@ -428,7 +428,7 @@
 }
 
 media_status_t WAVSource::read(
-        MediaBufferHelperV3 **out, const ReadOptions *options) {
+        MediaBufferHelper **out, const ReadOptions *options) {
     *out = NULL;
 
     if (options != nullptr && options->getNonBlocking() && !mBufferGroup->has_buffers()) {
@@ -454,7 +454,7 @@
         mCurrentPos = pos + mOffset;
     }
 
-    MediaBufferHelperV3 *buffer;
+    MediaBufferHelper *buffer;
     media_status_t err = mBufferGroup->acquire_buffer(&buffer);
     if (err != OK) {
         return err;
@@ -581,13 +581,13 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-static CMediaExtractorV3* CreateExtractor(
+static CMediaExtractor* CreateExtractor(
         CDataSource *source,
         void *) {
-    return wrapV3(new WAVExtractor(new DataSourceHelper(source)));
+    return wrap(new WAVExtractor(new DataSourceHelper(source)));
 }
 
-static CreatorFuncV3 Sniff(
+static CreatorFunc Sniff(
         CDataSource *source,
         float *confidence,
         void **,
@@ -621,11 +621,11 @@
 __attribute__ ((visibility ("default")))
 ExtractorDef GETEXTRACTORDEF() {
     return {
-        EXTRACTORDEF_VERSION_CURRENT + 1,
+        EXTRACTORDEF_VERSION,
         UUID("7d613858-5837-4a38-84c5-332d1cddee27"),
         1, // version
         "WAV Extractor",
-        { .v3 = Sniff }
+        { .v2 = Sniff }
     };
 }
 
diff --git a/media/extractors/wav/WAVExtractor.h b/media/extractors/wav/WAVExtractor.h
index 9b7dfde..b514196 100644
--- a/media/extractors/wav/WAVExtractor.h
+++ b/media/extractors/wav/WAVExtractor.h
@@ -29,12 +29,12 @@
 struct CDataSource;
 class String8;
 
-class WAVExtractor : public MediaExtractorPluginHelperV3 {
+class WAVExtractor : public MediaExtractorPluginHelper {
 public:
     explicit WAVExtractor(DataSourceHelper *source);
 
     virtual size_t countTracks();
-    virtual MediaTrackHelperV3 *getTrack(size_t index);
+    virtual MediaTrackHelper *getTrack(size_t index);
     virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);
 
     virtual media_status_t getMetaData(AMediaFormat *meta);
diff --git a/media/libaaudio/tests/test_return_stop.cpp b/media/libaaudio/tests/test_return_stop.cpp
index f34c3c8..9a9e00c 100644
--- a/media/libaaudio/tests/test_return_stop.cpp
+++ b/media/libaaudio/tests/test_return_stop.cpp
@@ -228,8 +228,6 @@
         result = AAudioStream_requestStart(engine.stream);
         printf("AAudioStream_requestStart() returned %d >>>>>>>>>>>>>>>>>>>>>>\n", result);
         if (result != AAUDIO_OK) {
-            printf("ERROR - AAudioStream_requestStart returned %s",
-                   AAudio_convertResultToText(result));
             errorCount++;
             break;
         }
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 3e91717..dc7531c 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1388,9 +1388,14 @@
 }
 
 void AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate(
-        int event, const record_client_info_t *clientInfo,
-        const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
-        audio_patch_handle_t patchHandle) {
+                                                int event,
+                                                const record_client_info_t *clientInfo,
+                                                const audio_config_base_t *clientConfig,
+                                                std::vector<effect_descriptor_t> clientEffects,
+                                                const audio_config_base_t *deviceConfig,
+                                                std::vector<effect_descriptor_t> effects,
+                                                audio_patch_handle_t patchHandle,
+                                                audio_source_t source) {
     record_config_callback cb = NULL;
     {
         Mutex::Autolock _l(AudioSystem::gLock);
@@ -1398,7 +1403,8 @@
     }
 
     if (cb != NULL) {
-        cb(event, clientInfo, clientConfig, deviceConfig, patchHandle);
+        cb(event, clientInfo, clientConfig, clientEffects,
+           deviceConfig, effects, patchHandle, source);
     }
 }
 
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 1f6dd60..b444d2d 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -952,7 +952,8 @@
     if (rate == mSampleRate) {
         return NO_ERROR;
     }
-    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
+    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)
+            || (mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL)) {
         return INVALID_OPERATION;
     }
     if (mOutput == AUDIO_IO_HANDLE_NONE) {
diff --git a/media/libaudioclient/IAudioPolicyServiceClient.cpp b/media/libaudioclient/IAudioPolicyServiceClient.cpp
index ad7f1de..1f9eab7 100644
--- a/media/libaudioclient/IAudioPolicyServiceClient.cpp
+++ b/media/libaudioclient/IAudioPolicyServiceClient.cpp
@@ -52,12 +52,37 @@
     clientInfo->uid = (uid_t) data.readUint32();
     clientInfo->session = (audio_session_t) data.readInt32();
     clientInfo->source = (audio_source_t) data.readInt32();
+    data.read(&clientInfo->port_id, sizeof(audio_port_handle_t));
+    clientInfo->silenced = data.readBool();
 }
 
-inline void writeRecordClientInfoFromParcel(Parcel& data, const record_client_info_t *clientInfo) {
+inline void writeRecordClientInfoToParcel(Parcel& data, const record_client_info_t *clientInfo) {
     data.writeUint32((uint32_t) clientInfo->uid);
     data.writeInt32((int32_t) clientInfo->session);
     data.writeInt32((int32_t) clientInfo->source);
+    data.write(&clientInfo->port_id, sizeof(audio_port_handle_t));
+    data.writeBool(clientInfo->silenced);
+}
+
+inline void readEffectVectorFromParcel(const Parcel& data,
+                                       std::vector<effect_descriptor_t> *effects) {
+    int32_t numEffects = data.readInt32();
+    for (int32_t i = 0; i < numEffects; i++) {
+        effect_descriptor_t effect;
+        if (data.read(&effect, sizeof(effect_descriptor_t)) != NO_ERROR) {
+            break;
+        }
+        (*effects).push_back(effect);
+    }
+}
+
+inline void writeEffectVectorToParcel(Parcel& data, std::vector<effect_descriptor_t> effects) {
+    data.writeUint32((uint32_t) effects.size());
+    for (const auto& effect : effects) {
+        if (data.write(&effect, sizeof(effect_descriptor_t)) != NO_ERROR) {
+            break;
+        }
+    }
 }
 
 // ----------------------------------------------------------------------
@@ -92,16 +117,24 @@
         remote()->transact(MIX_STATE_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
     }
 
-    void onRecordingConfigurationUpdate(int event, const record_client_info_t *clientInfo,
-            const audio_config_base_t *clientConfig,
-            const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle) {
+    void onRecordingConfigurationUpdate(int event,
+                                        const record_client_info_t *clientInfo,
+                                        const audio_config_base_t *clientConfig,
+                                        std::vector<effect_descriptor_t> clientEffects,
+                                        const audio_config_base_t *deviceConfig,
+                                        std::vector<effect_descriptor_t> effects,
+                                        audio_patch_handle_t patchHandle,
+                                        audio_source_t source) {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor());
         data.writeInt32(event);
-        writeRecordClientInfoFromParcel(data, clientInfo);
+        writeRecordClientInfoToParcel(data, clientInfo);
         writeAudioConfigBaseToParcel(data, clientConfig);
+        writeEffectVectorToParcel(data, clientEffects);
         writeAudioConfigBaseToParcel(data, deviceConfig);
+        writeEffectVectorToParcel(data, effects);
         data.writeInt32(patchHandle);
+        data.writeInt32((int32_t) source);
         remote()->transact(RECORDING_CONFIGURATION_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
     }
 };
@@ -139,10 +172,15 @@
             audio_config_base_t deviceConfig;
             readRecordClientInfoFromParcel(data, &clientInfo);
             readAudioConfigBaseFromParcel(data, &clientConfig);
+            std::vector<effect_descriptor_t> clientEffects;
+            readEffectVectorFromParcel(data, &clientEffects);
             readAudioConfigBaseFromParcel(data, &deviceConfig);
+            std::vector<effect_descriptor_t> effects;
+            readEffectVectorFromParcel(data, &effects);
             audio_patch_handle_t patchHandle = (audio_patch_handle_t) data.readInt32();
-            onRecordingConfigurationUpdate(event, &clientInfo, &clientConfig, &deviceConfig,
-                    patchHandle);
+            audio_source_t source = (audio_source_t) data.readInt32();
+            onRecordingConfigurationUpdate(event, &clientInfo, &clientConfig, clientEffects,
+                                           &deviceConfig, effects, patchHandle, source);
             return NO_ERROR;
         } break;
     default:
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 74156ca..b0da5b8 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -35,9 +35,14 @@
 
 typedef void (*audio_error_callback)(status_t err);
 typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
-typedef void (*record_config_callback)(int event, const record_client_info_t *clientInfo,
-                const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
-                audio_patch_handle_t patchHandle);
+typedef void (*record_config_callback)(int event,
+                                       const record_client_info_t *clientInfo,
+                                       const audio_config_base_t *clientConfig,
+                                       std::vector<effect_descriptor_t> clientEffects,
+                                       const audio_config_base_t *deviceConfig,
+                                       std::vector<effect_descriptor_t> effects,
+                                       audio_patch_handle_t patchHandle,
+                                       audio_source_t source);
 
 class IAudioFlinger;
 class IAudioPolicyService;
@@ -448,9 +453,13 @@
         virtual void onAudioPatchListUpdate();
         virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
         virtual void onRecordingConfigurationUpdate(int event,
-                        const record_client_info_t *clientInfo,
-                        const audio_config_base_t *clientConfig,
-                        const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
+                                                    const record_client_info_t *clientInfo,
+                                                    const audio_config_base_t *clientConfig,
+                                                    std::vector<effect_descriptor_t> clientEffects,
+                                                    const audio_config_base_t *deviceConfig,
+                                                    std::vector<effect_descriptor_t> effects,
+                                                    audio_patch_handle_t patchHandle,
+                                                    audio_source_t source);
 
     private:
         Mutex                               mLock;
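
    Not part of the patch — a minimal caller-side sketch of the updated record_config_callback
    typedef shown above. The function and variable names are hypothetical; only the parameter
    types and their order come from the change (the two effect vectors and the audio_source_t
    are the newly added parameters).

        // Hypothetical observer matching the new record_config_callback signature.
        static void recordConfigChanged(int event,
                                        const record_client_info_t *clientInfo,
                                        const audio_config_base_t *clientConfig,
                                        std::vector<effect_descriptor_t> clientEffects,
                                        const audio_config_base_t *deviceConfig,
                                        std::vector<effect_descriptor_t> effects,
                                        audio_patch_handle_t patchHandle,
                                        audio_source_t source) {
            // clientEffects, effects and source are the parameters added by this
            // change; the rest of the signature is unchanged.
            ALOGV("recording config: event %d source %d patch %d "
                  "(%zu client effects, %zu device effects)",
                  event, (int)source, (int)patchHandle,
                  clientEffects.size(), effects.size());
            (void)clientInfo; (void)clientConfig; (void)deviceConfig;
        }
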
diff --git a/media/libaudioclient/include/media/IAudioPolicyServiceClient.h b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
index e0d2495..b3c0381 100644
--- a/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
+++ b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
@@ -17,10 +17,12 @@
 #ifndef ANDROID_IAUDIOPOLICYSERVICECLIENT_H
 #define ANDROID_IAUDIOPOLICYSERVICECLIENT_H
 
+#include <vector>
 
 #include <utils/RefBase.h>
 #include <binder/IInterface.h>
 #include <system/audio.h>
+#include <system/audio_effect.h>
 
 namespace android {
 
@@ -30,6 +32,8 @@
     uid_t uid;
     audio_session_t session;
     audio_source_t source;
+    audio_port_handle_t port_id;
+    bool silenced;
 };
 
 typedef struct record_client_info record_client_info_t;
@@ -51,8 +55,11 @@
     virtual void onRecordingConfigurationUpdate(int event,
             const record_client_info_t *clientInfo,
             const audio_config_base_t *clientConfig,
+            std::vector<effect_descriptor_t> clientEffects,
             const audio_config_base_t *deviceConfig,
-            audio_patch_handle_t patchHandle) = 0;
+            std::vector<effect_descriptor_t> effects,
+            audio_patch_handle_t patchHandle,
+            audio_source_t source) = 0;
 };
 
 
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
index 277d95c..e45d81f 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
@@ -178,13 +178,15 @@
 #define CS_MIDDLE_96000_B1                    (-0.993334)
 #define CS_MIDDLE_96000_B2                     0.000000
 #define CS_MIDDLE_96000_SCALE                        15
-/* bandpass filter with fc1 270 and fc2 3703, designed using 2nd order butterworth */
-#define CS_SIDE_96000_A0                     0.016727
-#define CS_SIDE_96000_A1                     0.000000
-#define CS_SIDE_96000_A2                    (-0.016727)
-#define CS_SIDE_96000_B1                    (-1.793372)
-#define CS_SIDE_96000_B2                     0.797236
-#define CS_SIDE_96000_SCALE                        14
+/* Coefficients calculated using the tf2ss and ss2tf functions, based on the
+ * coefficients available for the 48000 Hz sampling frequency.
+ */
+#define CS_SIDE_96000_A0                     0.224326f
+#define CS_SIDE_96000_A1                     (-0.294937f)
+#define CS_SIDE_96000_A2                     0.070611f
+#define CS_SIDE_96000_B1                     (-1.792166f)
+#define CS_SIDE_96000_B2                     0.795830f
+#define CS_SIDE_96000_SCALE                  14
 
 /* Stereo Enhancer coefficients for 176400Hz sample rate.
  * The filter coefficients are obtained by carrying out
@@ -211,13 +213,15 @@
 #define CS_MIDDLE_192000_B1                    (-0.996661)
 #define CS_MIDDLE_192000_B2                     0.000000
 #define CS_MIDDLE_192000_SCALE                        15
-/* bandpass filter with fc1 270 and fc2 3703, designed using 2nd order butterworth */
-#define CS_SIDE_192000_A0                     0.008991
-#define CS_SIDE_192000_A1                    (-0.000000)
-#define CS_SIDE_192000_A2                    (-0.008991)
-#define CS_SIDE_192000_B1                    (-1.892509)
-#define CS_SIDE_192000_B2                     0.893524
-#define CS_SIDE_192000_SCALE                       14
+/* Coefficients calculated using the tf2ss and ss2tf functions, based on the
+ * coefficients available for the 48000 Hz sampling frequency.
+ */
+#define CS_SIDE_192000_A0                    0.196039f
+#define CS_SIDE_192000_A1                    (-0.311027f)
+#define CS_SIDE_192000_A2                    0.114988f
+#define CS_SIDE_192000_B1                    (-1.891380f)
+#define CS_SIDE_192000_B2                    0.8923460f
+#define CS_SIDE_192000_SCALE                 14
 #endif
 
 /************************************************************************************/
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index 340469a..861ee64 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -20,30 +20,69 @@
 # location of test files
 testdir="/data/local/tmp/lvmTest"
 
-#flags="-bE -tE -eqE -csE"
-flags="-csE -tE -eqE"
-
-
 echo "========================================"
 echo "testing lvm"
-adb shell mkdir $testdir
+adb shell mkdir -p $testdir
 adb push $ANDROID_BUILD_TOP/cts/tests/tests/media/res/raw/sinesweepraw.raw $testdir
 adb push $OUT/testcases/lvmtest/arm64/lvmtest $testdir
 
-# run multichannel effects at different channel counts, saving only the stereo channel pair.
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_1.raw\
-                          -ch:1 -fs:44100 $flags
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_2.raw\
-                           -ch:2 -fs:44100 $flags
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_4.raw\
-                           -ch:4 -fs:44100 $flags
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_6.raw\
-                           -ch:6 -fs:44100 $flags
-adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw -o:$testdir/sinesweep_8.raw\
-                           -ch:8 -fs:44100 $flags
+flags_arr=(
+    "-csE"
+    "-eqE"
+    "-tE"
+    "-csE -tE -eqE"
+    "-bE"
+    "-csE -tE"
+    "-csE -eqE" "-tE -eqE"
+    "-csE -tE -bE -eqE"
+)
 
-# two channel files should be identical to higher channel computation (first 2 channels).
-adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_2.raw
-adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_4.raw
-adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_6.raw
-adb shell cmp $testdir/sinesweep_2.raw $testdir/sinesweep_8.raw
+fs_arr=(
+    8000
+    11025
+    12000
+    16000
+    22050
+    24000
+    32000
+    44100
+    48000
+    88200
+    96000
+    176400
+    192000
+)
+
+ch_arr=(
+    1
+    2
+    4
+    6
+    8
+)
+
+# run multichannel effects at different configs, saving only the stereo channel
+# pair.
+for flags in "${flags_arr[@]}"
+do
+    for fs in ${fs_arr[*]}
+    do
+        for ch in ${ch_arr[*]}
+        do
+            adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw \
+                -o:$testdir/sinesweep_$((ch))_$((fs)).raw -ch:$ch -fs:$fs $flags
+
+            # two channel files should be identical to higher channel
+            # computation (first 2 channels).
+            # Do not compare cases where -bE is in flags (due to mono computation)
+            if [[ $flags != *"-bE"* ]] && [ "$ch" -gt 2 ]
+            then
+                adb shell cmp $testdir/sinesweep_2_$((fs)).raw \
+                    $testdir/sinesweep_$((ch))_$((fs)).raw
+            fi
+
+        done
+    done
+done
+
+adb shell rm -r $testdir
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index 99551cc..43271d2 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -76,6 +76,7 @@
   int              samplingFreq    = 44100;
   int              nrChannels      = 2;
   int              fChannels       = 2;
+  bool             monoMode        = false;
   int              bassEffectLevel = 0;
   int              eqPresetLevel   = 0;
   int              frameLength     = 256;
@@ -98,6 +99,8 @@
   printf("\n");
   printf("\n     -ch:<process_channels> (1 through 8)\n\n");
   printf("\n     -fch:<file_channels> (1 through 8)\n\n");
+  printf("\n     -M");
+  printf("\n           Mono mode (force all input audio channels to be identical)");
   printf("\n     -basslvl:<effect_level>");
   printf("\n           A value that ranges between 0 - 15 default 0");
   printf("\n");
@@ -612,6 +615,15 @@
     }
     memcpy_to_float_from_i16(floatIn.data(), in.data(), frameLength * channelCount);
 
+    // Mono mode will replicate the first channel to all other channels.
+    // This ensures all audio channels are identical. This is useful for testing
+    // Bass Boost, which extracts a mono signal for processing.
+    if (plvmConfigParams->monoMode && channelCount > 1) {
+        for (int i = 0; i < frameLength; ++i) {
+            auto *fp = &floatIn[i * channelCount];
+            std::fill(fp + 1, fp + channelCount, *fp); // replicate ch 0
+        }
+    }
 #if 1
     errCode = lvmExecute(floatIn.data(), floatOut.data(), &context, plvmConfigParams);
     if (errCode) {
@@ -677,6 +689,8 @@
              return -1;
            }
            lvmConfigParams.fChannels = fChannels;
+    } else if (!strcmp(argv[i], "-M")) {
+      lvmConfigParams.monoMode = true;
     } else if (!strncmp(argv[i], "-basslvl:", 9)) {
       const int bassEffectLevel = atoi(argv[i] + 9);
       if (bassEffectLevel > 15 || bassEffectLevel < 0) {
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index b2c91c4..56ee18e 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -65,7 +65,7 @@
     GET_ROUTED_DEVICE_ID,
     ENABLE_AUDIO_DEVICE_CALLBACK,
     GET_ACTIVE_MICROPHONES,
-
+    GET_PORT_ID,
 };
 
 class BpMediaRecorder: public BpInterface<IMediaRecorder>
@@ -407,6 +407,23 @@
         return status;
     }
 
+    status_t getPortId(audio_port_handle_t *portId)
+    {
+        ALOGV("getPortId");
+        if (portId == nullptr) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_PORT_ID, data, &reply);
+        if (status != OK
+                || (status = (status_t)reply.readInt32()) != NO_ERROR) {
+            *portId = AUDIO_PORT_HANDLE_NONE;
+            return status;
+        }
+        *portId = (audio_port_handle_t)reply.readInt32();
+        return NO_ERROR;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(MediaRecorder, "android.media.IMediaRecorder");
@@ -661,6 +678,17 @@
             return NO_ERROR;
 
         }
+        case GET_PORT_ID: {
+            ALOGV("GET_PORT_ID");
+            CHECK_INTERFACE(IMediaRecorder, data, reply);
+            audio_port_handle_t portId;
+            status_t status = getPortId(&portId);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(portId);
+            }
+            return NO_ERROR;
+        }
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/MidiIoWrapper.cpp b/media/libmedia/MidiIoWrapper.cpp
index 1150d61..d8ef9cf 100644
--- a/media/libmedia/MidiIoWrapper.cpp
+++ b/media/libmedia/MidiIoWrapper.cpp
@@ -50,24 +50,15 @@
     mDataSource = nullptr;
 }
 
-MidiIoWrapper::MidiIoWrapper(DataSourceBase *source) {
-    ALOGV("MidiIoWrapper(DataSource)");
-    mFd = -1;
-    mDataSource = source;
-    off64_t l;
-    if (mDataSource->getSize(&l) == OK) {
-        mLength = l;
-    } else {
-        mLength = 0;
-    }
-}
-
 class DataSourceUnwrapper : public DataSourceBase {
 
 public:
     explicit DataSourceUnwrapper(CDataSource *csource) {
         mSource = csource;
     }
+
+    virtual ~DataSourceUnwrapper() {}
+
     virtual status_t initCheck() const { return OK; }
 
     // Returns the number of bytes read, or -1 on failure. It's not an error if
@@ -98,6 +89,7 @@
 MidiIoWrapper::MidiIoWrapper(CDataSource *csource) {
     ALOGV("MidiIoWrapper(CDataSource)");
     mFd = -1;
+    mBase = 0;
     mDataSource = new DataSourceUnwrapper(csource);
     off64_t l;
     if (mDataSource->getSize(&l) == OK) {
@@ -112,6 +104,7 @@
     if (mFd >= 0) {
         close(mFd);
     }
+    delete mDataSource;
 }
 
 int MidiIoWrapper::readAt(void *buffer, int offset, int size) {
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
index 379000e..e7c466d 100644
--- a/media/libmedia/include/media/IMediaRecorder.h
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -73,7 +73,7 @@
     virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
     virtual status_t getActiveMicrophones(
                         std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
-
+    virtual status_t getPortId(audio_port_handle_t *portId) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index 5340dde..e1c5d47 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -72,6 +72,7 @@
     virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
     virtual status_t getActiveMicrophones(
                         std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
+    virtual status_t getPortId(audio_port_handle_t *portId) const = 0;
 
 
 
diff --git a/media/libmedia/include/media/MidiIoWrapper.h b/media/libmedia/include/media/MidiIoWrapper.h
index 6309dda..b19d49e 100644
--- a/media/libmedia/include/media/MidiIoWrapper.h
+++ b/media/libmedia/include/media/MidiIoWrapper.h
@@ -24,12 +24,12 @@
 namespace android {
 
 struct CDataSource;
+class DataSourceUnwrapper;
 
 class MidiIoWrapper {
 public:
     explicit MidiIoWrapper(const char *path);
     explicit MidiIoWrapper(int fd, off64_t offset, int64_t size);
-    explicit MidiIoWrapper(DataSourceBase *source);
     explicit MidiIoWrapper(CDataSource *csource);
 
     ~MidiIoWrapper();
@@ -43,7 +43,7 @@
     int mFd;
     off64_t mBase;
     int64_t  mLength;
-    DataSourceBase *mDataSource;
+    DataSourceUnwrapper *mDataSource;
     EAS_FILE mEasFile;
 };
 
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index bdf1aae..caa0186 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -264,6 +264,7 @@
     status_t    getRoutedDeviceId(audio_port_handle_t *deviceId);
     status_t    enableAudioDeviceCallback(bool enabled);
     status_t    getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+    status_t    getPortId(audio_port_handle_t *portId) const;
 
 private:
     void                    doCleanUp();
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 92cfb1c..d07e703 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -842,4 +842,15 @@
     return mMediaRecorder->getActiveMicrophones(activeMicrophones);
 }
 
+status_t MediaRecorder::getPortId(audio_port_handle_t *portId) const
+{
+    ALOGV("getPortId");
+
+    if (mMediaRecorder == NULL) {
+        ALOGE("media recorder is not initialized yet");
+        return INVALID_OPERATION;
+    }
+    return mMediaRecorder->getPortId(portId);
+}
+
 } // namespace android
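
    Not part of the patch — a minimal sketch of how the new MediaRecorder::getPortId() query
    could be used once the recorder has an audio source configured: it returns the audio port
    handle of the recorder's audio input, which can be matched against the port_id now carried
    in record_client_info_t. The package name and surrounding setup are illustrative assumptions.

        // Hypothetical caller-side sketch of the new query (names illustrative).
        sp<MediaRecorder> recorder = new MediaRecorder(String16("com.example.app"));
        // ... setAudioSource() / setOutputFormat() / prepare() as usual ...
        audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
        if (recorder->getPortId(&portId) == NO_ERROR) {
            ALOGV("recorder audio port id: %d", portId);
        }
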
diff --git a/media/libmediaextractor/Android.bp b/media/libmediaextractor/Android.bp
index 6f2b35f..4758cd6 100644
--- a/media/libmediaextractor/Android.bp
+++ b/media/libmediaextractor/Android.bp
@@ -14,11 +14,19 @@
         "-Wall",
     ],
 
+    static: {
+        cflags: [
+            "-Wno-multichar",
+            "-Werror",
+            "-Wall",
+            "-DNO_IMEMORY",
+        ],
+    },
+
     shared_libs: [
         "libbinder",
         "libstagefright_foundation",
         "libutils",
-        "libcutils",
         "liblog",
     ],
 
diff --git a/media/libmediaextractor/MediaBuffer.cpp b/media/libmediaextractor/MediaBuffer.cpp
index 26d0bd4..bab3a03 100644
--- a/media/libmediaextractor/MediaBuffer.cpp
+++ b/media/libmediaextractor/MediaBuffer.cpp
@@ -51,9 +51,12 @@
       mRangeLength(size),
       mOwnsData(true),
       mMetaData(new MetaDataBase) {
+#ifndef NO_IMEMORY
     if (size < kSharedMemThreshold
             || std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
+#endif
         mData = malloc(size);
+#ifndef NO_IMEMORY
     } else {
         ALOGV("creating memoryDealer");
         sp<MemoryDealer> memoryDealer =
@@ -71,6 +74,7 @@
             ALOGV("Allocated shared mem buffer of size %zu @ %p", size, mData);
         }
     }
+#endif
 }
 
 MediaBuffer::MediaBuffer(const sp<ABuffer> &buffer)
diff --git a/media/libmediaextractor/MediaBufferGroup.cpp b/media/libmediaextractor/MediaBufferGroup.cpp
index 4e6beca..84ff9a6 100644
--- a/media/libmediaextractor/MediaBufferGroup.cpp
+++ b/media/libmediaextractor/MediaBufferGroup.cpp
@@ -62,6 +62,7 @@
         mInternal->mGrowthLimit = buffers;
     }
 
+#ifndef NO_IMEMORY
     if (buffer_size >= kSharedMemoryThreshold) {
         ALOGD("creating MemoryDealer");
         // Using a single MemoryDealer is efficient for a group of shared memory objects.
@@ -84,6 +85,9 @@
         }
         return;
     }
+#else
+    (void)kSharedMemoryThreshold;
+#endif
 
     // Non-shared memory allocation.
     for (size_t i = 0; i < buffers; ++i) {
@@ -121,6 +125,7 @@
         buffer->release();
     }
     delete mInternal;
+    delete mWrapper;
 }
 
 void MediaBufferGroup::add_buffer(MediaBufferBase *buffer) {
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
index 5b362a4..ace63ae 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
@@ -46,12 +46,13 @@
     explicit MediaBuffer(size_t size);
 
     explicit MediaBuffer(const sp<ABuffer> &buffer);
-
+#ifndef NO_IMEMORY
     MediaBuffer(const sp<IMemory> &mem) :
         MediaBuffer((uint8_t *)mem->pointer() + sizeof(SharedControl), mem->size()) {
         // delegate and override mMemory
         mMemory = mem;
     }
+#endif
 
     // If MediaBufferGroup is set, decrement the local reference count;
     // if the local reference count drops to 0, return the buffer to the
@@ -92,17 +93,26 @@
     }
 
     virtual int remoteRefcount() const {
+#ifndef NO_IMEMORY
         if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
         int32_t remoteRefcount =
                 reinterpret_cast<SharedControl *>(mMemory->pointer())->getRemoteRefcount();
         // Sanity check so that remoteRefCount() is non-negative.
         return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
+#else
+        return 0;
+#endif
     }
 
     // returns old value
     int addRemoteRefcount(int32_t value) {
+#ifndef NO_IMEMORY
         if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
         return reinterpret_cast<SharedControl *>(mMemory->pointer())->addRemoteRefcount(value);
+#else
+        (void) value;
+        return 0;
+#endif
     }
 
     bool isDeadObject() const {
@@ -110,8 +120,13 @@
     }
 
     static bool isDeadObject(const sp<IMemory> &memory) {
+#ifndef NO_IMEMORY
         if (memory.get() == nullptr || memory->pointer() == nullptr) return false;
         return reinterpret_cast<SharedControl *>(memory->pointer())->isDeadObject();
+#else
+        (void) memory;
+        return false;
+#endif
     }
 
     // Sticky on enabling of shared memory MediaBuffers. By default we don't use
@@ -204,7 +219,11 @@
     };
 
     inline SharedControl *getSharedControl() const {
+#ifndef NO_IMEMORY
          return reinterpret_cast<SharedControl *>(mMemory->pointer());
+#else
+         return nullptr;
+#endif
      }
 };
 
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
index 3682368..e2cbfc8 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
@@ -75,13 +75,16 @@
     virtual int localRefcount() const = 0;
     virtual int remoteRefcount() const = 0;
 
-    virtual ~MediaBufferBase() {};
+    virtual ~MediaBufferBase() {
+        delete mWrapper;
+        delete mFormat;
+    };
 
-    CMediaBufferV3 *wrap() {
+    CMediaBuffer *wrap() {
         if (mWrapper) {
             return mWrapper;
         }
-        mWrapper = new CMediaBufferV3;
+        mWrapper = new CMediaBuffer;
         mWrapper->handle = this;
 
         mWrapper->release = [](void *handle) -> void {
@@ -124,7 +127,7 @@
         mFormat = nullptr;
     }
 private:
-    CMediaBufferV3 *mWrapper;
+    CMediaBuffer *mWrapper;
     AMediaFormat *mFormat;
 };
 
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h b/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
index dc04556..a162116 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferGroup.h
@@ -59,12 +59,12 @@
     // If buffer is nullptr, have acquire_buffer() check for remote release.
     virtual void signalBufferReturned(MediaBufferBase *buffer);
 
-    CMediaBufferGroupV3 *wrap() {
+    CMediaBufferGroup *wrap() {
         if (mWrapper) {
             return mWrapper;
         }
 
-        mWrapper = new CMediaBufferGroupV3;
+        mWrapper = new CMediaBufferGroup;
         mWrapper->handle = this;
 
         mWrapper->add_buffer = [](void *handle, size_t size) -> void {
@@ -80,7 +80,7 @@
         };
 
         mWrapper->acquire_buffer = [](void *handle,
-                CMediaBufferV3 **buf, bool nonBlocking, size_t requestedSize) -> media_status_t {
+                CMediaBuffer **buf, bool nonBlocking, size_t requestedSize) -> media_status_t {
             MediaBufferBase *acquiredBuf = nullptr;
             status_t err = ((MediaBufferGroup*)handle)->acquire_buffer(
                     &acquiredBuf, nonBlocking, requestedSize);
@@ -100,7 +100,7 @@
     }
 
 private:
-    CMediaBufferGroupV3 *mWrapper;
+    CMediaBufferGroup *mWrapper;
     struct InternalData;
     InternalData *mInternal;
 
diff --git a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
index 9f2deda..b99c14c 100644
--- a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
@@ -199,6 +199,7 @@
 
     // HDR related
     kKeyHdrStaticInfo    = 'hdrS', // HDRStaticInfo
+    kKeyHdr10PlusInfo    = 'hdrD', // raw data
 
     // color aspects
     kKeyColorRange       = 'cRng', // int32_t, color range, value defined by ColorAspects.Range
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 8f8c478..e188e54 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -10,11 +10,9 @@
     ],
 
     shared_libs: [
-        "libbase",
         "libbinder",
         "libcutils",
         "liblog",
-        "libstagefright_foundation",
         "libutils",
     ],
 
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index c7cd7d2..a945ffd 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -86,7 +86,7 @@
                     MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC);
             status_t        notifyAt(int64_t mediaTimeUs);
             status_t        getCurrentPosition(int64_t *msec);
-            status_t        getDuration(int64_t *msec);
+            status_t        getDuration(int64_t srcId, int64_t *msec);
             status_t        reset();
             status_t        setAudioStreamType(audio_stream_type_t type);
             status_t        getAudioStreamType(audio_stream_type_t *type);
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index 2ae5a8c..f432059 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -718,8 +718,15 @@
     return ret;
 }
 
-status_t MediaPlayer2::getDuration(int64_t *msec) {
+status_t MediaPlayer2::getDuration(int64_t srcId, int64_t *msec) {
     Mutex::Autolock _l(mLock);
+    // TODO: cache duration for currentSrcId and nextSrcId, and return correct
+    // value for nextSrcId.
+    if (srcId != mSrcId) {
+        *msec = -1;
+        return OK;
+    }
+
     ALOGV("getDuration_l");
     bool isValidState = (mCurrentState & (MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
             MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE));
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 81ffbc7..080d923 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -791,9 +791,13 @@
             sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
+            int64_t srcId;
+            CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
+
             PlayerMessage* reply;
             CHECK(msg->findPointer("reply", (void**)&reply));
 
+            // TODO: use correct source info based on srcId.
             size_t inbandTracks = 0;
             if (mCurrentSourceInfo.mSource != NULL) {
                 inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
@@ -824,10 +828,14 @@
 
         case kWhatGetSelectedTrack:
         {
+            int64_t srcId;
+            CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
+
             int32_t type32;
             CHECK(msg->findInt32("type", (int32_t*)&type32));
             media_track_type type = (media_track_type)type32;
 
+            // TODO: use correct source info based on srcId.
             size_t inbandTracks = 0;
             status_t err = INVALID_OPERATION;
             ssize_t selectedTrack = -1;
@@ -863,15 +871,18 @@
             sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
+            int64_t srcId;
             size_t trackIndex;
             int32_t select;
             int64_t timeUs;
+            CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
             CHECK(msg->findSize("trackIndex", &trackIndex));
             CHECK(msg->findInt32("select", &select));
             CHECK(msg->findInt64("timeUs", &timeUs));
 
             status_t err = INVALID_OPERATION;
 
+            // TODO: use correct source info based on srcId.
             size_t inbandTracks = 0;
             if (mCurrentSourceInfo.mSource != NULL) {
                 inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
@@ -2324,8 +2335,9 @@
     return OK;
 }
 
-status_t NuPlayer2::getTrackInfo(PlayerMessage* reply) const {
+status_t NuPlayer2::getTrackInfo(int64_t srcId, PlayerMessage* reply) const {
     sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
+    msg->setInt64("srcId", srcId);
     msg->setPointer("reply", reply);
 
     sp<AMessage> response;
@@ -2333,9 +2345,10 @@
     return err;
 }
 
-status_t NuPlayer2::getSelectedTrack(int32_t type, PlayerMessage* reply) const {
+status_t NuPlayer2::getSelectedTrack(int64_t srcId, int32_t type, PlayerMessage* reply) const {
     sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, this);
     msg->setPointer("reply", reply);
+    msg->setInt64("srcId", srcId);
     msg->setInt32("type", type);
 
     sp<AMessage> response;
@@ -2346,8 +2359,9 @@
     return err;
 }
 
-status_t NuPlayer2::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
+status_t NuPlayer2::selectTrack(int64_t srcId, size_t trackIndex, bool select, int64_t timeUs) {
     sp<AMessage> msg = new AMessage(kWhatSelectTrack, this);
+    msg->setInt64("srcId", srcId);
     msg->setSize("trackIndex", trackIndex);
     msg->setInt32("select", select);
     msg->setInt64("timeUs", timeUs);
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.h b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
index e9b5f11..fdc128f 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
@@ -82,9 +82,9 @@
     void rewind();
 
     status_t setVideoScalingMode(int32_t mode);
-    status_t getTrackInfo(PlayerMessage* reply) const;
-    status_t getSelectedTrack(int32_t type, PlayerMessage* reply) const;
-    status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
+    status_t getTrackInfo(int64_t srcId, PlayerMessage* reply) const;
+    status_t getSelectedTrack(int64_t srcId, int32_t type, PlayerMessage* reply) const;
+    status_t selectTrack(int64_t srcId, size_t trackIndex, bool select, int64_t timeUs);
     status_t getCurrentPosition(int64_t *mediaUs);
     void getStats(Vector<sp<AMessage> > *mTrackStats);
 
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index 56d708a..2dab2dd 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -603,28 +603,33 @@
 
         case MEDIA_PLAYER2_INVOKE_ID_GET_TRACK_INFO:
         {
-            return mPlayer->getTrackInfo(response);
+            int64_t srcId = (it++)->int64_value();
+            return mPlayer->getTrackInfo(srcId, response);
         }
 
         case MEDIA_PLAYER2_INVOKE_ID_SELECT_TRACK:
         {
+            int64_t srcId = (it++)->int64_value();
             int trackIndex = (it++)->int32_value();
             int64_t msec = 0;
             // getCurrentPosition should always return OK
             getCurrentPosition(&msec);
-            return mPlayer->selectTrack(trackIndex, true /* select */, msec * 1000LL);
+            return mPlayer->selectTrack(srcId, trackIndex, true /* select */, msec * 1000LL);
         }
 
         case MEDIA_PLAYER2_INVOKE_ID_UNSELECT_TRACK:
         {
+            int64_t srcId = (it++)->int64_value();
             int trackIndex = (it++)->int32_value();
-            return mPlayer->selectTrack(trackIndex, false /* select */, 0xdeadbeef /* not used */);
+            return mPlayer->selectTrack(
+                    srcId, trackIndex, false /* select */, 0xdeadbeef /* not used */);
         }
 
         case MEDIA_PLAYER2_INVOKE_ID_GET_SELECTED_TRACK:
         {
+            int64_t srcId = (it++)->int64_value();
             int32_t type = (it++)->int32_value();
-            return mPlayer->getSelectedTrack(type, response);
+            return mPlayer->getSelectedTrack(srcId, type, response);
         }
 
         default:
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
index 9d9e179..e3c9b4b 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
@@ -28,7 +28,7 @@
 #include <media/stagefright/MediaClock.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/Utils.h>
-#include <media/stagefright/VideoFrameScheduler.h>
+#include <media/stagefright/VideoFrameScheduler2.h>
 #include <media/MediaCodecBuffer.h>
 
 #include <inttypes.h>
@@ -1436,7 +1436,7 @@
 
     if (mHasVideo) {
         if (mVideoScheduler == NULL) {
-            mVideoScheduler = new VideoFrameScheduler();
+            mVideoScheduler = new VideoFrameScheduler2();
             mVideoScheduler->init();
         }
     }
@@ -1779,7 +1779,7 @@
 
 void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
     if (mVideoScheduler == NULL) {
-        mVideoScheduler = new VideoFrameScheduler();
+        mVideoScheduler = new VideoFrameScheduler2();
     }
     mVideoScheduler->init(fps);
 }
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
index 305af68..484d9b7 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
@@ -28,7 +28,7 @@
 class  JWakeLock;
 struct MediaClock;
 class MediaCodecBuffer;
-struct VideoFrameScheduler;
+struct VideoFrameSchedulerBase;
 
 struct NuPlayer2::Renderer : public AHandler {
     enum Flags {
@@ -156,7 +156,7 @@
     List<QueueEntry> mAudioQueue;
     List<QueueEntry> mVideoQueue;
     uint32_t mNumFramesWritten;
-    sp<VideoFrameScheduler> mVideoScheduler;
+    sp<VideoFrameSchedulerBase> mVideoScheduler;
 
     bool mDrainAudioQueuePending;
     bool mDrainVideoQueuePending;
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 4206647..3fa8e3f 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -537,4 +537,13 @@
     }
     return NO_INIT;
 }
+
+status_t MediaRecorderClient::getPortId(audio_port_handle_t *portId) {
+    ALOGV("getPortId");
+    Mutex::Autolock lock(mLock);
+    if (mRecorder != NULL) {
+        return mRecorder->getPortId(portId);
+    }
+    return NO_INIT;
+}
 }; // namespace android
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index d2e681f..303cefc 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -109,6 +109,7 @@
     virtual     status_t   enableAudioDeviceCallback(bool enabled);
     virtual     status_t   getActiveMicrophones(
                               std::vector<media::MicrophoneInfo>* activeMicrophones);
+                status_t   getPortId(audio_port_handle_t *portId) override;
 
 private:
     friend class           MediaPlayerService;  // for accessing private constructor
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index eae52c2..f2a3038 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -2255,6 +2255,12 @@
     return NO_INIT;
 }
 
+status_t StagefrightRecorder::getPortId(audio_port_handle_t *portId) const {
+    if (mAudioSourceNode != 0) {
+        return mAudioSourceNode->getPortId(portId);
+    }
+    return NO_INIT;
+}
 
 status_t StagefrightRecorder::dump(
         int fd, const Vector<String16>& args) const {
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 2ada301..a292e58 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -77,7 +77,7 @@
     virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
     virtual status_t enableAudioDeviceCallback(bool enabled);
     virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
-
+            status_t getPortId(audio_port_handle_t *portId) const override;
 
 private:
     mutable Mutex mLock;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index a047975..a521f62 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -28,7 +28,7 @@
 class  AWakeLock;
 struct MediaClock;
 class MediaCodecBuffer;
-struct VideoFrameScheduler;
+struct VideoFrameSchedulerBase;
 
 struct NuPlayer::Renderer : public AHandler {
     enum Flags {
@@ -156,7 +156,7 @@
     List<QueueEntry> mAudioQueue;
     List<QueueEntry> mVideoQueue;
     uint32_t mNumFramesWritten;
-    sp<VideoFrameScheduler> mVideoScheduler;
+    sp<VideoFrameSchedulerBase> mVideoScheduler;
 
     bool mDrainAudioQueuePending;
     bool mDrainVideoQueuePending;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 114f492..6ad0417 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -576,6 +576,7 @@
       mTunneled(false),
       mDescribeColorAspectsIndex((OMX_INDEXTYPE)0),
       mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0),
+      mDescribeHDR10PlusInfoIndex((OMX_INDEXTYPE)0),
       mStateGeneration(0),
       mVendorExtensionsStatus(kExtensionsUnchecked) {
     memset(&mLastHDRStaticInfo, 0, sizeof(mLastHDRStaticInfo));
@@ -3765,8 +3766,17 @@
             "OMX.google.android.index.describeHDRStaticInfo", &mDescribeHDRStaticInfoIndex);
     if (err != OK) {
         mDescribeHDRStaticInfoIndex = (OMX_INDEXTYPE)0;
+        return err;
     }
-    return err;
+
+    err = mOMXNode->getExtensionIndex(
+                "OMX.google.android.index.describeHDR10PlusInfo", &mDescribeHDR10PlusInfoIndex);
+    if (err != OK) {
+        mDescribeHDR10PlusInfoIndex = (OMX_INDEXTYPE)0;
+        return err;
+    }
+
+    return OK;
 }
 
 status_t ACodec::setHDRStaticInfo(const DescribeHDRStaticInfoParams &params) {
@@ -5397,6 +5407,70 @@
     return getVendorParameters(portIndex, notify);
 }
 
+DescribeHDR10PlusInfoParams* ACodec::getHDR10PlusInfo(size_t paramSizeUsed) {
+    if (mDescribeHDR10PlusInfoIndex == 0) {
+        ALOGE("getHDR10PlusInfo: does not support DescribeHDR10PlusInfoParams");
+        return nullptr;
+    }
+
+    size_t newSize = sizeof(DescribeHDR10PlusInfoParams) - 1 +
+            ((paramSizeUsed > 0) ? paramSizeUsed : 512);
+    if (mHdr10PlusScratchBuffer == nullptr
+            || newSize > mHdr10PlusScratchBuffer->size()) {
+        mHdr10PlusScratchBuffer = new ABuffer(newSize);
+    }
+    DescribeHDR10PlusInfoParams *config =
+            (DescribeHDR10PlusInfoParams *)mHdr10PlusScratchBuffer->data();
+    InitOMXParams(config);
+    config->nSize = mHdr10PlusScratchBuffer->size();
+    config->nPortIndex = 1;
+    size_t paramSize = config->nSize - sizeof(DescribeHDR10PlusInfoParams) + 1;
+    config->nParamSize = paramSize;
+    config->nParamSizeUsed = 0;
+    status_t err = mOMXNode->getConfig(
+            (OMX_INDEXTYPE)mDescribeHDR10PlusInfoIndex,
+            config, config->nSize);
+    if (err != OK) {
+        ALOGE("failed to get DescribeHDR10PlusInfoParams (err %d)", err);
+        return nullptr;
+    }
+    if (config->nParamSize != paramSize) {
+        ALOGE("DescribeHDR10PlusInfoParams alters nParamSize: %u vs %zu",
+                config->nParamSize, paramSize);
+        return nullptr;
+    }
+    if (paramSizeUsed > 0 && config->nParamSizeUsed != paramSizeUsed) {
+        ALOGE("DescribeHDR10PlusInfoParams returns wrong nParamSizeUsed: %u vs %zu",
+                config->nParamSizeUsed, paramSizeUsed);
+        return nullptr;
+    }
+    return config;
+}
+
+void ACodec::onConfigUpdate(OMX_INDEXTYPE configIndex) {
+    if (mDescribeHDR10PlusInfoIndex == 0
+            || configIndex != mDescribeHDR10PlusInfoIndex) {
+        // mDescribeHDR10PlusInfoIndex is the only update we recognize now
+        return;
+    }
+
+    DescribeHDR10PlusInfoParams *config = getHDR10PlusInfo();
+    if (config == nullptr) {
+        return;
+    }
+    if (config->nParamSizeUsed > config->nParamSize) {
+        // try again with the size specified
+        config = getHDR10PlusInfo(config->nParamSizeUsed);
+        if (config == nullptr) {
+            return;
+        }
+    }
+
+    mOutputFormat = mOutputFormat->dup(); // trigger an output format changed event
+    mOutputFormat->setBuffer("hdr10-plus-info",
+            ABuffer::CreateAsCopy(config->nValue, config->nParamSizeUsed));
+}
+
 void ACodec::onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects) {
     // aspects are normally communicated in ColorAspects
     int32_t range, standard, transfer;
@@ -6337,6 +6411,15 @@
             }
         }
 
+        sp<ABuffer> hdr10PlusInfo;
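+        // Forward HDR10+ metadata to the surface, but only when the blob differs
+        // from the one we last pushed, to avoid redundant native window updates.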
+        if (buffer->format()->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
+                && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0
+                && hdr10PlusInfo != mCodec->mLastHdr10PlusBuffer) {
+            native_window_set_buffers_hdr10_plus_metadata(mCodec->mNativeWindow.get(),
+                    hdr10PlusInfo->size(), hdr10PlusInfo->data());
+            mCodec->mLastHdr10PlusBuffer = hdr10PlusInfo;
+        }
+
         // save buffers sent to the surface so we can get render time when they return
         int64_t mediaTimeUs = -1;
         buffer->meta()->findInt64("timeUs", &mediaTimeUs);
@@ -7475,12 +7558,45 @@
         }
     }
 
+    sp<ABuffer> hdr10PlusInfo;
+    if (params->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
+            && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
+        (void)setHdr10PlusInfo(hdr10PlusInfo);
+    }
+
     // Ignore errors as failure is expected for codecs that aren't video encoders.
     (void)configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
 
     return setVendorParameters(params);
 }
 
+status_t ACodec::setHdr10PlusInfo(const sp<ABuffer> &hdr10PlusInfo) {
+    if (mDescribeHDR10PlusInfoIndex == 0) {
+        ALOGE("setHdr10PlusInfo: does not support DescribeHDR10PlusInfoParams");
+        return ERROR_UNSUPPORTED;
+    }
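+    // Size the scratch buffer so the trailing nValue[] payload can hold the
+    // entire HDR10+ blob (same size arithmetic as in getHDR10PlusInfo()).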
+    size_t newSize = sizeof(DescribeHDR10PlusInfoParams) + hdr10PlusInfo->size() - 1;
+    if (mHdr10PlusScratchBuffer == nullptr ||
+            newSize > mHdr10PlusScratchBuffer->size()) {
+        mHdr10PlusScratchBuffer = new ABuffer(newSize);
+    }
+    DescribeHDR10PlusInfoParams *config =
+            (DescribeHDR10PlusInfoParams *)mHdr10PlusScratchBuffer->data();
+    InitOMXParams(config);
+    config->nPortIndex = 0;
+    config->nSize = newSize;
+    config->nParamSize = hdr10PlusInfo->size();
+    config->nParamSizeUsed = hdr10PlusInfo->size();
+    memcpy(config->nValue, hdr10PlusInfo->data(), hdr10PlusInfo->size());
+    status_t err = mOMXNode->setConfig(
+            (OMX_INDEXTYPE)mDescribeHDR10PlusInfoIndex,
+            config, config->nSize);
+    if (err != OK) {
+        ALOGE("failed to set DescribeHDR10PlusInfoParams (err %d)", err);
+    }
+    return OK;
+}
+
 // Removes trailing tags matching |tag| from |key| (e.g. a settings name). |minLength| specifies
 // the minimum number of characters to keep in |key| (even if it has trailing tags).
 // (Used to remove trailing 'value' tags in settings names, e.g. to normalize
@@ -7902,6 +8018,15 @@
             return true;
         }
 
+        case OMX_EventConfigUpdate:
+        {
+            CHECK_EQ(data1, (OMX_U32)kPortIndexOutput);
+
+            mCodec->onConfigUpdate((OMX_INDEXTYPE)data2);
+
+            return true;
+        }
+
         case OMX_EventBufferFlag:
         {
             return true;
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 9aea88a..249f2a4 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -77,7 +77,7 @@
     },
 }
 
-cc_library_shared {
+cc_library {
     name: "libstagefright",
 
     srcs: [
@@ -133,6 +133,7 @@
         "SurfaceUtils.cpp",
         "Utils.cpp",
         "ThrottledSource.cpp",
+        "VideoFrameSchedulerBase.cpp",
         "VideoFrameScheduler.cpp",
     ],
 
@@ -145,7 +146,6 @@
         "libdl",
         "libdrmframework",
         "libgui",
-        "libion",
         "liblog",
         "libmedia",
         "libmedia_omx",
@@ -153,7 +153,6 @@
         "libmediaextractor",
         "libmediametrics",
         "libmediautils",
-        "libnetd_client",
         "libui",
         "libutils",
         "libmedia_helper",
@@ -161,7 +160,6 @@
         "libstagefright_foundation",
         "libstagefright_omx_utils",
         "libstagefright_opus_common",
-        "libstagefright_xmlparser",
         "libRScpp",
         "libhidlallocatorutils",
         "libhidlbase",
@@ -170,8 +168,6 @@
         "android.hidl.allocator@1.0",
         "android.hardware.cas.native@1.0",
         "android.hardware.media.omx@1.0",
-        "android.hardware.graphics.allocator@2.0",
-        "android.hardware.graphics.mapper@2.0",
     ],
 
     static_libs: [
@@ -189,6 +185,7 @@
     ],
 
     header_libs:[
+        "libstagefright_xmlparser_headers",
         "media_ndk_headers",
     ],
 
@@ -237,7 +234,8 @@
         "MediaClock.cpp",
         "NdkUtils.cpp",
         "Utils.cpp",
-        "VideoFrameScheduler.cpp",
+        "VideoFrameSchedulerBase.cpp",
+        "VideoFrameScheduler2.cpp",
         "http/ClearMediaHTTP.cpp",
     ],
 
@@ -247,10 +245,12 @@
         "libnetd_client",
         "libutils",
         "libstagefright_foundation",
+        "libandroid",
     ],
 
     static_libs: [
         "libmedia_player2_util",
+        "libmedia2_jni_core",
     ],
 
     export_include_dirs: [
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 2ae3218..9de1e22 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -506,4 +506,11 @@
     return NO_INIT;
 }
 
+status_t AudioSource::getPortId(audio_port_handle_t *portId) const {
+    if (mRecord != 0) {
+        *portId = mRecord->getPortId();
+        return NO_ERROR;
+    }
+    return NO_INIT;
+}
 }  // namespace android
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp
index 03e0d12..d118e8c 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libstagefright/HTTPBase.cpp
@@ -26,8 +26,6 @@
 #include <cutils/properties.h>
 #include <cutils/qtaguid.h>
 
-#include <NetdClient.h>
-
 namespace android {
 
 HTTPBase::HTTPBase()
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 6ff3d78..a48466a 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -3674,6 +3674,29 @@
         TRESPASS();
     }
     mOwner->beginBox(fourcc);    // TextMetaDataSampleEntry
+
+    //  HACK to make the metadata track compliant with the ISO standard.
+    //
+    //  The metadata track was added in API 26, and the original implementation did
+    //  not fully follow the TextMetaDataSampleEntry specified in ISO/IEC 14496-12-2015:
+    //  only the mime_format was written out, while content_encoding and
+    //  data_reference_index were omitted. This causes failures when an MP4 parser
+    //  tries to parse the metadata track according to the standard. The hack here
+    //  makes the metadata track compliant with the standard while still maintaining
+    //  backward compatibility, so Android versions before API 29 can read the
+    //  standard-compliant metadata track generated with Android API 29 and later.
+    //  The trick relies on the fact that the metadata mime type must start with the
+    //  prefix “application/” and that the missing fields are not used in Android's
+    //  metadata track. By writing out the mime_format twice, the first mime_format
+    //  fills the missing reserved, data_reference_index and content_encoding fields.
+    //  On the parser side, extractors before API 29 read the first mime_format
+    //  correctly and drop the second one. Extractors from API 29 onward check
+    //  whether the reserved, data_reference_index and content_encoding fields are
+    //  filled with “application” to detect a standard-compliant metadata track and
+    //  read out the data accordingly.
+    mOwner->writeCString(mime);
+
     mOwner->writeCString(mime);  // metadata mime_format
     mOwner->endBox(); // mett
 }
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 70064ea..bd9e2bb 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -2197,6 +2197,13 @@
                                 }
                             }
 
+                            sp<ABuffer> hdr10PlusInfo;
+                            if (mOutputFormat->findBuffer("hdr10-plus-info", &hdr10PlusInfo)
+                                    && hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
+                                native_window_set_buffers_hdr10_plus_metadata(mSurface.get(),
+                                        hdr10PlusInfo->size(), hdr10PlusInfo->data());
+                            }
+
                             if (mime.startsWithIgnoreCase("video/")) {
                                 mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
                             }
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index ea818ff..9511931 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -43,63 +43,24 @@
 }
 
 // --------------------------------------------------------------------------------
-MediaExtractorCUnwrapperV1::MediaExtractorCUnwrapperV1(CMediaExtractor *plugin) {
+MediaExtractorCUnwrapper::MediaExtractorCUnwrapper(CMediaExtractor *plugin) {
     this->plugin = plugin;
 }
 
-MediaExtractorCUnwrapperV1::~MediaExtractorCUnwrapperV1() {
+MediaExtractorCUnwrapper::~MediaExtractorCUnwrapper() {
     plugin->free(plugin->data);
     free(plugin);
 }
 
-size_t MediaExtractorCUnwrapperV1::countTracks() {
+size_t MediaExtractorCUnwrapper::countTracks() {
     return plugin->countTracks(plugin->data);
 }
 
-MediaTrack *MediaExtractorCUnwrapperV1::getTrack(size_t index) {
+MediaTrack *MediaExtractorCUnwrapper::getTrack(size_t index) {
     return new MediaTrackCUnwrapper(plugin->getTrack(plugin->data, index));
 }
 
-status_t MediaExtractorCUnwrapperV1::getTrackMetaData(
-        MetaDataBase& meta, size_t index, uint32_t flags) {
-    return plugin->getTrackMetaData(plugin->data, meta, index, flags);
-}
-
-status_t MediaExtractorCUnwrapperV1::getMetaData(MetaDataBase& meta) {
-    return plugin->getMetaData(plugin->data, meta);
-}
-
-const char * MediaExtractorCUnwrapperV1::name() {
-    return plugin->name(plugin->data);
-}
-
-uint32_t MediaExtractorCUnwrapperV1::flags() const {
-    return plugin->flags(plugin->data);
-}
-
-status_t MediaExtractorCUnwrapperV1::setMediaCas(const uint8_t* casToken, size_t size) {
-    return plugin->setMediaCas(plugin->data, casToken, size);
-}
-
-// --------------------------------------------------------------------------------
-MediaExtractorCUnwrapperV2::MediaExtractorCUnwrapperV2(CMediaExtractorV2 *plugin) {
-    this->plugin = plugin;
-}
-
-MediaExtractorCUnwrapperV2::~MediaExtractorCUnwrapperV2() {
-    plugin->free(plugin->data);
-    free(plugin);
-}
-
-size_t MediaExtractorCUnwrapperV2::countTracks() {
-    return plugin->countTracks(plugin->data);
-}
-
-MediaTrack *MediaExtractorCUnwrapperV2::getTrack(size_t index) {
-    return new MediaTrackCUnwrapperV2(plugin->getTrack(plugin->data, index));
-}
-
-status_t MediaExtractorCUnwrapperV2::getTrackMetaData(
+status_t MediaExtractorCUnwrapper::getTrackMetaData(
         MetaDataBase& meta, size_t index, uint32_t flags) {
     sp<AMessage> msg = new AMessage();
     AMediaFormat *format =  AMediaFormat_fromMsg(&msg);
@@ -111,7 +72,7 @@
     return reverse_translate_error(ret);
 }
 
-status_t MediaExtractorCUnwrapperV2::getMetaData(MetaDataBase& meta) {
+status_t MediaExtractorCUnwrapper::getMetaData(MetaDataBase& meta) {
     sp<AMessage> msg = new AMessage();
     AMediaFormat *format =  AMediaFormat_fromMsg(&msg);
     media_status_t ret = plugin->getMetaData(plugin->data, format);
@@ -122,68 +83,15 @@
     return reverse_translate_error(ret);
 }
 
-const char * MediaExtractorCUnwrapperV2::name() {
+const char * MediaExtractorCUnwrapper::name() {
     return plugin->name(plugin->data);
 }
 
-uint32_t MediaExtractorCUnwrapperV2::flags() const {
+uint32_t MediaExtractorCUnwrapper::flags() const {
     return plugin->flags(plugin->data);
 }
 
-status_t MediaExtractorCUnwrapperV2::setMediaCas(const uint8_t* casToken, size_t size) {
-    return plugin->setMediaCas(plugin->data, casToken, size);
-}
-
-// --------------------------------------------------------------------------------
-MediaExtractorCUnwrapperV3::MediaExtractorCUnwrapperV3(CMediaExtractorV3 *plugin) {
-    this->plugin = plugin;
-}
-
-MediaExtractorCUnwrapperV3::~MediaExtractorCUnwrapperV3() {
-    plugin->free(plugin->data);
-    free(plugin);
-}
-
-size_t MediaExtractorCUnwrapperV3::countTracks() {
-    return plugin->countTracks(plugin->data);
-}
-
-MediaTrack *MediaExtractorCUnwrapperV3::getTrack(size_t index) {
-    return new MediaTrackCUnwrapperV3(plugin->getTrack(plugin->data, index));
-}
-
-status_t MediaExtractorCUnwrapperV3::getTrackMetaData(
-        MetaDataBase& meta, size_t index, uint32_t flags) {
-    sp<AMessage> msg = new AMessage();
-    AMediaFormat *format =  AMediaFormat_fromMsg(&msg);
-    media_status_t ret = plugin->getTrackMetaData(plugin->data, format, index, flags);
-    sp<MetaData> newMeta = new MetaData();
-    convertMessageToMetaData(msg, newMeta);
-    delete format;
-    meta = *newMeta;
-    return reverse_translate_error(ret);
-}
-
-status_t MediaExtractorCUnwrapperV3::getMetaData(MetaDataBase& meta) {
-    sp<AMessage> msg = new AMessage();
-    AMediaFormat *format =  AMediaFormat_fromMsg(&msg);
-    media_status_t ret = plugin->getMetaData(plugin->data, format);
-    sp<MetaData> newMeta = new MetaData();
-    convertMessageToMetaData(msg, newMeta);
-    delete format;
-    meta = *newMeta;
-    return reverse_translate_error(ret);
-}
-
-const char * MediaExtractorCUnwrapperV3::name() {
-    return plugin->name(plugin->data);
-}
-
-uint32_t MediaExtractorCUnwrapperV3::flags() const {
-    return plugin->flags(plugin->data);
-}
-
-status_t MediaExtractorCUnwrapperV3::setMediaCas(const uint8_t* casToken, size_t size) {
+status_t MediaExtractorCUnwrapper::setMediaCas(const uint8_t* casToken, size_t size) {
     return plugin->setMediaCas(plugin->data, casToken, size);
 }
 
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index 81fc4ae..2c7a4e5 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -106,24 +106,12 @@
     }
 
     MediaExtractor *ex = nullptr;
-    if (creatorVersion == 1) {
-        CMediaExtractor *ret = ((CreatorFuncV1)creator)(source->wrap(), meta);
+    if (creatorVersion == EXTRACTORDEF_VERSION_NDK_V1) {
+        CMediaExtractor *ret = ((CreatorFunc)creator)(source->wrap(), meta);
         if (meta != nullptr && freeMeta != nullptr) {
             freeMeta(meta);
         }
-        ex = ret != nullptr ? new MediaExtractorCUnwrapperV1(ret) : nullptr;
-    } else if (creatorVersion == 2) {
-        CMediaExtractorV2 *ret = ((CreatorFuncV2)creator)(source->wrap(), meta);
-        if (meta != nullptr && freeMeta != nullptr) {
-            freeMeta(meta);
-        }
-        ex = ret != nullptr ? new MediaExtractorCUnwrapperV2(ret) : nullptr;
-    } else if (creatorVersion == 3) {
-        CMediaExtractorV3 *ret = ((CreatorFuncV3)creator)(source->wrap(), meta);
-        if (meta != nullptr && freeMeta != nullptr) {
-            freeMeta(meta);
-        }
-        ex = ret != nullptr ? new MediaExtractorCUnwrapperV3(ret) : nullptr;
+        ex = ret != nullptr ? new MediaExtractorCUnwrapper(ret) : nullptr;
     }
 
     ALOGV("Created an extractor '%s' with confidence %.2f",
@@ -195,15 +183,9 @@
         FreeMetaFunc newFreeMeta = nullptr;
 
         void *curCreator = NULL;
-        if ((*it)->def.def_version == 1) {
-            curCreator = (void*) (*it)->def.sniff.v1(
-                    source->wrap(), &newConfidence, &newMeta, &newFreeMeta);
-        } else if ((*it)->def.def_version == 2) {
+        if ((*it)->def.def_version == EXTRACTORDEF_VERSION_NDK_V1) {
             curCreator = (void*) (*it)->def.sniff.v2(
                     source->wrap(), &newConfidence, &newMeta, &newFreeMeta);
-        } else if ((*it)->def.def_version == 3) {
-            curCreator = (void*) (*it)->def.sniff.v3(
-                    source->wrap(), &newConfidence, &newMeta, &newFreeMeta);
         }
 
         if (curCreator) {
@@ -232,8 +214,7 @@
 void MediaExtractorFactory::RegisterExtractor(const sp<ExtractorPlugin> &plugin,
         std::list<sp<ExtractorPlugin>> &pluginList) {
     // sanity check struct version, uuid, name
-    if (plugin->def.def_version == 0
-            || plugin->def.def_version > EXTRACTORDEF_VERSION_CURRENT + 1) {
+    if (plugin->def.def_version != EXTRACTORDEF_VERSION_NDK_V1) {
         ALOGE("don't understand extractor format %u, ignoring.", plugin->def.def_version);
         return;
     }
diff --git a/media/libstagefright/MediaTrack.cpp b/media/libstagefright/MediaTrack.cpp
index ef252f4..1c1be30 100644
--- a/media/libstagefright/MediaTrack.cpp
+++ b/media/libstagefright/MediaTrack.cpp
@@ -58,27 +58,39 @@
     return (mOptions & kSeekTo_Option) != 0;
 }
 
-/* -------------- unwrapper v1 --------------- */
+/* -------------- unwrapper --------------- */
 
 MediaTrackCUnwrapper::MediaTrackCUnwrapper(CMediaTrack *cmediatrack) {
     wrapper = cmediatrack;
+    bufferGroup = nullptr;
 }
 
 MediaTrackCUnwrapper::~MediaTrackCUnwrapper() {
     wrapper->free(wrapper->data);
     free(wrapper);
+    delete bufferGroup;
 }
 
 status_t MediaTrackCUnwrapper::start() {
-    return wrapper->start(wrapper->data);
+    if (bufferGroup == nullptr) {
+        bufferGroup = new MediaBufferGroup();
+    }
+    return reverse_translate_error(wrapper->start(wrapper->data, bufferGroup->wrap()));
 }
 
 status_t MediaTrackCUnwrapper::stop() {
-    return wrapper->stop(wrapper->data);
+    return reverse_translate_error(wrapper->stop(wrapper->data));
 }
 
 status_t MediaTrackCUnwrapper::getFormat(MetaDataBase& format) {
-    return wrapper->getFormat(wrapper->data, format);
+    sp<AMessage> msg = new AMessage();
+    AMediaFormat *tmpFormat =  AMediaFormat_fromMsg(&msg);
+    media_status_t ret = wrapper->getFormat(wrapper->data, tmpFormat);
+    sp<MetaData> newMeta = new MetaData();
+    convertMessageToMetaData(msg, newMeta);
+    delete tmpFormat;
+    format = *newMeta;
+    return reverse_translate_error(ret);
 }
 
 status_t MediaTrackCUnwrapper::read(MediaBufferBase **buffer, const ReadOptions *options) {
@@ -95,116 +107,7 @@
         opts |= SEEK;
         opts |= (uint32_t) seekMode;
     }
-
-
-    return wrapper->read(wrapper->data, buffer, opts, seekPosition);
-}
-
-bool MediaTrackCUnwrapper::supportNonblockingRead() {
-    return wrapper->supportsNonBlockingRead(wrapper->data);
-}
-
-/* -------------- unwrapper v2 --------------- */
-
-MediaTrackCUnwrapperV2::MediaTrackCUnwrapperV2(CMediaTrackV2 *cmediatrack2) {
-    wrapper = cmediatrack2;
-}
-
-MediaTrackCUnwrapperV2::~MediaTrackCUnwrapperV2() {
-    wrapper->free(wrapper->data);
-    free(wrapper);
-}
-
-status_t MediaTrackCUnwrapperV2::start() {
-    return reverse_translate_error(wrapper->start(wrapper->data));
-}
-
-status_t MediaTrackCUnwrapperV2::stop() {
-    return reverse_translate_error(wrapper->stop(wrapper->data));
-}
-
-status_t MediaTrackCUnwrapperV2::getFormat(MetaDataBase& format) {
-    sp<AMessage> msg = new AMessage();
-    AMediaFormat *tmpFormat =  AMediaFormat_fromMsg(&msg);
-    media_status_t ret = wrapper->getFormat(wrapper->data, tmpFormat);
-    sp<MetaData> newMeta = new MetaData();
-    convertMessageToMetaData(msg, newMeta);
-    delete tmpFormat;
-    format = *newMeta;
-    return reverse_translate_error(ret);
-}
-
-status_t MediaTrackCUnwrapperV2::read(MediaBufferBase **buffer, const ReadOptions *options) {
-
-    uint32_t opts = 0;
-
-    if (options && options->getNonBlocking()) {
-        opts |= CMediaTrackReadOptions::NONBLOCKING;
-    }
-
-    int64_t seekPosition = 0;
-    MediaTrack::ReadOptions::SeekMode seekMode;
-    if (options && options->getSeekTo(&seekPosition, &seekMode)) {
-        opts |= SEEK;
-        opts |= (uint32_t) seekMode;
-    }
-
-    return reverse_translate_error(wrapper->read(wrapper->data, buffer, opts, seekPosition));
-}
-
-bool MediaTrackCUnwrapperV2::supportNonblockingRead() {
-    return wrapper->supportsNonBlockingRead(wrapper->data);
-}
-
-/* -------------- unwrapper v3 --------------- */
-
-MediaTrackCUnwrapperV3::MediaTrackCUnwrapperV3(CMediaTrackV3 *cmediatrack3) {
-    wrapper = cmediatrack3;
-    bufferGroup = nullptr;
-}
-
-MediaTrackCUnwrapperV3::~MediaTrackCUnwrapperV3() {
-    wrapper->free(wrapper->data);
-    free(wrapper);
-}
-
-status_t MediaTrackCUnwrapperV3::start() {
-    if (bufferGroup == nullptr) {
-        bufferGroup = new MediaBufferGroup();
-    }
-    return reverse_translate_error(wrapper->start(wrapper->data, bufferGroup->wrap()));
-}
-
-status_t MediaTrackCUnwrapperV3::stop() {
-    return reverse_translate_error(wrapper->stop(wrapper->data));
-}
-
-status_t MediaTrackCUnwrapperV3::getFormat(MetaDataBase& format) {
-    sp<AMessage> msg = new AMessage();
-    AMediaFormat *tmpFormat =  AMediaFormat_fromMsg(&msg);
-    media_status_t ret = wrapper->getFormat(wrapper->data, tmpFormat);
-    sp<MetaData> newMeta = new MetaData();
-    convertMessageToMetaData(msg, newMeta);
-    delete tmpFormat;
-    format = *newMeta;
-    return reverse_translate_error(ret);
-}
-
-status_t MediaTrackCUnwrapperV3::read(MediaBufferBase **buffer, const ReadOptions *options) {
-
-    uint32_t opts = 0;
-
-    if (options && options->getNonBlocking()) {
-        opts |= CMediaTrackReadOptions::NONBLOCKING;
-    }
-
-    int64_t seekPosition = 0;
-    MediaTrack::ReadOptions::SeekMode seekMode;
-    if (options && options->getSeekTo(&seekPosition, &seekMode)) {
-        opts |= SEEK;
-        opts |= (uint32_t) seekMode;
-    }
-    CMediaBufferV3 *buf = nullptr;
+    CMediaBuffer *buf = nullptr;
     media_status_t ret = wrapper->read(wrapper->data, &buf, opts, seekPosition);
     if (ret == AMEDIA_OK && buf != nullptr) {
         *buffer = (MediaBufferBase*)buf->handle;
@@ -265,6 +168,10 @@
             meta.setData(kKeyCryptoIV,
                     MetaDataBase::Type::TYPE_NONE, valbuf->data(), valbuf->size());
         }
+        if (format->mFormat->findBuffer("sei", &valbuf)) {
+            meta.setData(kKeySEI,
+                    MetaDataBase::Type::TYPE_NONE, valbuf->data(), valbuf->size());
+        }
     } else {
         *buffer = nullptr;
     }
@@ -272,7 +179,7 @@
     return reverse_translate_error(ret);
 }
 
-bool MediaTrackCUnwrapperV3::supportNonblockingRead() {
+bool MediaTrackCUnwrapper::supportNonblockingRead() {
     return wrapper->supportsNonBlockingRead(wrapper->data);
 }
 
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 5e8d173..163cd05 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -119,7 +119,8 @@
     }
 
     // if user/container supplied HDR static info without transfer set, assume true
-    if (format->contains("hdr-static-info") && !format->contains("color-transfer")) {
+    if ((format->contains("hdr-static-info") || format->contains("hdr10-plus-info"))
+            && !format->contains("color-transfer")) {
         return true;
     }
     // otherwise, verify that an HDR transfer function is set
@@ -634,6 +635,7 @@
         { "crypto-encrypted-sizes", kKeyEncryptedSizes },
         { "crypto-plain-sizes", kKeyPlainSizes },
         { "icc-profile", kKeyIccProfile },
+        { "sei", kKeySEI },
         { "text-format-data", kKeyTextFormatData },
     }
 };
@@ -877,6 +879,16 @@
             ColorUtils::setHDRStaticInfoIntoFormat(*(HDRStaticInfo*)data, msg);
         }
 
+        if (meta->findData(kKeyHdr10PlusInfo, &type, &data, &size)
+                && size > 0) {
+            sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+            if (buffer.get() == NULL || buffer->base() == NULL) {
+                return NO_MEMORY;
+            }
+            memcpy(buffer->data(), data, size);
+            msg->setBuffer("hdr10-plus-info", buffer);
+        }
+
         convertMetaDataToMessageColorAspects(meta, msg);
     } else if (!strncasecmp("audio/", mime, 6)) {
         int32_t numChannels, sampleRate;
@@ -1625,6 +1637,12 @@
             }
         }
 
+        sp<ABuffer> hdr10PlusInfo;
+        if (msg->findBuffer("hdr10-plus-info", &hdr10PlusInfo)) {
+            meta->setData(kKeyHdr10PlusInfo, 0,
+                    hdr10PlusInfo->data(), hdr10PlusInfo->size());
+        }
+
         convertMessageToMetaDataColorAspects(msg, meta);
 
         AString tsSchema;
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 9020fc1..4e5b5e2 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -19,8 +19,7 @@
 #include <utils/Log.h>
 #define ATRACE_TAG ATRACE_TAG_VIDEO
 #include <utils/Trace.h>
-
-#include <sys/time.h>
+#include <utils/String16.h>
 
 #include <binder/IServiceManager.h>
 #include <gui/ISurfaceComposer.h>
@@ -32,321 +31,14 @@
 
 namespace android {
 
-static const nsecs_t kNanosIn1s = 1000000000;
-
-template<class T>
-static int compare(const T *lhs, const T *rhs) {
-    if (*lhs < *rhs) {
-        return -1;
-    } else if (*lhs > *rhs) {
-        return 1;
-    } else {
-        return 0;
-    }
-}
-
-/* ======================================================================= */
-/*                                   PLL                                   */
-/* ======================================================================= */
-
-static const size_t kMinSamplesToStartPrime = 3;
-static const size_t kMinSamplesToStopPrime = VideoFrameScheduler::kHistorySize;
-static const size_t kMinSamplesToEstimatePeriod = 3;
-static const size_t kMaxSamplesToEstimatePeriod = VideoFrameScheduler::kHistorySize;
-
-static const size_t kPrecision = 12;
-static const int64_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
-static const int64_t kMultiplesThresholdDiv = 4;            // 25%
-static const int64_t kReFitThresholdDiv = 100;              // 1%
-static const nsecs_t kMaxAllowedFrameSkip = kNanosIn1s;     // 1 sec
-static const nsecs_t kMinPeriod = kNanosIn1s / 120;         // 120Hz
-static const nsecs_t kRefitRefreshPeriod = 10 * kNanosIn1s; // 10 sec
-
-VideoFrameScheduler::PLL::PLL()
-    : mPeriod(-1),
-      mPhase(0),
-      mPrimed(false),
-      mSamplesUsedForPriming(0),
-      mLastTime(-1),
-      mNumSamples(0) {
-}
-
-void VideoFrameScheduler::PLL::reset(float fps) {
-    //test();
-
-    mSamplesUsedForPriming = 0;
-    mLastTime = -1;
-
-    // set up or reset video PLL
-    if (fps <= 0.f) {
-        mPeriod = -1;
-        mPrimed = false;
-    } else {
-        ALOGV("reset at %.1f fps", fps);
-        mPeriod = (nsecs_t)(1e9 / fps + 0.5);
-        mPrimed = true;
-    }
-
-    restart();
-}
-
-// reset PLL but keep previous period estimate
-void VideoFrameScheduler::PLL::restart() {
-    mNumSamples = 0;
-    mPhase = -1;
-}
-
-#if 0
-
-void VideoFrameScheduler::PLL::test() {
-    nsecs_t period = kNanosIn1s / 60;
-    mTimes[0] = 0;
-    mTimes[1] = period;
-    mTimes[2] = period * 3;
-    mTimes[3] = period * 4;
-    mTimes[4] = period * 7;
-    mTimes[5] = period * 8;
-    mTimes[6] = period * 10;
-    mTimes[7] = period * 12;
-    mNumSamples = 8;
-    int64_t a, b, err;
-    fit(0, period * 12 / 7, 8, &a, &b, &err);
-    // a = 0.8(5)+
-    // b = -0.14097(2)+
-    // err = 0.2750578(703)+
-    ALOGD("a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
-            (long long)a, (a / (float)(1 << kPrecision)),
-            (long long)b, (b / (float)(1 << kPrecision)),
-            (long long)err, (err / (float)(1 << (kPrecision * 2))));
-}
-
-#endif
-
-bool VideoFrameScheduler::PLL::fit(
-        nsecs_t phase, nsecs_t period, size_t numSamplesToUse,
-        int64_t *a, int64_t *b, int64_t *err) {
-    if (numSamplesToUse > mNumSamples) {
-        numSamplesToUse = mNumSamples;
-    }
-
-    if ((period >> kPrecision) == 0 ) {
-        ALOGW("Period is 0, or after including precision is 0 - would cause div0, returning");
-        return false;
-    }
-
-    int64_t sumX = 0;
-    int64_t sumXX = 0;
-    int64_t sumXY = 0;
-    int64_t sumYY = 0;
-    int64_t sumY = 0;
-
-    int64_t x = 0; // x usually is in [0..numSamplesToUse)
-    nsecs_t lastTime;
-    for (size_t i = 0; i < numSamplesToUse; i++) {
-        size_t ix = (mNumSamples - numSamplesToUse + i) % kHistorySize;
-        nsecs_t time = mTimes[ix];
-        if (i > 0) {
-            x += divRound(time - lastTime, period);
-        }
-        // y is usually in [-numSamplesToUse..numSamplesToUse+kRefitRefreshPeriod/kMinPeriod) << kPrecision
-        //   ideally in [0..numSamplesToUse), but shifted by -numSamplesToUse during
-        //   priming, and possibly shifted by up to kRefitRefreshPeriod/kMinPeriod
-        //   while we are not refitting.
-        int64_t y = divRound(time - phase, period >> kPrecision);
-        sumX += x;
-        sumY += y;
-        sumXX += x * x;
-        sumXY += x * y;
-        sumYY += y * y;
-        lastTime = time;
-    }
-
-    int64_t div   = (int64_t)numSamplesToUse * sumXX - sumX * sumX;
-    if (div == 0) {
-        return false;
-    }
-
-    int64_t a_nom = (int64_t)numSamplesToUse * sumXY - sumX * sumY;
-    int64_t b_nom = sumXX * sumY            - sumX * sumXY;
-    *a = divRound(a_nom, div);
-    *b = divRound(b_nom, div);
-    // don't use a and b directly as the rounding error is significant
-    *err = sumYY - divRound(a_nom * sumXY + b_nom * sumY, div);
-    ALOGV("fitting[%zu] a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
-            numSamplesToUse,
-            (long long)*a,   (*a / (float)(1 << kPrecision)),
-            (long long)*b,   (*b / (float)(1 << kPrecision)),
-            (long long)*err, (*err / (float)(1 << (kPrecision * 2))));
-    return true;
-}
-
-void VideoFrameScheduler::PLL::prime(size_t numSamplesToUse) {
-    if (numSamplesToUse > mNumSamples) {
-        numSamplesToUse = mNumSamples;
-    }
-    CHECK(numSamplesToUse >= 3);  // must have at least 3 samples
-
-    // estimate video framerate from deltas between timestamps, and
-    // 2nd order deltas
-    Vector<nsecs_t> deltas;
-    nsecs_t lastTime, firstTime;
-    for (size_t i = 0; i < numSamplesToUse; ++i) {
-        size_t index = (mNumSamples - numSamplesToUse + i) % kHistorySize;
-        nsecs_t time = mTimes[index];
-        if (i > 0) {
-            if (time - lastTime > kMinPeriod) {
-                //ALOGV("delta: %lld", (long long)(time - lastTime));
-                deltas.push(time - lastTime);
-            }
-        } else {
-            firstTime = time;
-        }
-        lastTime = time;
-    }
-    deltas.sort(compare<nsecs_t>);
-    size_t numDeltas = deltas.size();
-    if (numDeltas > 1) {
-        nsecs_t deltaMinLimit = max(deltas[0] / kMultiplesThresholdDiv, kMinPeriod);
-        nsecs_t deltaMaxLimit = deltas[numDeltas / 2] * kMultiplesThresholdDiv;
-        for (size_t i = numDeltas / 2 + 1; i < numDeltas; ++i) {
-            if (deltas[i] > deltaMaxLimit) {
-                deltas.resize(i);
-                numDeltas = i;
-                break;
-            }
-        }
-        for (size_t i = 1; i < numDeltas; ++i) {
-            nsecs_t delta2nd = deltas[i] - deltas[i - 1];
-            if (delta2nd >= deltaMinLimit) {
-                //ALOGV("delta2: %lld", (long long)(delta2nd));
-                deltas.push(delta2nd);
-            }
-        }
-    }
-
-    // use the one that yields the best match
-    int64_t bestScore;
-    for (size_t i = 0; i < deltas.size(); ++i) {
-        nsecs_t delta = deltas[i];
-        int64_t score = 0;
-#if 1
-        // simplest score: number of deltas that are near multiples
-        size_t matches = 0;
-        for (size_t j = 0; j < deltas.size(); ++j) {
-            nsecs_t err = periodicError(deltas[j], delta);
-            if (err < delta / kMultiplesThresholdDiv) {
-                ++matches;
-            }
-        }
-        score = matches;
-#if 0
-        // could be weighed by the (1 - normalized error)
-        if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
-            int64_t a, b, err;
-            fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
-            err = (1 << (2 * kPrecision)) - err;
-            score *= max(0, err);
-        }
-#endif
-#else
-        // or use the error as a negative score
-        if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
-            int64_t a, b, err;
-            fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
-            score = -delta * err;
-        }
-#endif
-        if (i == 0 || score > bestScore) {
-            bestScore = score;
-            mPeriod = delta;
-            mPhase = firstTime;
-        }
-    }
-    ALOGV("priming[%zu] phase:%lld period:%lld",
-            numSamplesToUse, (long long)mPhase, (long long)mPeriod);
-}
-
-nsecs_t VideoFrameScheduler::PLL::addSample(nsecs_t time) {
-    if (mLastTime >= 0
-            // if time goes backward, or we skipped rendering
-            && (time > mLastTime + kMaxAllowedFrameSkip || time < mLastTime)) {
-        restart();
-    }
-
-    mLastTime = time;
-    mTimes[mNumSamples % kHistorySize] = time;
-    ++mNumSamples;
-
-    bool doFit = time > mRefitAt;
-    if ((mPeriod <= 0 || !mPrimed) && mNumSamples >= kMinSamplesToStartPrime) {
-        prime(kMinSamplesToStopPrime);
-        ++mSamplesUsedForPriming;
-        doFit = true;
-    }
-    if (mPeriod > 0 && mNumSamples >= kMinSamplesToEstimatePeriod) {
-        if (mPhase < 0) {
-            // initialize phase to the current render time
-            mPhase = time;
-            doFit = true;
-        } else if (!doFit) {
-            int64_t err = periodicError(time - mPhase, mPeriod);
-            doFit = err > mPeriod / kReFitThresholdDiv;
-        }
-
-        if (doFit) {
-            int64_t a, b, err;
-            if (!fit(mPhase, mPeriod, kMaxSamplesToEstimatePeriod, &a, &b, &err)) {
-                // samples are not suitable for fitting.  this means they are
-                // also not suitable for priming.
-                ALOGV("could not fit - keeping old period:%lld", (long long)mPeriod);
-                return mPeriod;
-            }
-
-            mRefitAt = time + kRefitRefreshPeriod;
-
-            mPhase += (mPeriod * b) >> kPrecision;
-            mPeriod = (mPeriod * a) >> kPrecision;
-            ALOGV("new phase:%lld period:%lld", (long long)mPhase, (long long)mPeriod);
-
-            if (err < kErrorThreshold) {
-                if (!mPrimed && mSamplesUsedForPriming >= kMinSamplesToStopPrime) {
-                    mPrimed = true;
-                }
-            } else {
-                mPrimed = false;
-                mSamplesUsedForPriming = 0;
-            }
-        }
-    }
-    return mPeriod;
-}
-
-nsecs_t VideoFrameScheduler::PLL::getPeriod() const {
-    return mPrimed ? mPeriod : 0;
-}
-
-/* ======================================================================= */
-/*                             Frame Scheduler                             */
-/* ======================================================================= */
-
-static const nsecs_t kDefaultVsyncPeriod = kNanosIn1s / 60;  // 60Hz
-static const nsecs_t kVsyncRefreshPeriod = kNanosIn1s;       // 1 sec
-
-VideoFrameScheduler::VideoFrameScheduler()
-    : mVsyncTime(0),
-      mVsyncPeriod(0),
-      mVsyncRefreshAt(0),
-      mLastVsyncTime(-1),
-      mTimeCorrection(0) {
+VideoFrameScheduler::VideoFrameScheduler() : VideoFrameSchedulerBase() {
 }
 
 void VideoFrameScheduler::updateVsync() {
     mVsyncRefreshAt = systemTime(SYSTEM_TIME_MONOTONIC) + kVsyncRefreshPeriod;
-    mVsyncPeriod = 0;
     mVsyncTime = 0;
+    mVsyncPeriod = 0;
 
-    // TODO: schedule frames for the destination surface
-    // For now, surface flinger only schedules frames on the primary display
     if (mComposer == NULL) {
         String16 name("SurfaceFlinger");
         sp<IServiceManager> sm = defaultServiceManager();
@@ -368,136 +60,6 @@
     }
 }
 
-void VideoFrameScheduler::init(float videoFps) {
-    updateVsync();
-
-    mLastVsyncTime = -1;
-    mTimeCorrection = 0;
-
-    mPll.reset(videoFps);
-}
-
-void VideoFrameScheduler::restart() {
-    mLastVsyncTime = -1;
-    mTimeCorrection = 0;
-
-    mPll.restart();
-}
-
-nsecs_t VideoFrameScheduler::getVsyncPeriod() {
-    if (mVsyncPeriod > 0) {
-        return mVsyncPeriod;
-    }
-    return kDefaultVsyncPeriod;
-}
-
-float VideoFrameScheduler::getFrameRate() {
-    nsecs_t videoPeriod = mPll.getPeriod();
-    if (videoPeriod > 0) {
-        return 1e9 / videoPeriod;
-    }
-    return 0.f;
-}
-
-nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) {
-    nsecs_t origRenderTime = renderTime;
-
-    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
-    if (now >= mVsyncRefreshAt) {
-        updateVsync();
-    }
-
-    // without VSYNC info, there is nothing to do
-    if (mVsyncPeriod == 0) {
-        ALOGV("no vsync: render=%lld", (long long)renderTime);
-        return renderTime;
-    }
-
-    // ensure vsync time is well before (corrected) render time
-    if (mVsyncTime > renderTime - 4 * mVsyncPeriod) {
-        mVsyncTime -=
-            ((mVsyncTime - renderTime) / mVsyncPeriod + 5) * mVsyncPeriod;
-    }
-
-    // Video presentation takes place at the VSYNC _after_ renderTime.  Adjust renderTime
-    // so this effectively becomes a rounding operation (to the _closest_ VSYNC.)
-    renderTime -= mVsyncPeriod / 2;
-
-    const nsecs_t videoPeriod = mPll.addSample(origRenderTime);
-    if (videoPeriod > 0) {
-        // Smooth out rendering
-        size_t N = 12;
-        nsecs_t fiveSixthDev =
-            abs(((videoPeriod * 5 + mVsyncPeriod) % (mVsyncPeriod * 6)) - mVsyncPeriod)
-                    / (mVsyncPeriod / 100);
-        // use 20 samples if we are doing 5:6 ratio +- 1% (e.g. playing 50Hz on 60Hz)
-        if (fiveSixthDev < 12) {  /* 12% / 6 = 2% */
-            N = 20;
-        }
-
-        nsecs_t offset = 0;
-        nsecs_t edgeRemainder = 0;
-        for (size_t i = 1; i <= N; i++) {
-            offset +=
-                (renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
-            edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
-        }
-        mTimeCorrection += mVsyncPeriod / 2 - offset / (nsecs_t)N;
-        renderTime += mTimeCorrection;
-        nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
-        edgeRemainder = abs(edgeRemainder / (nsecs_t)N - mVsyncPeriod / 2);
-        if (edgeRemainder <= mVsyncPeriod / 3) {
-            correctionLimit /= 2;
-        }
-
-        // estimate how many VSYNCs a frame will spend on the display
-        nsecs_t nextVsyncTime =
-            renderTime + mVsyncPeriod - ((renderTime - mVsyncTime) % mVsyncPeriod);
-        if (mLastVsyncTime >= 0) {
-            size_t minVsyncsPerFrame = videoPeriod / mVsyncPeriod;
-            size_t vsyncsForLastFrame = divRound(nextVsyncTime - mLastVsyncTime, mVsyncPeriod);
-            bool vsyncsPerFrameAreNearlyConstant =
-                periodicError(videoPeriod, mVsyncPeriod) / (mVsyncPeriod / 20) == 0;
-
-            if (mTimeCorrection > correctionLimit &&
-                    (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame > minVsyncsPerFrame)) {
-                // remove a VSYNC
-                mTimeCorrection -= mVsyncPeriod / 2;
-                renderTime -= mVsyncPeriod / 2;
-                nextVsyncTime -= mVsyncPeriod;
-                if (vsyncsForLastFrame > 0)
-                    --vsyncsForLastFrame;
-            } else if (mTimeCorrection < -correctionLimit &&
-                    (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) {
-                // add a VSYNC
-                mTimeCorrection += mVsyncPeriod / 2;
-                renderTime += mVsyncPeriod / 2;
-                nextVsyncTime += mVsyncPeriod;
-                if (vsyncsForLastFrame < ULONG_MAX)
-                    ++vsyncsForLastFrame;
-            } else if (mTimeCorrection < -correctionLimit * 2
-                    || mTimeCorrection > correctionLimit * 2) {
-                ALOGW("correction beyond limit: %lld vs %lld (vsyncs for last frame: %zu, min: %zu)"
-                        " restarting. render=%lld",
-                        (long long)mTimeCorrection, (long long)correctionLimit,
-                        vsyncsForLastFrame, minVsyncsPerFrame, (long long)origRenderTime);
-                restart();
-                return origRenderTime;
-            }
-
-            ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
-        }
-        mLastVsyncTime = nextVsyncTime;
-    }
-
-    // align rendertime to the center between VSYNC edges
-    renderTime -= (renderTime - mVsyncTime) % mVsyncPeriod;
-    renderTime += mVsyncPeriod / 2;
-    ALOGV("adjusting render: %lld => %lld", (long long)origRenderTime, (long long)renderTime);
-    ATRACE_INT("FRAME_FLIP_IN(ms)", (renderTime - now) / 1000000);
-    return renderTime;
-}
-
 void VideoFrameScheduler::release() {
     mComposer.clear();
 }
@@ -507,4 +69,3 @@
 }
 
 } // namespace android
-
diff --git a/media/libstagefright/VideoFrameScheduler2.cpp b/media/libstagefright/VideoFrameScheduler2.cpp
new file mode 100644
index 0000000..e02ae7d
--- /dev/null
+++ b/media/libstagefright/VideoFrameScheduler2.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoFrameScheduler2"
+#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+#include <utils/Trace.h>
+
+#include <algorithm>
+#include <jni.h>
+#include <math.h>
+
+#include <android/choreographer.h>
+#include <android/looper.h>
+#include <media/stagefright/VideoFrameScheduler2.h>
+#include <mediaplayer2/JavaVMHelper.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+namespace android {
+
+static void getVsyncOffset(long* appVsyncOffsetPtr, long* sfVsyncOffsetPtr);
+
+/* ======================================================================= */
+/*                               VsyncTracker                              */
+/* ======================================================================= */
+
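+// Keeps a sliding window of Choreographer frame timestamps and fits a simple
+// period/phase model that is used to predict upcoming vsync times.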
+class VsyncTracker : public RefBase {
+public:
+    VsyncTracker();
+    ~VsyncTracker() {}
+    long getVsyncPeriod();
+    long getVsyncTime(long periodOffset);
+    void addSample(long timestamp);
+
+private:
+    static const int kMaxSamples = 32;
+    static const int kMinSamplesForUpdate = 6;
+    int mNumSamples;
+    int mFirstSample;
+    long mReferenceTime;
+    long mPhase;
+    long mPeriod;
+    long mTimestampSamples[kMaxSamples];
+    Mutex mLock;
+
+    void updateModelLocked();
+};
+
+VsyncTracker::VsyncTracker()
+    : mNumSamples(0),
+      mFirstSample(0),
+      mReferenceTime(0),
+      mPhase(0),
+      mPeriod(0) {
+    for (int i = 0; i < kMaxSamples; i++) {
+        mTimestampSamples[i] = 0;
+    }
+}
+
+long VsyncTracker::getVsyncPeriod() {
+    Mutex::Autolock dataLock(mLock);
+    return mPeriod;
+}
+
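+// Predict a future vsync time by projecting the fitted phase/period model
+// forward from "now" by (periodOffset + 1) periods.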
+long VsyncTracker::getVsyncTime(long periodOffset) {
+    Mutex::Autolock dataLock(mLock);
+    const long now = systemTime();
+    long phase = mReferenceTime + mPhase;
+    return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
+}
+
+void VsyncTracker::addSample(long timestamp) {
+    Mutex::Autolock dataLock(mLock);
+    if (mNumSamples == 0) {
+        mPhase = 0;
+        mReferenceTime = timestamp;
+    }
+    int idx = (mFirstSample + mNumSamples) % kMaxSamples;
+    mTimestampSamples[idx] = timestamp;
+    if (mNumSamples < kMaxSamples) {
+        mNumSamples++;
+    } else {
+        mFirstSample = (mFirstSample + 1) % kMaxSamples;
+    }
+    updateModelLocked();
+}
+
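+// Re-fit the model: average the frame-to-frame durations (dropping the single
+// smallest and largest as outliers) to estimate the period, then estimate the
+// phase with a circular mean of the per-sample phase angles.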
+void VsyncTracker::updateModelLocked() {
+    if (mNumSamples < kMinSamplesForUpdate) {
+        return;
+    }
+    long durationSum = 0;
+    long minDuration = LONG_MAX;
+    long maxDuration = 0;
+
+    for (int i = 1; i < mNumSamples; i++) {
+        int idx = (mFirstSample + i) % kMaxSamples;
+        int prev = (idx + kMaxSamples - 1) % kMaxSamples;
+        long duration = mTimestampSamples[idx] - mTimestampSamples[prev];
+        durationSum += duration;
+        minDuration = min(minDuration, duration);
+        maxDuration = max(maxDuration, duration);
+    }
+
+    durationSum -= (minDuration + maxDuration);
+    mPeriod = durationSum / (mNumSamples - 3);
+
+    double sampleAvgX = 0.0;
+    double sampleAvgY = 0.0;
+    double scale = 2.0 * M_PI / (double) mPeriod;
+
+    for (int i = 1; i < mNumSamples; i++) {
+        int idx = (mFirstSample + i) % kMaxSamples;
+        long sample = mTimestampSamples[idx] - mReferenceTime;
+        double samplePhase = (double) (sample % mPeriod) * scale;
+        sampleAvgX += cos(samplePhase);
+        sampleAvgY += sin(samplePhase);
+    }
+
+    sampleAvgX /= (double) mNumSamples - 1.0;
+    sampleAvgY /= (double) mNumSamples - 1.0;
+    mPhase = (long) (atan2(sampleAvgY, sampleAvgX) / scale);
+}
+
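+// Choreographer callback: record the vsync timestamp and immediately re-post
+// ourselves so samples keep arriving for every frame.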
+static void frameCallback(long frameTimeNanos, void* data) {
+    if (data == NULL) {
+        return;
+    }
+    sp<VsyncTracker> vsyncTracker(static_cast<VsyncTracker*>(data));
+    vsyncTracker->addSample(frameTimeNanos);
+    AChoreographer_postFrameCallback(AChoreographer_getInstance(),
+            frameCallback, static_cast<void*>(vsyncTracker.get()));
+}
+
+/* ======================================================================= */
+/*                                   JNI                                   */
+/* ======================================================================= */
+
+static void getVsyncOffset(long* appVsyncOffsetPtr, long* sfVsyncOffsetPtr) {
+    static const long kOneMillisecInNanosec = 1000000;
+    static const long kOneSecInNanosec = kOneMillisecInNanosec * 1000;
+
+    JNIEnv *env = JavaVMHelper::getJNIEnv();
+    jclass jDisplayManagerGlobalCls = env->FindClass(
+            "android/hardware/display/DisplayManagerGlobal");
+    jclass jDisplayCls = env->FindClass("android/view/Display");
+
+    jmethodID jGetInstance = env->GetStaticMethodID(jDisplayManagerGlobalCls,
+            "getInstance", "()Landroid/hardware/display/DisplayManagerGlobal;");
+    jobject javaDisplayManagerGlobalObj = env->CallStaticObjectMethod(
+            jDisplayManagerGlobalCls, jGetInstance);
+
+    jfieldID jDEFAULT_DISPLAY = env->GetStaticFieldID(jDisplayCls, "DEFAULT_DISPLAY", "I");
+    jint DEFAULT_DISPLAY = env->GetStaticIntField(jDisplayCls, jDEFAULT_DISPLAY);
+
+    jmethodID jgetRealDisplay = env->GetMethodID(jDisplayManagerGlobalCls,
+            "getRealDisplay", "(I)Landroid/view/Display;");
+    jobject javaDisplayObj = env->CallObjectMethod(
+            javaDisplayManagerGlobalObj, jgetRealDisplay, DEFAULT_DISPLAY);
+
+    jmethodID jGetRefreshRate = env->GetMethodID(jDisplayCls, "getRefreshRate", "()F");
+    jfloat javaRefreshRate = env->CallFloatMethod(javaDisplayObj, jGetRefreshRate);
+    long vsyncPeriod = (long) (kOneSecInNanosec / (float) javaRefreshRate);
+
+    jmethodID jGetAppVsyncOffsetNanos = env->GetMethodID(
+            jDisplayCls, "getAppVsyncOffsetNanos", "()J");
+    jlong javaAppVsyncOffset = env->CallLongMethod(javaDisplayObj, jGetAppVsyncOffsetNanos);
+    *appVsyncOffsetPtr = (long) javaAppVsyncOffset;
+
+    jmethodID jGetPresentationDeadlineNanos = env->GetMethodID(
+            jDisplayCls, "getPresentationDeadlineNanos", "()J");
+    jlong javaPresentationDeadline = env->CallLongMethod(
+            javaDisplayObj, jGetPresentationDeadlineNanos);
+
+    *sfVsyncOffsetPtr = vsyncPeriod - ((long) javaPresentationDeadline - kOneMillisecInNanosec);
+}
+
+/* ======================================================================= */
+/*                          Choreographer Thread                           */
+/* ======================================================================= */
+
+struct ChoreographerThread : public Thread {
+    ChoreographerThread(bool canCallJava);
+    status_t init(void* data);
+    virtual status_t readyToRun() override;
+    virtual bool threadLoop() override;
+
+protected:
+    virtual ~ChoreographerThread() {}
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(ChoreographerThread);
+    void* mData;
+};
+
+ChoreographerThread::ChoreographerThread(bool canCallJava) : Thread(canCallJava) {
+}
+
+status_t ChoreographerThread::init(void* data) {
+    if (data == NULL) {
+        return NO_INIT;
+    }
+    mData = data;
+    return OK;
+}
+
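+// Prepare a looper for this thread and register the first frame callback with
+// the AChoreographer; fails if no Choreographer instance is available here.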
+status_t ChoreographerThread::readyToRun() {
+    ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+    if (AChoreographer_getInstance() == NULL) {
+        return NO_INIT;
+    }
+    AChoreographer_postFrameCallback(AChoreographer_getInstance(), frameCallback, mData);
+    return OK;
+}
+
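+// Block in the looper until the next Choreographer callback fires; returning
+// true keeps the thread looping.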
+bool ChoreographerThread::threadLoop() {
+    ALooper_pollOnce(-1, nullptr, nullptr, nullptr);
+    return true;
+}
+
+/* ======================================================================= */
+/*                             Frame Scheduler                             */
+/* ======================================================================= */
+
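+// Reads the display's vsync offsets once, then starts a Java-capable
+// Choreographer thread that feeds vsync timestamps into the VsyncTracker. If
+// the thread cannot be initialized or started, the scheduler effectively runs
+// without vsync information.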
+VideoFrameScheduler2::VideoFrameScheduler2() : VideoFrameSchedulerBase() {
+
+    getVsyncOffset(&mAppVsyncOffset, &mSfVsyncOffset);
+
+    Mutex::Autolock threadLock(mLock);
+    mChoreographerThread = new ChoreographerThread(true);
+
+    mVsyncTracker = new VsyncTracker();
+    if (mChoreographerThread->init(static_cast<void*>(mVsyncTracker.get())) != OK) {
+        mChoreographerThread.clear();
+    }
+    if (mChoreographerThread != NULL && mChoreographerThread->run("Choreographer") != OK) {
+        mChoreographerThread.clear();
+    }
+}
+
+void VideoFrameScheduler2::updateVsync() {
+    mVsyncTime = 0;
+    mVsyncPeriod = 0;
+
+    if (mVsyncTracker != NULL) {
+        mVsyncPeriod = mVsyncTracker->getVsyncPeriod();
+        mVsyncTime = mVsyncTracker->getVsyncTime(mSfVsyncOffset - mAppVsyncOffset);
+    }
+    mVsyncRefreshAt = systemTime(SYSTEM_TIME_MONOTONIC) + kVsyncRefreshPeriod;
+}
+
+void VideoFrameScheduler2::release() {
+    // Do not change order
+    {
+        Mutex::Autolock threadLock(mLock);
+        mChoreographerThread->requestExitAndWait();
+        mChoreographerThread.clear();
+    }
+
+    mVsyncTracker.clear();
+}
+
+VideoFrameScheduler2::~VideoFrameScheduler2() {
+    release();
+}
+
+} // namespace android
diff --git a/media/libstagefright/VideoFrameSchedulerBase.cpp b/media/libstagefright/VideoFrameSchedulerBase.cpp
new file mode 100644
index 0000000..77107ff
--- /dev/null
+++ b/media/libstagefright/VideoFrameSchedulerBase.cpp
@@ -0,0 +1,465 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "VideoFrameSchedulerBase"
+#include <utils/Log.h>
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#include <utils/Trace.h>
+#include <utils/Vector.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/VideoFrameSchedulerBase.h>
+
+namespace android {
+
+template<class T>
+static int compare(const T *lhs, const T *rhs) {
+    if (*lhs < *rhs) {
+        return -1;
+    } else if (*lhs > *rhs) {
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+/* ======================================================================= */
+/*                                   PLL                                   */
+/* ======================================================================= */
+
+static const size_t kMinSamplesToStartPrime = 3;
+static const size_t kMinSamplesToStopPrime = VideoFrameSchedulerBase::kHistorySize;
+static const size_t kMinSamplesToEstimatePeriod = 3;
+static const size_t kMaxSamplesToEstimatePeriod = VideoFrameSchedulerBase::kHistorySize;
+
+static const size_t kPrecision = 12;
+static const int64_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
+static const int64_t kMultiplesThresholdDiv = 4;                                     // 25%
+static const int64_t kReFitThresholdDiv = 100;                                       // 1%
+static const nsecs_t kMaxAllowedFrameSkip = VideoFrameSchedulerBase::kNanosIn1s;     // 1 sec
+static const nsecs_t kMinPeriod = VideoFrameSchedulerBase::kNanosIn1s / 120;         // 120Hz
+static const nsecs_t kRefitRefreshPeriod = 10 * VideoFrameSchedulerBase::kNanosIn1s; // 10 sec
+
+VideoFrameSchedulerBase::PLL::PLL()
+    : mPeriod(-1),
+      mPhase(0),
+      mPrimed(false),
+      mSamplesUsedForPriming(0),
+      mLastTime(-1),
+      mNumSamples(0) {
+}
+
+void VideoFrameSchedulerBase::PLL::reset(float fps) {
+    //test();
+
+    mSamplesUsedForPriming = 0;
+    mLastTime = -1;
+
+    // set up or reset video PLL
+    if (fps <= 0.f) {
+        mPeriod = -1;
+        mPrimed = false;
+    } else {
+        ALOGV("reset at %.1f fps", fps);
+        mPeriod = (nsecs_t)(1e9 / fps + 0.5);
+        mPrimed = true;
+    }
+
+    restart();
+}
+
+// reset PLL but keep previous period estimate
+void VideoFrameSchedulerBase::PLL::restart() {
+    mNumSamples = 0;
+    mPhase = -1;
+}
+
+#if 0
+
+void VideoFrameSchedulerBase::PLL::test() {
+    nsecs_t period = VideoFrameSchedulerBase::kNanosIn1s / 60;
+    mTimes[0] = 0;
+    mTimes[1] = period;
+    mTimes[2] = period * 3;
+    mTimes[3] = period * 4;
+    mTimes[4] = period * 7;
+    mTimes[5] = period * 8;
+    mTimes[6] = period * 10;
+    mTimes[7] = period * 12;
+    mNumSamples = 8;
+    int64_t a, b, err;
+    fit(0, period * 12 / 7, 8, &a, &b, &err);
+    // a = 0.8(5)+
+    // b = -0.14097(2)+
+    // err = 0.2750578(703)+
+    ALOGD("a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
+            (long long)a, (a / (float)(1 << kPrecision)),
+            (long long)b, (b / (float)(1 << kPrecision)),
+            (long long)err, (err / (float)(1 << (kPrecision * 2))));
+}
+
+#endif
+
+bool VideoFrameSchedulerBase::PLL::fit(
+        nsecs_t phase, nsecs_t period, size_t numSamplesToUse,
+        int64_t *a, int64_t *b, int64_t *err) {
+    if (numSamplesToUse > mNumSamples) {
+        numSamplesToUse = mNumSamples;
+    }
+
+    if ((period >> kPrecision) == 0 ) {
+        ALOGW("Period is 0, or after including precision is 0 - would cause div0, returning");
+        return false;
+    }
+
+    int64_t sumX = 0;
+    int64_t sumXX = 0;
+    int64_t sumXY = 0;
+    int64_t sumYY = 0;
+    int64_t sumY = 0;
+
+    int64_t x = 0; // x usually is in [0..numSamplesToUse)
+    nsecs_t lastTime;
+    for (size_t i = 0; i < numSamplesToUse; i++) {
+        size_t ix = (mNumSamples - numSamplesToUse + i) % kHistorySize;
+        nsecs_t time = mTimes[ix];
+        if (i > 0) {
+            x += divRound(time - lastTime, period);
+        }
+        // y is usually in [-numSamplesToUse..numSamplesToUse+kRefitRefreshPeriod/kMinPeriod) << kPrecision
+        //   ideally in [0..numSamplesToUse), but shifted by -numSamplesToUse during
+        //   priming, and possibly shifted by up to kRefitRefreshPeriod/kMinPeriod
+        //   while we are not refitting.
+        int64_t y = divRound(time - phase, period >> kPrecision);
+        sumX += x;
+        sumY += y;
+        sumXX += x * x;
+        sumXY += x * y;
+        sumYY += y * y;
+        lastTime = time;
+    }
+
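+    // solve the normal equations of a least-squares line fit y ~= a*x + b
+    // (a and b are in fixed point with kPrecision fractional bits)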
+    int64_t div   = (int64_t)numSamplesToUse * sumXX - sumX * sumX;
+    if (div == 0) {
+        return false;
+    }
+
+    int64_t a_nom = (int64_t)numSamplesToUse * sumXY - sumX * sumY;
+    int64_t b_nom = sumXX * sumY            - sumX * sumXY;
+    *a = divRound(a_nom, div);
+    *b = divRound(b_nom, div);
+    // don't use a and b directly as the rounding error is significant
+    *err = sumYY - divRound(a_nom * sumXY + b_nom * sumY, div);
+    ALOGV("fitting[%zu] a=%lld (%.6f), b=%lld (%.6f), err=%lld (%.6f)",
+            numSamplesToUse,
+            (long long)*a,   (*a / (float)(1 << kPrecision)),
+            (long long)*b,   (*b / (float)(1 << kPrecision)),
+            (long long)*err, (*err / (float)(1 << (kPrecision * 2))));
+    return true;
+}
+
+void VideoFrameSchedulerBase::PLL::prime(size_t numSamplesToUse) {
+    if (numSamplesToUse > mNumSamples) {
+        numSamplesToUse = mNumSamples;
+    }
+    CHECK(numSamplesToUse >= 3);  // must have at least 3 samples
+
+    // estimate video framerate from deltas between timestamps, and
+    // 2nd order deltas
+    Vector<nsecs_t> deltas;
+    nsecs_t lastTime, firstTime;
+    for (size_t i = 0; i < numSamplesToUse; ++i) {
+        size_t index = (mNumSamples - numSamplesToUse + i) % kHistorySize;
+        nsecs_t time = mTimes[index];
+        if (i > 0) {
+            if (time - lastTime > kMinPeriod) {
+                //ALOGV("delta: %lld", (long long)(time - lastTime));
+                deltas.push(time - lastTime);
+            }
+        } else {
+            firstTime = time;
+        }
+        lastTime = time;
+    }
+    deltas.sort(compare<nsecs_t>);
+    size_t numDeltas = deltas.size();
+    if (numDeltas > 1) {
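+        // drop deltas larger than kMultiplesThresholdDiv times the median as outliers, then
+        // append 2nd-order deltas no smaller than 1/kMultiplesThresholdDiv of the smallest delta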
+        nsecs_t deltaMinLimit = max(deltas[0] / kMultiplesThresholdDiv, kMinPeriod);
+        nsecs_t deltaMaxLimit = deltas[numDeltas / 2] * kMultiplesThresholdDiv;
+        for (size_t i = numDeltas / 2 + 1; i < numDeltas; ++i) {
+            if (deltas[i] > deltaMaxLimit) {
+                deltas.resize(i);
+                numDeltas = i;
+                break;
+            }
+        }
+        for (size_t i = 1; i < numDeltas; ++i) {
+            nsecs_t delta2nd = deltas[i] - deltas[i - 1];
+            if (delta2nd >= deltaMinLimit) {
+                //ALOGV("delta2: %lld", (long long)(delta2nd));
+                deltas.push(delta2nd);
+            }
+        }
+    }
+
+    // use the one that yields the best match
+    int64_t bestScore;
+    for (size_t i = 0; i < deltas.size(); ++i) {
+        nsecs_t delta = deltas[i];
+        int64_t score = 0;
+#if 1
+        // simplest score: number of deltas that are near multiples
+        size_t matches = 0;
+        for (size_t j = 0; j < deltas.size(); ++j) {
+            nsecs_t err = periodicError(deltas[j], delta);
+            if (err < delta / kMultiplesThresholdDiv) {
+                ++matches;
+            }
+        }
+        score = matches;
+#if 0
+        // could be weighted by the (1 - normalized error)
+        if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
+            int64_t a, b, err;
+            fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
+            err = (1 << (2 * kPrecision)) - err;
+            score *= max(0, err);
+        }
+#endif
+#else
+        // or use the error as a negative score
+        if (numSamplesToUse >= kMinSamplesToEstimatePeriod) {
+            int64_t a, b, err;
+            fit(firstTime, delta, numSamplesToUse, &a, &b, &err);
+            score = -delta * err;
+        }
+#endif
+        if (i == 0 || score > bestScore) {
+            bestScore = score;
+            mPeriod = delta;
+            mPhase = firstTime;
+        }
+    }
+    ALOGV("priming[%zu] phase:%lld period:%lld",
+            numSamplesToUse, (long long)mPhase, (long long)mPeriod);
+}
+
+nsecs_t VideoFrameSchedulerBase::PLL::addSample(nsecs_t time) {
+    if (mLastTime >= 0
+            // if time goes backward, or we skipped rendering
+            && (time > mLastTime + kMaxAllowedFrameSkip || time < mLastTime)) {
+        restart();
+    }
+
+    mLastTime = time;
+    mTimes[mNumSamples % kHistorySize] = time;
+    ++mNumSamples;
+
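+    // refit when the refresh period has elapsed, while (re)priming, or (below) when the
+    // phase error grows beyond 1/kReFitThresholdDiv of the period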
+    bool doFit = time > mRefitAt;
+    if ((mPeriod <= 0 || !mPrimed) && mNumSamples >= kMinSamplesToStartPrime) {
+        prime(kMinSamplesToStopPrime);
+        ++mSamplesUsedForPriming;
+        doFit = true;
+    }
+    if (mPeriod > 0 && mNumSamples >= kMinSamplesToEstimatePeriod) {
+        if (mPhase < 0) {
+            // initialize phase to the current render time
+            mPhase = time;
+            doFit = true;
+        } else if (!doFit) {
+            int64_t err = periodicError(time - mPhase, mPeriod);
+            doFit = err > mPeriod / kReFitThresholdDiv;
+        }
+
+        if (doFit) {
+            int64_t a, b, err;
+            if (!fit(mPhase, mPeriod, kMaxSamplesToEstimatePeriod, &a, &b, &err)) {
+                // samples are not suitable for fitting.  this means they are
+                // also not suitable for priming.
+                ALOGV("could not fit - keeping old period:%lld", (long long)mPeriod);
+                return mPeriod;
+            }
+
+            mRefitAt = time + kRefitRefreshPeriod;
+
+            mPhase += (mPeriod * b) >> kPrecision;
+            mPeriod = (mPeriod * a) >> kPrecision;
+            ALOGV("new phase:%lld period:%lld", (long long)mPhase, (long long)mPeriod);
+
+            if (err < kErrorThreshold) {
+                if (!mPrimed && mSamplesUsedForPriming >= kMinSamplesToStopPrime) {
+                    mPrimed = true;
+                }
+            } else {
+                mPrimed = false;
+                mSamplesUsedForPriming = 0;
+            }
+        }
+    }
+    return mPeriod;
+}
+
+nsecs_t VideoFrameSchedulerBase::PLL::getPeriod() const {
+    return mPrimed ? mPeriod : 0;
+}
+
+/* ======================================================================= */
+/*                             Frame Scheduler                             */
+/* ======================================================================= */
+
+VideoFrameSchedulerBase::VideoFrameSchedulerBase()
+    : mVsyncTime(0),
+      mVsyncPeriod(0),
+      mVsyncRefreshAt(0),
+      mLastVsyncTime(-1),
+      mTimeCorrection(0) {
+}
+
+void VideoFrameSchedulerBase::init(float videoFps) {
+    updateVsync();
+
+    mLastVsyncTime = -1;
+    mTimeCorrection = 0;
+
+    mPll.reset(videoFps);
+}
+
+void VideoFrameSchedulerBase::restart() {
+    mLastVsyncTime = -1;
+    mTimeCorrection = 0;
+
+    mPll.restart();
+}
+
+nsecs_t VideoFrameSchedulerBase::getVsyncPeriod() {
+    if (mVsyncPeriod > 0) {
+        return mVsyncPeriod;
+    }
+    return kDefaultVsyncPeriod;
+}
+
+float VideoFrameSchedulerBase::getFrameRate() {
+    nsecs_t videoPeriod = mPll.getPeriod();
+    if (videoPeriod > 0) {
+        return 1e9 / videoPeriod;
+    }
+    return 0.f;
+}
+
+nsecs_t VideoFrameSchedulerBase::schedule(nsecs_t renderTime) {
+    nsecs_t origRenderTime = renderTime;
+
+    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    if (now >= mVsyncRefreshAt) {
+        updateVsync();
+    }
+
+    // without VSYNC info, there is nothing to do
+    if (mVsyncPeriod == 0) {
+        ALOGV("no vsync: render=%lld", (long long)renderTime);
+        return renderTime;
+    }
+
+    // ensure vsync time is well before (corrected) render time
+    if (mVsyncTime > renderTime - 4 * mVsyncPeriod) {
+        mVsyncTime -=
+            ((mVsyncTime - renderTime) / mVsyncPeriod + 5) * mVsyncPeriod;
+    }
+
+    // Video presentation takes place at the VSYNC _after_ renderTime.  Adjust renderTime
+    // so this effectively becomes a rounding operation (to the _closest_ VSYNC.)
+    renderTime -= mVsyncPeriod / 2;
+
+    const nsecs_t videoPeriod = mPll.addSample(origRenderTime);
+    if (videoPeriod > 0) {
+        // Smooth out rendering
+        size_t N = 12;
+        nsecs_t fiveSixthDev =
+            abs(((videoPeriod * 5 + mVsyncPeriod) % (mVsyncPeriod * 6)) - mVsyncPeriod)
+                    / (mVsyncPeriod / 100);
+        // use 20 samples if we are doing 5:6 ratio +- 1% (e.g. playing 50Hz on 60Hz)
+        if (fiveSixthDev < 12) {  /* 12% / 6 = 2% */
+            N = 20;
+        }
+
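+        // average the phase of the next N frame times within the VSYNC interval and adjust
+        // mTimeCorrection so that frames land near the midpoint between VSYNC edges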
+        nsecs_t offset = 0;
+        nsecs_t edgeRemainder = 0;
+        for (size_t i = 1; i <= N; i++) {
+            offset +=
+                (renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
+            edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
+        }
+        mTimeCorrection += mVsyncPeriod / 2 - offset / (nsecs_t)N;
+        renderTime += mTimeCorrection;
+        nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
+        edgeRemainder = abs(edgeRemainder / (nsecs_t)N - mVsyncPeriod / 2);
+        if (edgeRemainder <= mVsyncPeriod / 3) {
+            correctionLimit /= 2;
+        }
+
+        // estimate how many VSYNCs a frame will spend on the display
+        nsecs_t nextVsyncTime =
+            renderTime + mVsyncPeriod - ((renderTime - mVsyncTime) % mVsyncPeriod);
+        if (mLastVsyncTime >= 0) {
+            size_t minVsyncsPerFrame = videoPeriod / mVsyncPeriod;
+            size_t vsyncsForLastFrame = divRound(nextVsyncTime - mLastVsyncTime, mVsyncPeriod);
+            bool vsyncsPerFrameAreNearlyConstant =
+                periodicError(videoPeriod, mVsyncPeriod) / (mVsyncPeriod / 20) == 0;
+
+            if (mTimeCorrection > correctionLimit &&
+                    (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame > minVsyncsPerFrame)) {
+                // remove a VSYNC
+                mTimeCorrection -= mVsyncPeriod / 2;
+                renderTime -= mVsyncPeriod / 2;
+                nextVsyncTime -= mVsyncPeriod;
+                if (vsyncsForLastFrame > 0)
+                    --vsyncsForLastFrame;
+            } else if (mTimeCorrection < -correctionLimit &&
+                    (vsyncsPerFrameAreNearlyConstant || vsyncsForLastFrame == minVsyncsPerFrame)) {
+                // add a VSYNC
+                mTimeCorrection += mVsyncPeriod / 2;
+                renderTime += mVsyncPeriod / 2;
+                nextVsyncTime += mVsyncPeriod;
+                if (vsyncsForLastFrame < ULONG_MAX)
+                    ++vsyncsForLastFrame;
+            } else if (mTimeCorrection < -correctionLimit * 2
+                    || mTimeCorrection > correctionLimit * 2) {
+                ALOGW("correction beyond limit: %lld vs %lld (vsyncs for last frame: %zu, min: %zu)"
+                        " restarting. render=%lld",
+                        (long long)mTimeCorrection, (long long)correctionLimit,
+                        vsyncsForLastFrame, minVsyncsPerFrame, (long long)origRenderTime);
+                restart();
+                return origRenderTime;
+            }
+
+            ATRACE_INT("FRAME_VSYNCS", vsyncsForLastFrame);
+        }
+        mLastVsyncTime = nextVsyncTime;
+    }
+
+    // align render time to the center between VSYNC edges
+    renderTime -= (renderTime - mVsyncTime) % mVsyncPeriod;
+    renderTime += mVsyncPeriod / 2;
+    ALOGV("adjusting render: %lld => %lld", (long long)origRenderTime, (long long)renderTime);
+    ATRACE_INT("FRAME_FLIP_IN(ms)", (renderTime - now) / 1000000);
+    return renderTime;
+}
+
+VideoFrameSchedulerBase::~VideoFrameSchedulerBase() {}
+
+} // namespace android
diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp
index 81777f1..218fe15 100644
--- a/media/libstagefright/bqhelper/Android.bp
+++ b/media/libstagefright/bqhelper/Android.bp
@@ -25,7 +25,6 @@
     ],
 
     shared_libs: [
-        "libbase",
         "libbinder",
         "libcutils",
         "libgui",
@@ -38,8 +37,6 @@
         "libutils",
 
         "android.hardware.graphics.bufferqueue@1.0",
-
-        "libnativewindow", // TODO(b/62923479): use header library
     ],
 
     export_shared_lib_headers: [
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index 379d41e..e0f2683 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -1058,8 +1058,8 @@
     }
 }
 
-OMX_ERRORTYPE SoftAVC::setConfig(
-        OMX_INDEXTYPE index, const OMX_PTR _params) {
+OMX_ERRORTYPE SoftAVC::internalSetConfig(
+        OMX_INDEXTYPE index, const OMX_PTR _params, bool *frameConfig) {
     switch ((int)index) {
         case OMX_IndexConfigVideoIntraVOPRefresh:
         {
@@ -1125,7 +1125,7 @@
         }
 
         default:
-            return SimpleSoftOMXComponent::setConfig(index, _params);
+            return SimpleSoftOMXComponent::internalSetConfig(index, _params, frameConfig);
     }
 }
 
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index a43cdf1..8253b7d 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -219,8 +219,8 @@
     OMX_ERRORTYPE internalSetBitrateParams(
         const OMX_VIDEO_PARAM_BITRATETYPE *bitrate);
 
-    OMX_ERRORTYPE setConfig(
-        OMX_INDEXTYPE index, const OMX_PTR _params);
+    OMX_ERRORTYPE internalSetConfig(
+        OMX_INDEXTYPE index, const OMX_PTR _params, bool *frameConfig);
 
     OMX_ERRORTYPE getConfig(
         OMX_INDEXTYPE index, const OMX_PTR _params);
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 8d5f3e7..0f2ff17 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -33,6 +33,7 @@
     { OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level5 },
     { OMX_VIDEO_VP9Profile2, OMX_VIDEO_VP9Level5 },
     { OMX_VIDEO_VP9Profile2HDR, OMX_VIDEO_VP9Level5 },
+    { OMX_VIDEO_VP9Profile2HDR10Plus, OMX_VIDEO_VP9Level5 },
 };
 
 SoftVPX::SoftVPX(
@@ -84,6 +85,10 @@
     return true;
 }
 
+bool SoftVPX::supportDescribeHdr10PlusInfo() {
+    return true;
+}
+
 status_t SoftVPX::initDecoder() {
     mCtx = new vpx_codec_ctx_t;
     vpx_codec_err_t vpx_err;
@@ -167,7 +172,12 @@
         outHeader->nOffset = 0;
         outHeader->nFlags = 0;
         outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * bpp * 3) / 2;
-        outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
+        PrivInfo *privInfo = (PrivInfo *)mImg->user_priv;
+        outHeader->nTimeStamp = privInfo->mTimeStamp;
+        if (privInfo->mHdr10PlusInfo != nullptr) {
+            queueOutputFrameConfig(privInfo->mHdr10PlusInfo);
+        }
+
         if (outputBufferSafe(outHeader)) {
             uint8_t *dst = outHeader->pBuffer;
             const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
@@ -275,7 +285,13 @@
             }
         }
 
-        mTimeStamps[mTimeStampIdx] = inHeader->nTimeStamp;
+        mPrivInfo[mTimeStampIdx].mTimeStamp = inHeader->nTimeStamp;
+
+        if (inInfo->mFrameConfig) {
+            mPrivInfo[mTimeStampIdx].mHdr10PlusInfo = dequeueInputFrameConfig();
+        } else {
+            mPrivInfo[mTimeStampIdx].mHdr10PlusInfo.clear();
+        }
 
         if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
             mEOSStatus = INPUT_EOS_SEEN;
@@ -285,7 +301,7 @@
         if (inHeader->nFilledLen > 0) {
             vpx_codec_err_t err = vpx_codec_decode(
                     (vpx_codec_ctx_t *)mCtx, inHeader->pBuffer + inHeader->nOffset,
-                    inHeader->nFilledLen, &mTimeStamps[mTimeStampIdx], 0);
+                    inHeader->nFilledLen, &mPrivInfo[mTimeStampIdx], 0);
             if (err == VPX_CODEC_OK) {
                 inInfo->mOwnedByUs = false;
                 inQueue.erase(inQueue.begin());
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index b62b526..0aa8e9c 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -26,6 +26,8 @@
 
 namespace android {
 
+struct ABuffer;
+
 struct SoftVPX : public SoftVideoDecoderOMXComponent {
     SoftVPX(const char *name,
             const char *componentRole,
@@ -41,6 +43,7 @@
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
     virtual void onReset();
     virtual bool supportDescribeHdrStaticInfo();
+    virtual bool supportDescribeHdr10PlusInfo();
 
 private:
     enum {
@@ -60,7 +63,11 @@
 
     void *mCtx;
     bool mFrameParallelMode;  // Frame parallel is only supported by VP9 decoder.
-    OMX_TICKS mTimeStamps[kNumBuffers];
+    struct PrivInfo {
+        OMX_TICKS mTimeStamp;
+        sp<ABuffer> mHdr10PlusInfo;
+    };
+    PrivInfo mPrivInfo[kNumBuffers];
     uint8_t mTimeStampIdx;
     vpx_image_t *mImg;
 
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 2dfba13..d0cb071 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -401,8 +401,8 @@
     }
 }
 
-OMX_ERRORTYPE SoftVPXEncoder::setConfig(
-        OMX_INDEXTYPE index, const OMX_PTR _params) {
+OMX_ERRORTYPE SoftVPXEncoder::internalSetConfig(
+        OMX_INDEXTYPE index, const OMX_PTR _params, bool *frameConfig) {
     switch (index) {
         case OMX_IndexConfigVideoIntraVOPRefresh:
         {
@@ -442,7 +442,7 @@
         }
 
         default:
-            return SimpleSoftOMXComponent::setConfig(index, _params);
+            return SimpleSoftOMXComponent::internalSetConfig(index, _params, frameConfig);
     }
 }
 
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index dd86d36..263d134 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -84,8 +84,8 @@
     virtual OMX_ERRORTYPE internalSetParameter(
             OMX_INDEXTYPE index, const OMX_PTR param);
 
-    virtual OMX_ERRORTYPE setConfig(
-            OMX_INDEXTYPE index, const OMX_PTR params);
+    virtual OMX_ERRORTYPE internalSetConfig(
+            OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig);
 
     // OMX callback when buffers available
     // Note that both an input and output buffer
diff --git a/media/libstagefright/flac/dec/Android.bp b/media/libstagefright/flac/dec/Android.bp
index 6bfab16..751b053 100644
--- a/media/libstagefright/flac/dec/Android.bp
+++ b/media/libstagefright/flac/dec/Android.bp
@@ -36,8 +36,6 @@
 
     shared_libs: [
         "liblog",
-        "libstagefright_foundation",
-        "libutils",
     ],
     header_libs: ["libmedia_headers"],
 }
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index 78d410a..c4a072b 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -29,7 +29,6 @@
 
     shared_libs: [
         "liblog",
-        "libbinder",
         "libcrypto",
         "libcutils",
         "libmedia",
@@ -38,10 +37,11 @@
         "libstagefright",
         "libstagefright_foundation",
         "libutils",
-        "libhidlallocatorutils",
         "libhidlbase",
+        "libhidlmemory",
         "android.hardware.cas@1.0",
         "android.hardware.cas.native@1.0",
+        "android.hidl.allocator@1.0",
     ],
 
     header_libs: [
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 80125d4..9b2853e 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -237,6 +237,8 @@
     android_native_rect_t mLastNativeWindowCrop;
     int32_t mLastNativeWindowDataSpace;
     HDRStaticInfo mLastHDRStaticInfo;
+    sp<ABuffer> mHdr10PlusScratchBuffer;
+    sp<ABuffer> mLastHdr10PlusBuffer;
     sp<AMessage> mConfigFormat;
     sp<AMessage> mInputFormat;
     sp<AMessage> mOutputFormat;
@@ -290,6 +292,7 @@
 
     OMX_INDEXTYPE mDescribeColorAspectsIndex;
     OMX_INDEXTYPE mDescribeHDRStaticInfoIndex;
+    OMX_INDEXTYPE mDescribeHDR10PlusInfoIndex;
 
     std::shared_ptr<ACodecBufferChannel> mBufferChannel;
 
@@ -424,6 +427,11 @@
     // unspecified values.
     void onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects);
 
+    // notifies the codec that the config with |configIndex| has changed; the value
+    // can be queried via OMX getConfig, and the config should be applied to the next
+    // output buffer notified after this callback.
+    void onConfigUpdate(OMX_INDEXTYPE configIndex);
+
     // gets index or sets it to 0 on error. Returns error from codec.
     status_t initDescribeHDRStaticInfoIndex();
 
@@ -435,12 +443,22 @@
     // sets |params|. Returns the codec error.
     status_t setHDRStaticInfo(const DescribeHDRStaticInfoParams &params);
 
+    // sets |hdr10PlusInfo|. Returns the codec error.
+    status_t setHdr10PlusInfo(const sp<ABuffer> &hdr10PlusInfo);
+
     // gets |params|. Returns the codec error.
     status_t getHDRStaticInfo(DescribeHDRStaticInfoParams &params);
 
     // gets HDR static information for the video encoder/decoder port and sets them into |format|.
     status_t getHDRStaticInfoForVideoCodec(OMX_U32 portIndex, sp<AMessage> &format);
 
+    // gets DescribeHDR10PlusInfoParams params. If |paramSizeUsed| is zero, the
+    // returned DescribeHDR10PlusInfoParams may have only its nParamSizeUsed field
+    // updated, because the provided storage was insufficient. In this case,
+    // getHDR10PlusInfo() should be called again with |paramSizeUsed| set to the
+    // previously returned value.
+    DescribeHDR10PlusInfoParams* getHDR10PlusInfo(size_t paramSizeUsed = 0);
+
     typedef struct drcParams {
         int32_t drcCut;
         int32_t drcBoost;
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index 206d322..b0e32d0 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -69,6 +69,7 @@
 
     status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
 
+    status_t getPortId(audio_port_handle_t *portId) const;
 
 protected:
     virtual ~AudioSource();
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractor.h b/media/libstagefright/include/media/stagefright/MediaExtractor.h
index 6f3e57e..79f18d5 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractor.h
@@ -90,22 +90,7 @@
 
 class MediaExtractorCUnwrapper : public MediaExtractor {
 public:
-    MediaExtractorCUnwrapper() {};
-    virtual size_t countTracks() = 0;
-    virtual MediaTrack *getTrack(size_t index) = 0;
-    virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0) = 0;
-    virtual status_t getMetaData(MetaDataBase& meta) = 0;
-    virtual const char * name() = 0;
-    virtual uint32_t flags() const = 0;
-    virtual status_t setMediaCas(const uint8_t* casToken, size_t size) = 0;
-protected:
-    virtual ~MediaExtractorCUnwrapper() {};
-};
-
-
-class MediaExtractorCUnwrapperV1 : public MediaExtractorCUnwrapper {
-public:
-    explicit MediaExtractorCUnwrapperV1(CMediaExtractor *plugin);
+    explicit MediaExtractorCUnwrapper(CMediaExtractor *plugin);
     virtual size_t countTracks();
     virtual MediaTrack *getTrack(size_t index);
     virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0);
@@ -114,43 +99,11 @@
     virtual uint32_t flags() const;
     virtual status_t setMediaCas(const uint8_t* casToken, size_t size);
 protected:
-    virtual ~MediaExtractorCUnwrapperV1();
+    virtual ~MediaExtractorCUnwrapper();
 private:
     CMediaExtractor *plugin;
 };
 
-class MediaExtractorCUnwrapperV2 : public MediaExtractorCUnwrapper {
-public:
-    explicit MediaExtractorCUnwrapperV2(CMediaExtractorV2 *plugin);
-    virtual size_t countTracks();
-    virtual MediaTrack *getTrack(size_t index);
-    virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0);
-    virtual status_t getMetaData(MetaDataBase& meta);
-    virtual const char * name();
-    virtual uint32_t flags() const;
-    virtual status_t setMediaCas(const uint8_t* casToken, size_t size);
-protected:
-    virtual ~MediaExtractorCUnwrapperV2();
-private:
-    CMediaExtractorV2 *plugin;
-};
-
-class MediaExtractorCUnwrapperV3 : public MediaExtractorCUnwrapper {
-public:
-    explicit MediaExtractorCUnwrapperV3(CMediaExtractorV3 *plugin);
-    virtual size_t countTracks();
-    virtual MediaTrack *getTrack(size_t index);
-    virtual status_t getTrackMetaData(MetaDataBase& meta, size_t index, uint32_t flags = 0);
-    virtual status_t getMetaData(MetaDataBase& meta);
-    virtual const char * name();
-    virtual uint32_t flags() const;
-    virtual status_t setMediaCas(const uint8_t* casToken, size_t size);
-protected:
-    virtual ~MediaExtractorCUnwrapperV3();
-private:
-    CMediaExtractorV3 *plugin;
-};
-
 }  // namespace android
 
 #endif  // MEDIA_EXTRACTOR_H_
diff --git a/media/libstagefright/include/media/stagefright/VideoFrameScheduler.h b/media/libstagefright/include/media/stagefright/VideoFrameScheduler.h
index 9d97dfd..fcfcbec 100644
--- a/media/libstagefright/include/media/stagefright/VideoFrameScheduler.h
+++ b/media/libstagefright/include/media/stagefright/VideoFrameScheduler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014, The Android Open Source Project
+ * Copyright 2018, The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,87 +17,24 @@
 #ifndef VIDEO_FRAME_SCHEDULER_H_
 #define VIDEO_FRAME_SCHEDULER_H_
 
-#include <utils/RefBase.h>
-#include <utils/Timers.h>
-
-#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/VideoFrameSchedulerBase.h>
 
 namespace android {
 
 class ISurfaceComposer;
 
-struct VideoFrameScheduler : public RefBase {
+struct VideoFrameScheduler : public VideoFrameSchedulerBase {
     VideoFrameScheduler();
-
-    // (re)initialize scheduler
-    void init(float videoFps = -1);
-    // use in case of video render-time discontinuity, e.g. seek
-    void restart();
-    // get adjusted nanotime for a video frame render at renderTime
-    nsecs_t schedule(nsecs_t renderTime);
-
-    // returns the vsync period for the main display
-    nsecs_t getVsyncPeriod();
-
-    // returns the current frames-per-second, or 0.f if not primed
-    float getFrameRate();
-
-    void release();
-
-    static const size_t kHistorySize = 8;
+    void release() override;
 
 protected:
     virtual ~VideoFrameScheduler();
 
 private:
-    struct PLL {
-        PLL();
-
-        // reset PLL to new PLL
-        void reset(float fps = -1);
-        // keep current estimate, but restart phase
-        void restart();
-        // returns period or 0 if not yet primed
-        nsecs_t addSample(nsecs_t time);
-        nsecs_t getPeriod() const;
-
-    private:
-        nsecs_t mPeriod;
-        nsecs_t mPhase;
-
-        bool    mPrimed;        // have an estimate for the period
-        size_t  mSamplesUsedForPriming;
-
-        nsecs_t mLastTime;      // last input time
-        nsecs_t mRefitAt;       // next input time to fit at
-
-        size_t  mNumSamples;    // can go past kHistorySize
-        nsecs_t mTimes[kHistorySize];
-
-        void test();
-        // returns whether fit was successful
-        bool fit(nsecs_t phase, nsecs_t period, size_t numSamples,
-                int64_t *a, int64_t *b, int64_t *err);
-        void prime(size_t numSamples);
-    };
-
-    void updateVsync();
-
-    nsecs_t mVsyncTime;        // vsync timing from display
-    nsecs_t mVsyncPeriod;
-    nsecs_t mVsyncRefreshAt;   // next time to refresh timing info
-
-    nsecs_t mLastVsyncTime;    // estimated vsync time for last frame
-    nsecs_t mTimeCorrection;   // running adjustment
-
-    PLL mPll;                  // PLL for video frame rate based on render time
-
+    void updateVsync() override;
     sp<ISurfaceComposer> mComposer;
-
-    DISALLOW_EVIL_CONSTRUCTORS(VideoFrameScheduler);
 };
 
 }  // namespace android
 
 #endif  // VIDEO_FRAME_SCHEDULER_H_
-
diff --git a/media/libstagefright/include/media/stagefright/VideoFrameScheduler2.h b/media/libstagefright/include/media/stagefright/VideoFrameScheduler2.h
new file mode 100644
index 0000000..be911cc
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/VideoFrameScheduler2.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VIDEO_FRAME_SCHEDULER_2_H_
+#define VIDEO_FRAME_SCHEDULER_2_H_
+
+#include <media/stagefright/VideoFrameSchedulerBase.h>
+
+namespace android {
+
+class VsyncTracker;
+struct ChoreographerThread;
+
+struct VideoFrameScheduler2 : public VideoFrameSchedulerBase {
+    VideoFrameScheduler2();
+    void release() override;
+
+protected:
+    virtual ~VideoFrameScheduler2();
+
+private:
+    void updateVsync() override;
+
+    long mAppVsyncOffset;
+    long mSfVsyncOffset;
+    sp<VsyncTracker> mVsyncTracker;
+    sp<ChoreographerThread> mChoreographerThread;
+    Mutex mLock;
+};
+
+}  // namespace android
+
+#endif  // VIDEO_FRAME_SCHEDULER_2_H_
diff --git a/media/libstagefright/include/media/stagefright/VideoFrameSchedulerBase.h b/media/libstagefright/include/media/stagefright/VideoFrameSchedulerBase.h
new file mode 100644
index 0000000..ff5f716
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/VideoFrameSchedulerBase.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef VIDEO_FRAME_SCHEDULER_BASE_H_
+#define VIDEO_FRAME_SCHEDULER_BASE_H_
+
+#include <utils/RefBase.h>
+#include <utils/Timers.h>
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct VideoFrameSchedulerBase : public RefBase {
+    VideoFrameSchedulerBase();
+
+    // (re)initialize scheduler
+    void init(float videoFps = -1);
+    // use in case of video render-time discontinuity, e.g. seek
+    void restart();
+    // get adjusted nanotime for a video frame render at renderTime
+    nsecs_t schedule(nsecs_t renderTime);
+
+    // returns the vsync period for the main display
+    nsecs_t getVsyncPeriod();
+
+    // returns the current frames-per-second, or 0.f if not primed
+    float getFrameRate();
+
+    virtual void release() = 0;
+
+    static const size_t kHistorySize = 8;
+    static const nsecs_t kNanosIn1s = 1000000000;
+    static const nsecs_t kDefaultVsyncPeriod = kNanosIn1s / 60;  // 60Hz
+    static const nsecs_t kVsyncRefreshPeriod = kNanosIn1s;       // 1 sec
+
+protected:
+    virtual ~VideoFrameSchedulerBase();
+
+    nsecs_t mVsyncTime;        // vsync timing from display
+    nsecs_t mVsyncPeriod;
+    nsecs_t mVsyncRefreshAt;   // next time to refresh timing info
+
+private:
+    struct PLL {
+        PLL();
+
+        // reset PLL, optionally seeding it with a known fps
+        void reset(float fps = -1);
+        // keep current estimate, but restart phase
+        void restart();
+        // returns period or 0 if not yet primed
+        nsecs_t addSample(nsecs_t time);
+        nsecs_t getPeriod() const;
+
+    private:
+        nsecs_t mPeriod;
+        nsecs_t mPhase;
+
+        bool    mPrimed;        // have an estimate for the period
+        size_t  mSamplesUsedForPriming;
+
+        nsecs_t mLastTime;      // last input time
+        nsecs_t mRefitAt;       // next input time to fit at
+
+        size_t  mNumSamples;    // can go past kHistorySize
+        nsecs_t mTimes[kHistorySize];
+
+        void test();
+        // returns whether fit was successful
+        bool fit(nsecs_t phase, nsecs_t period, size_t numSamples,
+                int64_t *a, int64_t *b, int64_t *err);
+        void prime(size_t numSamples);
+    };
+
+    virtual void updateVsync() = 0;
+
+    nsecs_t mLastVsyncTime;    // estimated vsync time for last frame
+    nsecs_t mTimeCorrection;   // running adjustment
+    PLL mPll;                  // PLL for video frame rate based on render time
+
+    DISALLOW_EVIL_CONSTRUCTORS(VideoFrameSchedulerBase);
+};
+
+}  // namespace android
+
+#endif  // VIDEO_FRAME_SCHEDULER_BASE_H_
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 590131e..e9baa1a 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -23,10 +23,10 @@
 #include "ESQueue.h"
 
 #include <android/hardware/cas/native/1.0/IDescrambler.h>
-#include <binder/IMemory.h>
-#include <binder/MemoryDealer.h>
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
 #include <cutils/native_handle.h>
-#include <hidlmemory/FrameworkUtils.h>
+#include <hidlmemory/mapping.h>
 #include <media/cas/DescramblerAPI.h>
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -46,12 +46,13 @@
 #include <inttypes.h>
 
 namespace android {
-using hardware::fromHeap;
 using hardware::hidl_string;
 using hardware::hidl_vec;
-using hardware::HidlMemory;
+using hardware::hidl_memory;
 using namespace hardware::cas::V1_0;
 using namespace hardware::cas::native::V1_0;
+typedef hidl::allocator::V1_0::IAllocator TAllocator;
+typedef hidl::memory::V1_0::IMemory TMemory;
 
 // I want the expression "y" evaluated even if verbose logging is off.
 #define MY_LOGV(x, y) \
@@ -208,9 +209,8 @@
     bool mScrambled;
     bool mSampleEncrypted;
     sp<AMessage> mSampleAesKeyItem;
-    sp<IMemory> mMem;
-    sp<MemoryDealer> mDealer;
-    sp<HidlMemory> mHidlMemory;
+    sp<TMemory> mHidlMemory;
+    sp<TAllocator> mHidlAllocator;
     hardware::cas::native::V1_0::SharedBuffer mDescramblerSrcBuffer;
     sp<ABuffer> mDescrambledBuffer;
     List<SubSampleInfo> mSubSamples;
@@ -975,16 +975,43 @@
             mBuffer == NULL ? 0 : mBuffer->capacity(), neededSize, mScrambled);
 
     sp<ABuffer> newBuffer, newScrambledBuffer;
-    sp<IMemory> newMem;
-    sp<MemoryDealer> newDealer;
+    sp<TMemory> newMem;
     if (mScrambled) {
-        size_t alignment = MemoryDealer::getAllocationAlignment();
-        neededSize = (neededSize + (alignment - 1)) & ~(alignment - 1);
-        // Align to multiples of 64K.
-        neededSize = (neededSize + 65535) & ~65535;
-        newDealer = new MemoryDealer(neededSize, "ATSParser");
-        newMem = newDealer->allocate(neededSize);
-        newScrambledBuffer = new ABuffer(newMem->pointer(), newMem->size());
+        if (mHidlAllocator == nullptr) {
+            mHidlAllocator = TAllocator::getService("ashmem");
+            if (mHidlAllocator == nullptr) {
+                ALOGE("[stream %d] can't get hidl allocator", mElementaryPID);
+                return false;
+            }
+        }
+
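+        // Allocate descrambler shared memory from the HIDL ashmem allocator; the returned
+        // hidl_memory token is mapped locally and also handed to the descrambler as the
+        // source buffer (mDescramblerSrcBuffer).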
+        hidl_memory hidlMemToken;
+        bool success;
+        auto transStatus = mHidlAllocator->allocate(
+                neededSize,
+                [&success, &hidlMemToken](
+                        bool s,
+                        hidl_memory const& m) {
+                    success = s;
+                    hidlMemToken = m;
+                });
+
+        if (!transStatus.isOk()) {
+            ALOGE("[stream %d] hidl allocator failed at the transport: %s",
+                    mElementaryPID, transStatus.description().c_str());
+            return false;
+        }
+        if (!success) {
+            ALOGE("[stream %d] hidl allocator failed", mElementaryPID);
+            return false;
+        }
+        newMem = mapMemory(hidlMemToken);
+        if (newMem == nullptr || newMem->getPointer() == nullptr) {
+            ALOGE("[stream %d] hidl failed to map memory", mElementaryPID);
+            return false;
+        }
+
+        newScrambledBuffer = new ABuffer(newMem->getPointer(), newMem->getSize());
 
         if (mDescrambledBuffer != NULL) {
             memcpy(newScrambledBuffer->data(),
@@ -993,24 +1020,15 @@
         } else {
             newScrambledBuffer->setRange(0, 0);
         }
-        mMem = newMem;
-        mDealer = newDealer;
+        mHidlMemory = newMem;
         mDescrambledBuffer = newScrambledBuffer;
 
-        ssize_t offset;
-        size_t size;
-        sp<IMemoryHeap> heap = newMem->getMemory(&offset, &size);
-        if (heap == NULL) {
-            return false;
-        }
+        mDescramblerSrcBuffer.heapBase = hidlMemToken;
+        mDescramblerSrcBuffer.offset = 0ULL;
+        mDescramblerSrcBuffer.size = (uint64_t)neededSize;
 
-        mHidlMemory = fromHeap(heap);
-        mDescramblerSrcBuffer.heapBase = *mHidlMemory;
-        mDescramblerSrcBuffer.offset = (uint64_t) offset;
-        mDescramblerSrcBuffer.size = (uint64_t) size;
-
-        ALOGD("[stream %d] created shared buffer for descrambling, offset %zd, size %zu",
-                mElementaryPID, offset, size);
+        ALOGD("[stream %d] created shared buffer for descrambling, size %zu",
+                mElementaryPID, neededSize);
     } else {
         // Align to multiples of 64K.
         neededSize = (neededSize + 65535) & ~65535;
@@ -1498,7 +1516,7 @@
         return UNKNOWN_ERROR;
     }
 
-    if (mDescrambledBuffer == NULL || mMem == NULL) {
+    if (mDescrambledBuffer == NULL || mHidlMemory == NULL) {
         ALOGE("received scrambled packets without shared memory!");
 
         return UNKNOWN_ERROR;
diff --git a/media/libstagefright/mpeg2ts/Android.bp b/media/libstagefright/mpeg2ts/Android.bp
index e516cf1..a507b91 100644
--- a/media/libstagefright/mpeg2ts/Android.bp
+++ b/media/libstagefright/mpeg2ts/Android.bp
@@ -30,9 +30,10 @@
     shared_libs: [
         "libcrypto",
         "libmedia",
-        "libhidlallocatorutils",
+        "libhidlmemory",
         "android.hardware.cas.native@1.0",
         "android.hidl.memory@1.0",
+        "android.hidl.allocator@1.0",
     ],
 
     header_libs: [
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 8a76de3..362b7f5 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -53,7 +53,6 @@
         "libhidlbase",
         "libhidlmemory",
         "libhidltransport",
-        "libnativewindow", // TODO(b/62923479): use header library
         "libvndksupport",
         "android.hardware.media.omx@1.0",
         "android.hardware.graphics.bufferqueue@1.0",
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 55afe04..ddb459f 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -34,7 +34,8 @@
       mLooper(new ALooper),
       mHandler(new AHandlerReflector<SimpleSoftOMXComponent>(this)),
       mState(OMX_StateLoaded),
-      mTargetState(OMX_StateLoaded) {
+      mTargetState(OMX_StateLoaded),
+      mFrameConfig(false) {
     mLooper->setName(name);
     mLooper->registerHandler(mHandler);
 
@@ -204,6 +205,21 @@
     }
 }
 
+OMX_ERRORTYPE SimpleSoftOMXComponent::internalSetConfig(
+        OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig) {
+    return OMX_ErrorUndefined;
+}
+
+OMX_ERRORTYPE SimpleSoftOMXComponent::setConfig(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
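+    // Delegate to internalSetConfig(); a subclass sets |frameConfig| when the config applies
+    // to a particular frame, and the flag is then attached to the next emptyThisBuffer() call.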
+    bool frameConfig = mFrameConfig;
+    OMX_ERRORTYPE err = internalSetConfig(index, params, &frameConfig);
+    if (err == OMX_ErrorNone) {
+        mFrameConfig = frameConfig;
+    }
+    return err;
+}
+
 OMX_ERRORTYPE SimpleSoftOMXComponent::useBuffer(
         OMX_BUFFERHEADERTYPE **header,
         OMX_U32 portIndex,
@@ -336,6 +352,10 @@
         OMX_BUFFERHEADERTYPE *buffer) {
     sp<AMessage> msg = new AMessage(kWhatEmptyThisBuffer, mHandler);
     msg->setPointer("header", buffer);
+    if (mFrameConfig) {
+        msg->setInt32("frame-config", mFrameConfig);
+        mFrameConfig = false;
+    }
     msg->post();
 
     return OMX_ErrorNone;
@@ -378,6 +398,10 @@
         {
             OMX_BUFFERHEADERTYPE *header;
             CHECK(msg->findPointer("header", (void **)&header));
+            int32_t frameConfig;
+            if (!msg->findInt32("frame-config", &frameConfig)) {
+                frameConfig = 0;
+            }
 
             CHECK(mState == OMX_StateExecuting && mTargetState == mState);
 
@@ -393,6 +417,7 @@
                     CHECK(!buffer->mOwnedByUs);
 
                     buffer->mOwnedByUs = true;
+                    buffer->mFrameConfig = (bool)frameConfig;
 
                     CHECK((msgType == kWhatEmptyThisBuffer
                             && port->mDef.eDir == OMX_DirInput)
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index f9f7ec2..e853da9 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -602,13 +602,40 @@
             return OMX_ErrorNone;
         }
 
+        case kDescribeHdr10PlusInfoIndex:
+        {
+            if (!supportDescribeHdr10PlusInfo()) {
+                return OMX_ErrorUnsupportedIndex;
+            }
+
+            if (mHdr10PlusOutputs.size() > 0) {
+                auto it = mHdr10PlusOutputs.begin();
+
+                auto info = (*it).get();
+
+                DescribeHDR10PlusInfoParams* outParams =
+                        (DescribeHDR10PlusInfoParams *)params;
+
+                outParams->nParamSizeUsed = info->size();
+
+                // If the buffer provided by the client does not have enough
+                // storage, return the size only and do not remove the param yet.
+                if (outParams->nParamSize >= info->size()) {
+                    memcpy(outParams->nValue, info->data(), info->size());
+                    mHdr10PlusOutputs.erase(it);
+                }
+                return OMX_ErrorNone;
+            }
+            return OMX_ErrorUnderflow;
+        }
+
         default:
             return OMX_ErrorUnsupportedIndex;
     }
 }
 
-OMX_ERRORTYPE SoftVideoDecoderOMXComponent::setConfig(
-        OMX_INDEXTYPE index, const OMX_PTR params){
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetConfig(
+        OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig){
     switch ((int)index) {
         case kDescribeColorAspectsIndex:
         {
@@ -658,11 +685,55 @@
             return OMX_ErrorNone;
         }
 
+        case kDescribeHdr10PlusInfoIndex:
+        {
+            if (!supportDescribeHdr10PlusInfo()) {
+                return OMX_ErrorUnsupportedIndex;
+            }
+
+            const DescribeHDR10PlusInfoParams* inParams =
+                    (DescribeHDR10PlusInfoParams *)params;
+
+            if (*frameConfig) {
+                // This is a request to append to the current frame config set.
+                // For now, we only support kDescribeHdr10PlusInfoIndex, which
+                // we simply replace with the last set value.
+                if (mHdr10PlusInputs.size() > 0) {
+                    *(--mHdr10PlusInputs.end()) = ABuffer::CreateAsCopy(
+                            inParams->nValue, inParams->nParamSizeUsed);
+                } else {
+                    ALOGW("Ignoring kDescribeHdr10PlusInfoIndex: append to "
+                            "frame config while no frame config is present");
+                }
+            } else {
+                // This is a frame config; set *frameConfig to true so that the
+                // client marks the next queued input frame to apply it.
+                *frameConfig = true;
+                mHdr10PlusInputs.push_back(ABuffer::CreateAsCopy(
+                        inParams->nValue, inParams->nParamSizeUsed));
+            }
+            return OMX_ErrorNone;
+        }
+
         default:
             return OMX_ErrorUnsupportedIndex;
     }
 }
 
+sp<ABuffer> SoftVideoDecoderOMXComponent::dequeueInputFrameConfig() {
+    auto it = mHdr10PlusInputs.begin();
+    sp<ABuffer> info = *it;
+    mHdr10PlusInputs.erase(it);
+    return info;
+}
+
+void SoftVideoDecoderOMXComponent::queueOutputFrameConfig(const sp<ABuffer> &info) {
+    mHdr10PlusOutputs.push_back(info);
+    notify(OMX_EventConfigUpdate,
+           kOutputPortIndex,
+           kDescribeHdr10PlusInfoIndex,
+           NULL);
+}
 
 OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getExtensionIndex(
         const char *name, OMX_INDEXTYPE *index) {
@@ -677,6 +748,10 @@
             && supportDescribeHdrStaticInfo()) {
         *(int32_t*)index = kDescribeHdrStaticInfoIndex;
         return OMX_ErrorNone;
+    } else if (!strcmp(name, "OMX.google.android.index.describeHDR10PlusInfo")
+            && supportDescribeHdr10PlusInfo()) {
+        *(int32_t*)index = kDescribeHdr10PlusInfoIndex;
+        return OMX_ErrorNone;
     }
 
     return SimpleSoftOMXComponent::getExtensionIndex(name, index);
@@ -694,6 +769,10 @@
     return false;
 }
 
+bool SoftVideoDecoderOMXComponent::supportDescribeHdr10PlusInfo() {
+    return false;
+}
+
 void SoftVideoDecoderOMXComponent::onReset() {
     mOutputPortSettingsChange = NONE;
 }
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h
index 1d1f2bd..6bbedda 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SimpleSoftOMXComponent.h
@@ -20,6 +20,7 @@
 
 #include "SoftOMXComponent.h"
 
+#include <atomic>
 #include <media/stagefright/foundation/AHandlerReflector.h>
 #include <utils/RefBase.h>
 #include <utils/threads.h>
@@ -28,6 +29,7 @@
 namespace android {
 
 struct ALooper;
+struct ABuffer;
 
 struct CodecProfileLevel {
     OMX_U32 mProfile;
@@ -49,6 +51,7 @@
     struct BufferInfo {
         OMX_BUFFERHEADERTYPE *mHeader;
         bool mOwnedByUs;
+        bool mFrameConfig;
     };
 
     struct PortInfo {
@@ -76,6 +79,9 @@
     virtual OMX_ERRORTYPE internalSetParameter(
             OMX_INDEXTYPE index, const OMX_PTR params);
 
+    virtual OMX_ERRORTYPE internalSetConfig(
+            OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig);
+
     virtual void onQueueFilled(OMX_U32 portIndex);
     List<BufferInfo *> &getPortQueue(OMX_U32 portIndex);
 
@@ -101,6 +107,7 @@
     OMX_STATETYPE mTargetState;
 
     Vector<PortInfo> mPorts;
+    std::atomic_bool mFrameConfig;
 
     bool isSetParameterAllowed(
             OMX_INDEXTYPE index, const OMX_PTR params) const;
@@ -114,6 +121,9 @@
     virtual OMX_ERRORTYPE setParameter(
             OMX_INDEXTYPE index, const OMX_PTR params);
 
+    virtual OMX_ERRORTYPE setConfig(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
     virtual OMX_ERRORTYPE useBuffer(
             OMX_BUFFERHEADERTYPE **buffer,
             OMX_U32 portIndex,
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
index 56fc691..3b381ce 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/SoftVideoDecoderOMXComponent.h
@@ -20,6 +20,7 @@
 
 #include "SimpleSoftOMXComponent.h"
 
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/AHandlerReflector.h>
 #include <media/stagefright/foundation/ColorUtils.h>
 #include <media/IOMX.h>
@@ -28,6 +29,7 @@
 #include <utils/RefBase.h>
 #include <utils/threads.h>
 #include <utils/Vector.h>
+#include <utils/List.h>
 
 namespace android {
 
@@ -48,6 +50,7 @@
     enum {
         kDescribeColorAspectsIndex = kPrepareForAdaptivePlaybackIndex + 1,
         kDescribeHdrStaticInfoIndex = kPrepareForAdaptivePlaybackIndex + 2,
+        kDescribeHdr10PlusInfoIndex = kPrepareForAdaptivePlaybackIndex + 3,
     };
 
     enum {
@@ -68,8 +71,8 @@
     virtual OMX_ERRORTYPE getConfig(
             OMX_INDEXTYPE index, OMX_PTR params);
 
-    virtual OMX_ERRORTYPE setConfig(
-            OMX_INDEXTYPE index, const OMX_PTR params);
+    virtual OMX_ERRORTYPE internalSetConfig(
+            OMX_INDEXTYPE index, const OMX_PTR params, bool *frameConfig);
 
     virtual OMX_ERRORTYPE getExtensionIndex(
             const char *name, OMX_INDEXTYPE *index);
@@ -80,6 +83,8 @@
 
     virtual bool supportDescribeHdrStaticInfo();
 
+    virtual bool supportDescribeHdr10PlusInfo();
+
     // This function sets both minimum buffer count and actual buffer count of
     // input port to be |numInputBuffers|. It will also set both minimum buffer
     // count and actual buffer count of output port to be |numOutputBuffers|.
@@ -166,6 +171,9 @@
     // Helper function to dump the ColorAspects.
     void dumpColorAspects(const ColorAspects &colorAspects);
 
+    sp<ABuffer> dequeueInputFrameConfig();
+    void queueOutputFrameConfig(const sp<ABuffer> &info);
+
 private:
     uint32_t mMinInputBufferSize;
     uint32_t mMinCompressionRatio;
@@ -174,6 +182,9 @@
     OMX_VIDEO_CODINGTYPE mCodingType;
     const CodecProfileLevel *mProfileLevels;
     size_t mNumProfileLevels;
+    typedef List<sp<ABuffer> > Hdr10PlusInfoList;
+    Hdr10PlusInfoList mHdr10PlusInputs;
+    Hdr10PlusInfoList mHdr10PlusOutputs;
 
     DISALLOW_EVIL_CONSTRUCTORS(SoftVideoDecoderOMXComponent);
 };
diff --git a/media/libstagefright/xmlparser/Android.bp b/media/libstagefright/xmlparser/Android.bp
index b55dbb0..bebfb3b 100644
--- a/media/libstagefright/xmlparser/Android.bp
+++ b/media/libstagefright/xmlparser/Android.bp
@@ -1,3 +1,9 @@
+cc_library_headers {
+    name: "libstagefright_xmlparser_headers",
+    export_include_dirs: ["include"],
+    vendor_available: true,
+}
+
 cc_library_shared {
     name: "libstagefright_xmlparser",
     vendor_available: true,
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 6976950..73bd2ca 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -88,7 +88,6 @@
         "libandroid",
         "libandroid_runtime",
         "libbinder",
-        "libhwbinder",
         "libhidlbase",
         "libgui",
         "libui",
@@ -141,10 +140,6 @@
     ],
 
     shared_libs: [
-        "libstagefright_foundation",
-        "liblog",
-        "libutils",
-        "libcutils",
     ],
 
     sanitize: {
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index b05e022..a11602b 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -27,7 +27,6 @@
     ],
     shared_libs: [
         "libbinder",
-        "libcutils",
         "liblog",
         "libutils",
         "libmemunreachable",
diff --git a/packages/MediaComponents/apex/Android.bp b/packages/MediaComponents/apex/Android.bp
index e797e14..d89eb77 100644
--- a/packages/MediaComponents/apex/Android.bp
+++ b/packages/MediaComponents/apex/Android.bp
@@ -9,6 +9,8 @@
         // "Refusing to generate code with unstructured parcelables."
         "java/android/media/MediaDescription.aidl",
         "java/android/media/MediaMetadata.aidl",
+        // TODO(insun): check why MediaParceledListSlice.aidl should be added here
+        "java/android/media/MediaParceledListSlice.aidl",
         "java/android/media/Rating.aidl",
         "java/android/media/browse/MediaBrowser.aidl",
         "java/android/media/session/MediaSession.aidl",
diff --git a/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl
new file mode 100644
index 0000000..228ea9c
--- /dev/null
+++ b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl
@@ -0,0 +1,19 @@
+/* Copyright (C) 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/** @hide */
+parcelable MediaParceledListSlice;
\ No newline at end of file
diff --git a/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java
new file mode 100644
index 0000000..ec3fdb7
--- /dev/null
+++ b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.annotation.UnsupportedAppUsage;
+import android.os.Binder;
+import android.os.Build;
+import android.os.IBinder;
+import android.os.Parcel;
+import android.os.Parcelable;
+import android.os.RemoteException;
+import android.util.Log;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Transfer a large list of objects across an IPC. Splits into multiple transactions if needed.
+ * Note: Only use classes declared final in order to avoid subclasses overriding reading/writing
+ * parcel logic.
+ *
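+ * A minimal usage sketch: wrap a {@code List<T>} of Parcelables in a MediaParceledListSlice,
+ * write it with {@link #writeToParcel}, and call {@link #getList()} after it has been read
+ * back on the receiving side of the IPC.
+ *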
+ * TODO: Add test for sending large data
+ * @hide
+ */
+public class MediaParceledListSlice<T extends Parcelable> implements Parcelable {
+    private static final String TAG = "MediaParceledListSlice";
+    private static final boolean DEBUG = false;
+
+    private static final int MAX_IPC_SIZE = 64 * 1024; // IBinder.MAX_IPC_SIZE
+
+    final List<T> mList;
+
+    public MediaParceledListSlice(List<T> list) {
+        if (list == null) {
+            throw new IllegalArgumentException("list shouldn't be null");
+        }
+        mList = list;
+    }
+
+    MediaParceledListSlice(Parcel p) {
+        final int itemCount = p.readInt();
+        mList = new ArrayList<>(itemCount);
+        if (DEBUG) {
+            Log.d(TAG, "Retrieving " + itemCount + " items");
+        }
+        if (itemCount <= 0) {
+            return;
+        }
+
+        int i = 0;
+        while (i < itemCount) {
+            if (p.readInt() == 0) {
+                break;
+            }
+
+            final T parcelable = p.readParcelable(null);
+            mList.add(parcelable);
+
+            if (DEBUG) {
+                Log.d(TAG, "Read inline #" + i + ": " + mList.get(mList.size() - 1));
+            }
+            i++;
+        }
+        if (i >= itemCount) {
+            return;
+        }
+        final IBinder retriever = p.readStrongBinder();
+        while (i < itemCount) {
+            if (DEBUG) {
+                Log.d(TAG, "Reading more @" + i + " of " + itemCount + ": retriever=" + retriever);
+            }
+            Parcel data = Parcel.obtain();
+            Parcel reply = Parcel.obtain();
+            data.writeInt(i);
+            try {
+                retriever.transact(IBinder.FIRST_CALL_TRANSACTION, data, reply, 0);
+            } catch (RemoteException e) {
+                Log.w(TAG, "Failure retrieving array; only received " + i + " of " + itemCount, e);
+                return;
+            }
+            while (i < itemCount && reply.readInt() != 0) {
+                final T parcelable = reply.readParcelable(null);
+                mList.add(parcelable);
+
+                if (DEBUG) {
+                    Log.d(TAG, "Read extra #" + i + ": " + mList.get(mList.size() - 1));
+                }
+                i++;
+            }
+            reply.recycle();
+            data.recycle();
+        }
+    }
+
+    public List<T> getList() {
+        return mList;
+    }
+
+    /**
+     * Write this list to a Parcel. Items that fit within the IPC size limit are
+     * written inline; the remaining items are served lazily through a retriever
+     * Binder that the reading side transacts with to fetch the rest of the list.
+     */
+    @Override
+    public void writeToParcel(Parcel dest, int flags) {
+        final int itemCount = mList.size();
+        dest.writeInt(itemCount);
+        if (DEBUG) {
+            Log.d(TAG, "Writing " + itemCount + " items");
+        }
+        if (itemCount > 0) {
+            int i = 0;
+            while (i < itemCount && dest.dataSize() < MAX_IPC_SIZE) {
+                dest.writeInt(1);
+
+                final T parcelable = mList.get(i);
+                dest.writeParcelable(parcelable, flags);
+
+                if (DEBUG) {
+                    Log.d(TAG, "Wrote inline #" + i + ": " + mList.get(i));
+                }
+                i++;
+            }
+            if (i < itemCount) {
+                dest.writeInt(0);
+                Binder retriever = new Binder() {
+                    @Override
+                    protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
+                            throws RemoteException {
+                        if (code != FIRST_CALL_TRANSACTION) {
+                            return super.onTransact(code, data, reply, flags);
+                        }
+                        int i = data.readInt();
+                        if (DEBUG) {
+                            Log.d(TAG, "Writing more @" + i + " of " + itemCount);
+                        }
+                        while (i < itemCount && reply.dataSize() < MAX_IPC_SIZE) {
+                            reply.writeInt(1);
+
+                            final T parcelable = mList.get(i);
+                            reply.writeParcelable(parcelable, flags);
+
+                            if (DEBUG) {
+                                Log.d(TAG, "Wrote extra #" + i + ": " + mList.get(i));
+                            }
+                            i++;
+                        }
+                        if (i < itemCount) {
+                            if (DEBUG) {
+                                Log.d(TAG, "Breaking @" + i + " of " + itemCount);
+                            }
+                            reply.writeInt(0);
+                        }
+                        return true;
+                    }
+                };
+                if (DEBUG) {
+                    Log.d(TAG, "Breaking @" + i + " of " + itemCount + ": retriever=" + retriever);
+                }
+                dest.writeStrongBinder(retriever);
+            }
+        }
+    }
+
+    @Override
+    public int describeContents() {
+        int contents = 0;
+        final List<T> list = getList();
+        for (int i = 0; i < list.size(); i++) {
+            contents |= list.get(i).describeContents();
+        }
+        return contents;
+    }
+
+    public static final Parcelable.Creator<MediaParceledListSlice> CREATOR =
+            new Parcelable.Creator<MediaParceledListSlice>() {
+        @Override
+        public MediaParceledListSlice createFromParcel(Parcel in) {
+            return new MediaParceledListSlice(in);
+        }
+
+        @Override
+        public MediaParceledListSlice[] newArray(int size) {
+            return new MediaParceledListSlice[size];
+        }
+    };
+}
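
For reference, a minimal in-process sketch of the chunked-transfer protocol implemented above: writeToParcel() inlines items until the parcel nears MAX_IPC_SIZE, then appends a retriever Binder that the reading constructor transacts with to page in the remaining items. MediaParceledListSlice is @hide, so this is framework-internal usage only; the QueueSliceExample class, the choice of QueueItem as the element type, and the local Parcel round trip are illustrative assumptions, not part of this change.

    import android.media.MediaParceledListSlice;
    import android.media.session.MediaSession;
    import android.os.Parcel;

    import java.util.List;

    final class QueueSliceExample {
        // Round-trips a list of queue items through MediaParceledListSlice in-process.
        static List<MediaSession.QueueItem> roundTrip(List<MediaSession.QueueItem> items) {
            Parcel parcel = Parcel.obtain();
            try {
                // Sending side: items are written inline until the parcel nears
                // MAX_IPC_SIZE; a retriever Binder is appended for the remainder.
                new MediaParceledListSlice<>(items).writeToParcel(parcel, 0);

                // Receiving side: reads the inline items, then transacts with the
                // retriever Binder to page in whatever did not fit inline.
                parcel.setDataPosition(0);
                MediaParceledListSlice<MediaSession.QueueItem> slice =
                        MediaParceledListSlice.CREATOR.createFromParcel(parcel);
                return slice.getList();
            } finally {
                parcel.recycle();
            }
        }
    }
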
diff --git a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java b/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java
index 4e091ad..b1b14c6 100644
--- a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java
+++ b/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java
@@ -23,8 +23,8 @@
 import android.content.Context;
 import android.content.Intent;
 import android.content.ServiceConnection;
-//import android.content.pm.ParceledListSlice;
 import android.media.MediaDescription;
+import android.media.MediaParceledListSlice;
 import android.media.session.MediaController;
 import android.media.session.MediaSession;
 import android.os.Binder;
@@ -652,10 +652,8 @@
         });
     }
 
-    //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-    /*
     private final void onLoadChildren(final IMediaBrowserServiceCallbacks callback,
-            final String parentId, final ParceledListSlice list, final Bundle options) {
+            final String parentId, final MediaParceledListSlice list, final Bundle options) {
         mHandler.post(new Runnable() {
             @Override
             public void run() {
@@ -699,7 +697,6 @@
             }
         });
     }
-    */
 
     /**
      * Return true if {@code callback} is the current ServiceCallbacks. Also logs if it's not.
@@ -1109,22 +1106,19 @@
             }
         }
 
-        //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-        /*
         @Override
-        public void onLoadChildren(String parentId, ParceledListSlice list) {
+        public void onLoadChildren(String parentId, MediaParceledListSlice list) {
             onLoadChildrenWithOptions(parentId, list, null);
         }
 
         @Override
-        public void onLoadChildrenWithOptions(String parentId, ParceledListSlice list,
+        public void onLoadChildrenWithOptions(String parentId, MediaParceledListSlice list,
                 final Bundle options) {
             MediaBrowser mediaBrowser = mMediaBrowser.get();
             if (mediaBrowser != null) {
                 mediaBrowser.onLoadChildren(this, parentId, list, options);
             }
         }
-        */
     }
 
     private static class Subscription {
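
With onLoadChildren() restored above, children delivered by the service arrive wrapped in a MediaParceledListSlice and are unwrapped before the public SubscriptionCallback fires, so app code is unaffected. A short sketch of that public client-side surface, assuming an already-connected MediaBrowser (the BrowserClientExample class and the use of the root id are placeholders):

    import android.media.browse.MediaBrowser;

    import java.util.List;

    final class BrowserClientExample {
        // mediaBrowser must already be connected before getRoot()/subscribe() are called.
        static void subscribeToRoot(MediaBrowser mediaBrowser) {
            mediaBrowser.subscribe(mediaBrowser.getRoot(), new MediaBrowser.SubscriptionCallback() {
                @Override
                public void onChildrenLoaded(String parentId,
                        List<MediaBrowser.MediaItem> children) {
                    // 'children' was delivered by the service through
                    // onLoadChildrenWithOptions() as a MediaParceledListSlice and
                    // unwrapped by MediaBrowser before reaching this callback.
                }

                @Override
                public void onError(String parentId) {
                    // The load for parentId failed.
                }
            });
        }
    }
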
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISession.aidl b/packages/MediaComponents/apex/java/android/media/session/ISession.aidl
index 6363ed0..14b1c64 100644
--- a/packages/MediaComponents/apex/java/android/media/session/ISession.aidl
+++ b/packages/MediaComponents/apex/java/android/media/session/ISession.aidl
@@ -16,9 +16,9 @@
 package android.media.session;
 
 import android.app.PendingIntent;
-import android.content.pm.ParceledListSlice;
 //import android.media.AudioAttributes;
 import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
 import android.media.session.ISessionController;
 import android.media.session.PlaybackState;
 import android.media.session.MediaSession;
@@ -41,8 +41,7 @@
     // These commands are for the TransportPerformer
     void setMetadata(in MediaMetadata metadata, long duration, String metadataDescription);
     void setPlaybackState(in PlaybackState state);
-    //TODO(b/119750807): Resolve hidden API usage ParceledListSlice.
-    //void setQueue(in ParceledListSlice queue);
+    void setQueue(in MediaParceledListSlice queue);
     void setQueueTitle(CharSequence title);
     void setExtras(in Bundle extras);
     void setRatingType(int type);
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl
index 031a388..74897f7 100644
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl
+++ b/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl
@@ -17,8 +17,8 @@
 
 import android.app.PendingIntent;
 import android.content.Intent;
-//import android.content.pm.ParceledListSlice;
 import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
 import android.media.Rating;
 import android.media.session.ISessionControllerCallback;
 import android.media.session.MediaSession;
@@ -81,8 +81,7 @@
             String action, in Bundle args);
     MediaMetadata getMetadata();
     PlaybackState getPlaybackState();
-    //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-    //ParceledListSlice getQueue();
+    MediaParceledListSlice getQueue();
     CharSequence getQueueTitle();
     Bundle getExtras();
     int getRatingType();
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl
index 173504b..f5cc4f6 100644
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl
+++ b/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl
@@ -15,8 +15,8 @@
 
 package android.media.session;
 
-//import android.content.pm.ParceledListSlice;
 import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
 import android.media.session.ParcelableVolumeInfo;
 import android.media.session.PlaybackState;
 import android.media.session.MediaSession;
@@ -32,8 +32,7 @@
     // These callbacks are for the TransportController
     void onPlaybackStateChanged(in PlaybackState state);
     void onMetadataChanged(in MediaMetadata metadata);
-    //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-    //void onQueueChanged(in ParceledListSlice queue);
+    void onQueueChanged(in MediaParceledListSlice queue);
     void onQueueTitleChanged(CharSequence title);
     void onExtrasChanged(in Bundle extras);
     void onVolumeInfoChanged(in ParcelableVolumeInfo info);
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaController.java b/packages/MediaComponents/apex/java/android/media/session/MediaController.java
index 60f74ab..8c3a013 100644
--- a/packages/MediaComponents/apex/java/android/media/session/MediaController.java
+++ b/packages/MediaComponents/apex/java/android/media/session/MediaController.java
@@ -21,10 +21,10 @@
 import android.annotation.UnsupportedAppUsage;
 import android.app.PendingIntent;
 import android.content.Context;
-//import android.content.pm.ParceledListSlice;
 import android.media.AudioAttributes;
 import android.media.AudioManager;
 import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
 import android.media.Rating;
 import android.media.VolumeProvider;
 import android.net.Uri;
@@ -243,17 +243,14 @@
      * @return The current play queue or null.
      */
     public @Nullable List<MediaSession.QueueItem> getQueue() {
-        //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-        /*
         try {
-            ParceledListSlice queue = mSessionBinder.getQueue();
+            MediaParceledListSlice queue = mSessionBinder.getQueue();
             if (queue != null) {
                 return queue.getList();
             }
         } catch (RemoteException e) {
             Log.wtf(TAG, "Error calling getQueue.", e);
         }
-        */
         return null;
     }
 
@@ -1102,10 +1099,8 @@
             }
         }
 
-        //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-        /*
         @Override
-        public void onQueueChanged(ParceledListSlice parceledQueue) {
+        public void onQueueChanged(MediaParceledListSlice parceledQueue) {
             List<MediaSession.QueueItem> queue = parceledQueue == null ? null : parceledQueue
                     .getList();
             MediaController controller = mController.get();
@@ -1113,7 +1108,6 @@
                 controller.postMessage(MSG_UPDATE_QUEUE, queue, null);
             }
         }
-        */
 
         @Override
         public void onQueueTitleChanged(CharSequence title) {
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
index 4ebfb8e..943843d 100644
--- a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
+++ b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
@@ -24,10 +24,10 @@
 import android.app.PendingIntent;
 import android.content.Context;
 import android.content.Intent;
-//import android.content.pm.ParceledListSlice;
 import android.media.AudioAttributes;
 import android.media.MediaDescription;
 import android.media.MediaMetadata;
+import android.media.MediaParceledListSlice;
 import android.media.Rating;
 import android.media.VolumeProvider;
 import android.media.session.MediaSessionManager.RemoteUserInfo;
@@ -471,14 +471,11 @@
      * @param queue A list of items in the play queue.
      */
     public void setQueue(@Nullable List<QueueItem> queue) {
-        //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-        /*
         try {
-            mBinder.setQueue(queue == null ? null : new ParceledListSlice<QueueItem>(queue));
+            mBinder.setQueue(queue == null ? null : new MediaParceledListSlice<QueueItem>(queue));
         } catch (RemoteException e) {
             Log.wtf("Dead object in setQueue.", e);
         }
-        */
     }
 
     /**
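
The setQueue()/getQueue() pair re-enabled above now crosses the ISession/ISessionController binders as a MediaParceledListSlice instead of the hidden ParceledListSlice; from the public API the behavior is unchanged. A brief sketch, with placeholder media id and title, of a session publishing a queue and a controller reading it back:

    import android.media.MediaDescription;
    import android.media.session.MediaController;
    import android.media.session.MediaSession;

    import java.util.ArrayList;
    import java.util.List;

    final class QueueExample {
        // Session side: publish a one-item queue; the framework wraps the list in a
        // MediaParceledListSlice before it crosses the ISession binder.
        static void publishQueue(MediaSession session) {
            List<MediaSession.QueueItem> queue = new ArrayList<>();
            queue.add(new MediaSession.QueueItem(
                    new MediaDescription.Builder()
                            .setMediaId("track-1")      // placeholder media id
                            .setTitle("First track")    // placeholder title
                            .build(),
                    1 /* queue id */));
            session.setQueue(queue);
        }

        // Controller side: getQueue() returns the unwrapped list, or null if no
        // queue has been set.
        static int queueSize(MediaController controller) {
            List<MediaSession.QueueItem> queue = controller.getQueue();
            return queue == null ? 0 : queue.size();
        }
    }
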
diff --git a/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl b/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl
index bcc2826..8dc480d 100644
--- a/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl
+++ b/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl
@@ -2,8 +2,8 @@
 
 package android.service.media;
 
-//import android.content.pm.ParceledListSlice;
 import android.graphics.Bitmap;
+import android.media.MediaParceledListSlice;
 import android.media.session.MediaSession;
 import android.os.Bundle;
 
@@ -22,7 +22,7 @@
      */
     void onConnect(String root, in MediaSession.Token session, in Bundle extras);
     void onConnectFailed();
-    //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-    //void onLoadChildren(String mediaId, in ParceledListSlice list);
-    //void onLoadChildrenWithOptions(String mediaId, in ParceledListSlice list, in Bundle options);
+    void onLoadChildren(String mediaId, in MediaParceledListSlice list);
+    void onLoadChildrenWithOptions(String mediaId, in MediaParceledListSlice list,
+            in Bundle options);
 }
diff --git a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
index fa7696e..a66ec35 100644
--- a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
+++ b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
@@ -25,7 +25,7 @@
 import android.app.Service;
 import android.content.Intent;
 import android.content.pm.PackageManager;
-//import android.content.pm.ParceledListSlice;
+import android.media.MediaParceledListSlice;
 import android.media.browse.MediaBrowser;
 import android.media.browse.MediaBrowserUtils;
 import android.media.session.MediaSession;
@@ -687,10 +687,8 @@
                 List<MediaBrowser.MediaItem> filteredList =
                         (flag & RESULT_FLAG_OPTION_NOT_HANDLED) != 0
                         ? applyOptions(list, options) : list;
-                //TODO:(b/119750807) Resolve hidden API usage ParceledListSlice.
-                /*
-                final ParceledListSlice<MediaBrowser.MediaItem> pls =
-                        filteredList == null ? null : new ParceledListSlice<>(filteredList);
+                final MediaParceledListSlice<MediaBrowser.MediaItem> pls =
+                        filteredList == null ? null : new MediaParceledListSlice<>(filteredList);
                 try {
                     connection.callbacks.onLoadChildrenWithOptions(parentId, pls, options);
                 } catch (RemoteException ex) {
@@ -698,7 +696,6 @@
                     Log.w(TAG, "Calling onLoadChildren() failed for id=" + parentId
                             + " package=" + connection.pkg);
                 }
-                */
             }
         };
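
On the service side, the hunk above wraps the (possibly filtered) child list in a MediaParceledListSlice before invoking onLoadChildrenWithOptions() on the browser callbacks. A minimal public-API sketch of the service code that produces such a list (ExampleBrowserService and the root id are placeholders; population of the children is elided):

    import android.media.browse.MediaBrowser;
    import android.os.Bundle;
    import android.service.media.MediaBrowserService;

    import java.util.ArrayList;
    import java.util.List;

    public class ExampleBrowserService extends MediaBrowserService {
        @Override
        public BrowserRoot onGetRoot(String clientPackageName, int clientUid, Bundle rootHints) {
            return new BrowserRoot("root", null);   // placeholder root id, no extras
        }

        @Override
        public void onLoadChildren(String parentId, Result<List<MediaBrowser.MediaItem>> result) {
            List<MediaBrowser.MediaItem> children = new ArrayList<>();
            // ... populate 'children' for parentId ...
            // MediaBrowserService wraps this list in a MediaParceledListSlice before
            // sending it to the connected browser's callbacks.
            result.sendResult(children);
        }
    }
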
 
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 02ab8ad..bfa1b5e 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -25,7 +25,11 @@
     libmedia_helper \
     libmediametrics \
     libmediautils \
-    libeffectsconfig
+    libeffectsconfig \
+    libsensorprivacy
+
+LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := \
+    libsensorprivacy
 
 LOCAL_STATIC_LIBRARIES := \
     libaudiopolicycomponents
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index ea6389c..ad12a90 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -332,10 +332,13 @@
     virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
 
     virtual void onRecordingConfigurationUpdate(int event,
-                    const record_client_info_t *clientInfo,
-                    const struct audio_config_base *clientConfig,
-                    const struct audio_config_base *deviceConfig,
-                    audio_patch_handle_t patchHandle) = 0;
+                                                const record_client_info_t *clientInfo,
+                                                const audio_config_base_t *clientConfig,
+                                                std::vector<effect_descriptor_t> clientEffects,
+                                                const audio_config_base_t *deviceConfig,
+                                                std::vector<effect_descriptor_t> effects,
+                                                audio_patch_handle_t patchHandle,
+                                                audio_source_t source) = 0;
 };
 
 extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 9f8b8c0..fa9ba0b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -23,6 +23,7 @@
 #include "AudioIODescriptorInterface.h"
 #include "AudioPort.h"
 #include "ClientDescriptor.h"
+#include "EffectDescriptor.h"
 
 namespace android {
 
@@ -62,7 +63,8 @@
     bool isSoundTrigger() const;
     void setClientActive(const sp<RecordClientDescriptor>& client, bool active);
     int32_t activeCount() { return mGlobalActiveCount; }
-
+    void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
+    EffectDescriptorCollection getEnabledEffects() const;
     // implementation of AudioIODescriptorInterface
     audio_config_base_t getConfig() const override;
     audio_patch_handle_t getPatchHandle() const override;
@@ -86,6 +88,11 @@
     RecordClientVector clientsList(bool activeOnly = false,
         audio_source_t source = AUDIO_SOURCE_DEFAULT, bool preferredDeviceOnly = false) const;
 
+    void setAppState(uid_t uid, app_state_t state);
+
+    // implementation of ClientMapHandler<RecordClientDescriptor>
+    void addClient(const sp<RecordClientDescriptor> &client) override;
+
  private:
 
     void updateClientRecordingConfiguration(int event, const sp<RecordClientDescriptor>& client);
@@ -101,6 +108,7 @@
     SortedVector<audio_session_t> mPreemptedSessions;
     AudioPolicyClientInterface * const mClientInterface;
     int32_t mGlobalActiveCount = 0;  // non-client-specific activity ref count
+    EffectDescriptorCollection mEnabledEffects;
 };
 
 class AudioInputCollection :
@@ -126,6 +134,8 @@
 
     sp<AudioInputDescriptor> getInputForClient(audio_port_handle_t portId);
 
+    void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
+
     void dump(String8 *dst) const;
 };
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 986d109..a187029 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -28,6 +28,7 @@
 #include <utils/RefBase.h>
 #include <utils/String8.h>
 #include "AudioPatch.h"
+#include "EffectDescriptor.h"
 #include "RoutingStrategy.h"
 
 namespace android {
@@ -119,13 +120,15 @@
     void setAppState(app_state_t appState) { mAppState = appState; }
     app_state_t appState() { return mAppState; }
     bool isSilenced() const { return mAppState == APP_STATE_IDLE; }
+    void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
+    EffectDescriptorCollection getEnabledEffects() const { return mEnabledEffects; }
 
 private:
     const audio_source_t mSource;
     const audio_input_flags_t mFlags;
     const bool mIsSoundTrigger;
           app_state_t mAppState;
-
+    EffectDescriptorCollection mEnabledEffects;
 };
 
 class SourceClientDescriptor: public TrackClientDescriptor
@@ -172,7 +175,7 @@
     virtual ~ClientMapHandler() = default;
 
     // Track client management
-    void addClient(const sp<T> &client) {
+    virtual void addClient(const sp<T> &client) {
         const audio_port_handle_t portId = client->portId();
         LOG_ALWAYS_FATAL_IF(!mClients.emplace(portId, client).second,
                 "%s(%d): attempting to add client that already exists", __func__, portId);
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index 9fa7486..2dc33ab 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -25,12 +25,12 @@
 
 namespace android {
 
-
 class EffectDescriptor : public RefBase
 {
 public:
-    void dump(String8 *dst) const;
+    void dump(String8 *dst, int spaces = 0) const;
 
+    int mId;                // effect unique ID
     int mIo;                // io the effect is attached to
     routing_strategy mStrategy; // routing strategy the effect is associated to
     int mSession;               // audio session the effect is on
@@ -46,12 +46,14 @@
     status_t registerEffect(const effect_descriptor_t *desc, audio_io_handle_t io,
                             uint32_t strategy, int session, int id);
     status_t unregisterEffect(int id);
+    sp<EffectDescriptor> getEffect(int id) const;
     status_t setEffectEnabled(int id, bool enabled);
+    bool     isEffectEnabled(int id) const;
     uint32_t getMaxEffectsCpuLoad() const;
     uint32_t getMaxEffectsMemory() const;
-    bool isNonOffloadableEffectEnabled();
+    bool isNonOffloadableEffectEnabled() const;
 
-    void dump(String8 *dst) const;
+    void dump(String8 *dst, int spaces = 0, bool verbose = true) const;
 
 private:
     status_t setEffectEnabled(const sp<EffectDescriptor> &effectDesc, bool enabled);
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 559274f..0bc88a5 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -269,6 +269,16 @@
     }
 }
 
+void AudioInputDescriptor::addClient(const sp<RecordClientDescriptor> &client) {
+    ClientMapHandler<RecordClientDescriptor>::addClient(client);
+
+    for (size_t i = 0; i < mEnabledEffects.size(); i++) {
+        if (mEnabledEffects.valueAt(i)->mSession == client->session()) {
+            client->trackEffectEnabled(mEnabledEffects.valueAt(i), true);
+        }
+    }
+}
+
 void AudioInputDescriptor::setClientActive(const sp<RecordClientDescriptor>& client, bool active)
 {
     LOG_ALWAYS_FATAL_IF(getClient(client->portId()) == nullptr,
@@ -312,11 +322,26 @@
     int event, const sp<RecordClientDescriptor>& client)
 {
     const audio_config_base_t sessionConfig = client->config();
-    const record_client_info_t recordClientInfo{client->uid(), client->session(), client->source()};
+    const record_client_info_t recordClientInfo{client->uid(), client->session(),
+                                                client->source(), client->portId(),
+                                                client->isSilenced()};
     const audio_config_base_t config = getConfig();
-    mClientInterface->onRecordingConfigurationUpdate(event,
-                                                     &recordClientInfo, &sessionConfig,
-                                                     &config, mPatchHandle);
+
+    std::vector<effect_descriptor_t> clientEffects;
+    EffectDescriptorCollection effectsList = client->getEnabledEffects();
+    for (size_t i = 0; i < effectsList.size(); i++) {
+        clientEffects.push_back(effectsList.valueAt(i)->mDesc);
+    }
+
+    std::vector<effect_descriptor_t> effects;
+    effectsList = getEnabledEffects();
+    for (size_t i = 0; i < effectsList.size(); i++) {
+        effects.push_back(effectsList.valueAt(i)->mDesc);
+    }
+
+    mClientInterface->onRecordingConfigurationUpdate(event, &recordClientInfo, &sessionConfig,
+                                                     clientEffects, &config, effects,
+                                                     mPatchHandle, source());
 }
 
 RecordClientVector AudioInputDescriptor::getClientsForSession(
@@ -345,6 +370,53 @@
     return clients;
 }
 
+void AudioInputDescriptor::trackEffectEnabled(const sp<EffectDescriptor> &effect,
+                                              bool enabled)
+{
+    if (enabled) {
+        mEnabledEffects.replaceValueFor(effect->mId, effect);
+    } else {
+        mEnabledEffects.removeItem(effect->mId);
+    }
+
+    RecordClientVector clients = getClientsForSession((audio_session_t)effect->mSession);
+    for (const auto& client : clients) {
+        sp<EffectDescriptor> clientEffect = client->getEnabledEffects().getEffect(effect->mId);
+        bool changed = (enabled && clientEffect == nullptr)
+                || (!enabled && clientEffect != nullptr);
+        client->trackEffectEnabled(effect, enabled);
+        if (changed && client->active()) {
+            updateClientRecordingConfiguration(RECORD_CONFIG_EVENT_START, client);
+        }
+    }
+}
+
+EffectDescriptorCollection AudioInputDescriptor::getEnabledEffects() const
+{
+    EffectDescriptorCollection enabledEffects;
+    // report effects for highest priority active source as applied to all clients
+    RecordClientVector clients =
+        clientsList(true /*activeOnly*/, source(), false /*preferredDeviceOnly*/);
+    if (clients.size() > 0) {
+        enabledEffects = clients[0]->getEnabledEffects();
+    }
+    return enabledEffects;
+}
+
+void AudioInputDescriptor::setAppState(uid_t uid, app_state_t state) {
+    RecordClientVector clients = clientsList(false /*activeOnly*/);
+
+    for (const auto& client : clients) {
+        if (uid == client->uid()) {
+            bool wasSilenced = client->isSilenced();
+            client->setAppState(state);
+            if (client->active() && wasSilenced != client->isSilenced()) {
+                updateClientRecordingConfiguration(RECORD_CONFIG_EVENT_START, client);
+            }
+        }
+    }
+}
+
 void AudioInputDescriptor::dump(String8 *dst) const
 {
     dst->appendFormat(" ID: %d\n", getId());
@@ -352,6 +424,7 @@
     dst->appendFormat(" Format: %d\n", mFormat);
     dst->appendFormat(" Channels: %08x\n", mChannelMask);
     dst->appendFormat(" Devices %08x\n", mDevice);
+    getEnabledEffects().dump(dst, 1 /*spaces*/, false /*verbose*/);
     dst->append(" AudioRecord Clients:\n");
     ClientMapHandler<RecordClientDescriptor>::dump(dst);
     dst->append("\n");
@@ -424,6 +497,17 @@
     return 0;
 }
 
+void AudioInputCollection::trackEffectEnabled(const sp<EffectDescriptor> &effect,
+                                            bool enabled)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioInputDescriptor> inputDesc = valueAt(i);
+        if (inputDesc->mIoHandle == effect->mIo) {
+            return inputDesc->trackEffectEnabled(effect, enabled);
+        }
+    }
+}
+
 void AudioInputCollection::dump(String8 *dst) const
 {
     dst->append("\nInputs dump:\n");
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 815612d..82d64c9 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -63,10 +63,20 @@
     return ss.str();
 }
 
+void RecordClientDescriptor::trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled)
+{
+    if (enabled) {
+        mEnabledEffects.replaceValueFor(effect->mId, effect);
+    } else {
+        mEnabledEffects.removeItem(effect->mId);
+    }
+}
+
 void RecordClientDescriptor::dump(String8 *dst, int spaces, int index) const
 {
     ClientDescriptor::dump(dst, spaces, index);
     dst->appendFormat("%*s- Source: %d flags: %08x\n", spaces, "", mSource, mFlags);
+    mEnabledEffects.dump(dst, spaces + 2 /*spaces*/, false /*verbose*/);
 }
 
 SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 8bbb798..40c49e7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -22,13 +22,13 @@
 
 namespace android {
 
-void EffectDescriptor::dump(String8 *dst) const
+void EffectDescriptor::dump(String8 *dst, int spaces) const
 {
-    dst->appendFormat(" I/O: %d\n", mIo);
-    dst->appendFormat(" Strategy: %d\n", mStrategy);
-    dst->appendFormat(" Session: %d\n", mSession);
-    dst->appendFormat(" Name: %s\n",  mDesc.name);
-    dst->appendFormat(" %s\n",  mEnabled ? "Enabled" : "Disabled");
+    dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
+    dst->appendFormat("%*sStrategy: %d\n", spaces, "", mStrategy);
+    dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
+    dst->appendFormat("%*sName: %s\n", spaces, "",  mDesc.name);
+    dst->appendFormat("%*s%s\n", spaces, "",  mEnabled ? "Enabled" : "Disabled");
 }
 
 EffectDescriptorCollection::EffectDescriptorCollection() :
@@ -45,6 +45,11 @@
                                                     int session,
                                                     int id)
 {
+    if (getEffect(id) != nullptr) {
+        ALOGW("%s effect %s already registered", __FUNCTION__, desc->name);
+        return INVALID_OPERATION;
+    }
+
     if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
         ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
                 desc->name, desc->memoryUsage);
@@ -60,6 +65,7 @@
 
     sp<EffectDescriptor> effectDesc = new EffectDescriptor();
     memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t));
+    effectDesc->mId = id;
     effectDesc->mIo = io;
     effectDesc->mStrategy = static_cast<routing_strategy>(strategy);
     effectDesc->mSession = session;
@@ -70,18 +76,23 @@
     return NO_ERROR;
 }
 
-status_t EffectDescriptorCollection::unregisterEffect(int id)
+sp<EffectDescriptor> EffectDescriptorCollection::getEffect(int id) const
 {
     ssize_t index = indexOfKey(id);
     if (index < 0) {
-        ALOGW("unregisterEffect() unknown effect ID %d", id);
+        return nullptr;
+    }
+    return valueAt(index);
+}
+
+status_t EffectDescriptorCollection::unregisterEffect(int id)
+{
+    sp<EffectDescriptor> effectDesc = getEffect(id);
+    if (effectDesc == nullptr) {
+        ALOGW("%s unknown effect ID %d", __FUNCTION__, id);
         return INVALID_OPERATION;
     }
 
-    sp<EffectDescriptor> effectDesc = valueAt(index);
-
-    setEffectEnabled(effectDesc, false);
-
     if (mTotalEffectsMemory < effectDesc->mDesc.memoryUsage) {
         ALOGW("unregisterEffect() memory %d too big for total %d",
                 effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
@@ -107,6 +118,14 @@
     return setEffectEnabled(valueAt(index), enabled);
 }
 
+bool EffectDescriptorCollection::isEffectEnabled(int id) const
+{
+    ssize_t index = indexOfKey(id);
+    if (index < 0) {
+        return false;
+    }
+    return valueAt(index)->mEnabled;
+}
 
 status_t EffectDescriptorCollection::setEffectEnabled(const sp<EffectDescriptor> &effectDesc,
                                                       bool enabled)
@@ -138,7 +157,7 @@
     return NO_ERROR;
 }
 
-bool EffectDescriptorCollection::isNonOffloadableEffectEnabled()
+bool EffectDescriptorCollection::isNonOffloadableEffectEnabled() const
 {
     for (size_t i = 0; i < size(); i++) {
         sp<EffectDescriptor> effectDesc = valueAt(i);
@@ -162,15 +181,21 @@
     return MAX_EFFECTS_MEMORY;
 }
 
-void EffectDescriptorCollection::dump(String8 *dst) const
+void EffectDescriptorCollection::dump(String8 *dst, int spaces, bool verbose) const
 {
-    dst->appendFormat(
-            "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB, Max memory used: %d KB\n",
-             (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory, mTotalEffectsMemoryMaxUsed);
-    dst->append("Registered effects:\n");
+    if (verbose) {
+        dst->appendFormat(
+            "\n%*sTotal Effects CPU: %f MIPS, "
+            "Total Effects memory: %d KB, Max memory used: %d KB\n",
+            spaces, "",
+            (float) mTotalEffectsCpuLoad / 10,
+            mTotalEffectsMemory,
+            mTotalEffectsMemoryMaxUsed);
+    }
+    dst->appendFormat("%*sEffects:\n", spaces, "");
     for (size_t i = 0; i < size(); i++) {
-        dst->appendFormat("- Effect %d dump:\n", keyAt(i));
-        valueAt(i)->dump(dst);
+        dst->appendFormat("%*s- Effect %d:\n", spaces, "", keyAt(i));
+        valueAt(i)->dump(dst, spaces + 2);
     }
 }
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 64a2b8a..aa205f0 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -1313,6 +1313,10 @@
     audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
     audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
 
+    // Flags which must be present on both the request and the selected output
+    static const audio_output_flags_t kMandatedFlags = (audio_output_flags_t)
+        (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ);
+
     for (audio_io_handle_t output : outputs) {
         sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
         if (!outputDesc->isDuplicated()) {
@@ -1336,6 +1340,10 @@
                     continue;
                 }
             }
+            if ((kMandatedFlags & flags) !=
+                (kMandatedFlags & outputDesc->mProfile->getFlags())) {
+                continue;
+            }
 
             // if a valid format is specified, skip output if not compatible
             if (format != AUDIO_FORMAT_INVALID) {
@@ -2423,6 +2431,33 @@
     return mEffects.registerEffect(desc, io, strategy, session, id);
 }
 
+status_t AudioPolicyManager::unregisterEffect(int id)
+{
+    if (mEffects.getEffect(id) == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    if (mEffects.isEffectEnabled(id)) {
+        ALOGW("%s effect %d enabled", __FUNCTION__, id);
+        setEffectEnabled(id, false);
+    }
+    return mEffects.unregisterEffect(id);
+}
+
+status_t AudioPolicyManager::setEffectEnabled(int id, bool enabled)
+{
+    sp<EffectDescriptor> effect = mEffects.getEffect(id);
+    if (effect == nullptr) {
+        return INVALID_OPERATION;
+    }
+
+    status_t status = mEffects.setEffectEnabled(id, enabled);
+    if (status == NO_ERROR) {
+        mInputs.trackEffectEnabled(effect, enabled);
+    }
+    return status;
+}
+
 bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
     bool active = false;
@@ -3706,13 +3741,11 @@
 
 void AudioPolicyManager::setAppState(uid_t uid, app_state_t state)
 {
-    Vector<sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
-
     ALOGV("%s(uid:%d, state:%d)", __func__, uid, state);
 
-    for (size_t i = 0; i < activeInputs.size(); i++) {
-        sp<AudioInputDescriptor> activeDesc = activeInputs[i];
-        RecordClientVector clients = activeDesc->clientsList(true /*activeOnly*/);
+    for (size_t i = 0; i < mInputs.size(); i++) {
+        sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(i);
+        RecordClientVector clients = inputDesc->clientsList(false /*activeOnly*/);
         for (const auto& client : clients) {
             if (uid == client->uid()) {
                 client->setAppState(state);
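
The kMandatedFlags check added above makes flag selection symmetric for HW A/V sync and MMAP no-IRQ outputs: each mandated flag must either be present on both the request and the candidate output profile, or absent from both. On the application side this corresponds to requesting AudioAttributes.FLAG_HW_AV_SYNC; a small sketch (AvSyncAttributes is a placeholder class, and the usage/content-type values are arbitrary):

    import android.media.AudioAttributes;

    final class AvSyncAttributes {
        // Requests an HW A/V sync output; with the change above, only outputs whose
        // profiles also advertise AUDIO_OUTPUT_FLAG_HW_AV_SYNC remain candidates.
        static AudioAttributes build() {
            return new AudioAttributes.Builder()
                    .setUsage(AudioAttributes.USAGE_MEDIA)
                    .setContentType(AudioAttributes.CONTENT_TYPE_MOVIE)
                    .setFlags(AudioAttributes.FLAG_HW_AV_SYNC)
                    .build();
        }
    }
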
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 86993d4..35dd87c 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -164,14 +164,8 @@
                                         uint32_t strategy,
                                         int session,
                                         int id);
-        virtual status_t unregisterEffect(int id)
-        {
-            return mEffects.unregisterEffect(id);
-        }
-        virtual status_t setEffectEnabled(int id, bool enabled)
-        {
-            return mEffects.setEffectEnabled(id, enabled);
-        }
+        virtual status_t unregisterEffect(int id);
+        virtual status_t setEffectEnabled(int id, bool enabled);
 
         virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
         // return whether a stream is playing remotely, override to change the definition of
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 21fffec..d826192 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -209,12 +209,17 @@
 }
 
 void AudioPolicyService::AudioPolicyClient::onRecordingConfigurationUpdate(
-        int event, const record_client_info_t *clientInfo,
-        const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
-        audio_patch_handle_t patchHandle)
+                                                    int event,
+                                                    const record_client_info_t *clientInfo,
+                                                    const audio_config_base_t *clientConfig,
+                                                    std::vector<effect_descriptor_t> clientEffects,
+                                                    const audio_config_base_t *deviceConfig,
+                                                    std::vector<effect_descriptor_t> effects,
+                                                    audio_patch_handle_t patchHandle,
+                                                    audio_source_t source)
 {
     mAudioPolicyService->onRecordingConfigurationUpdate(event, clientInfo,
-            clientConfig, deviceConfig, patchHandle);
+            clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
 }
 
 audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId(audio_unique_id_use_t use)
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index ee5d6ff..416817f 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -38,6 +38,7 @@
 #include <media/AudioEffect.h>
 #include <media/AudioParameter.h>
 #include <mediautils/ServiceUtilities.h>
+#include <sensorprivacy/SensorPrivacyManager.h>
 
 #include <system/audio.h>
 #include <system/audio_policy.h>
@@ -84,6 +85,9 @@
 
     mUidPolicy = new UidPolicy(this);
     mUidPolicy->registerSelf();
+
+    mSensorPrivacyPolicy = new SensorPrivacyPolicy(this);
+    mSensorPrivacyPolicy->registerSelf();
 }
 
 AudioPolicyService::~AudioPolicyService()
@@ -99,6 +103,9 @@
 
     mUidPolicy->unregisterSelf();
     mUidPolicy.clear();
+
+    mSensorPrivacyPolicy->unregisterSelf();
+    mSensorPrivacyPolicy.clear();
 }
 
 // A notification client is always registered by AudioSystem when the client process
@@ -208,22 +215,34 @@
     }
 }
 
-void AudioPolicyService::onRecordingConfigurationUpdate(int event,
-        const record_client_info_t *clientInfo, const audio_config_base_t *clientConfig,
-        const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle)
+void AudioPolicyService::onRecordingConfigurationUpdate(
+                                                    int event,
+                                                    const record_client_info_t *clientInfo,
+                                                    const audio_config_base_t *clientConfig,
+                                                    std::vector<effect_descriptor_t> clientEffects,
+                                                    const audio_config_base_t *deviceConfig,
+                                                    std::vector<effect_descriptor_t> effects,
+                                                    audio_patch_handle_t patchHandle,
+                                                    audio_source_t source)
 {
     mOutputCommandThread->recordingConfigurationUpdateCommand(event, clientInfo,
-            clientConfig, deviceConfig, patchHandle);
+            clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
 }
 
-void AudioPolicyService::doOnRecordingConfigurationUpdate(int event,
-        const record_client_info_t *clientInfo, const audio_config_base_t *clientConfig,
-        const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle)
+void AudioPolicyService::doOnRecordingConfigurationUpdate(
+                                                  int event,
+                                                  const record_client_info_t *clientInfo,
+                                                  const audio_config_base_t *clientConfig,
+                                                  std::vector<effect_descriptor_t> clientEffects,
+                                                  const audio_config_base_t *deviceConfig,
+                                                  std::vector<effect_descriptor_t> effects,
+                                                  audio_patch_handle_t patchHandle,
+                                                  audio_source_t source)
 {
     Mutex::Autolock _l(mNotificationClientsLock);
     for (size_t i = 0; i < mNotificationClients.size(); i++) {
         mNotificationClients.valueAt(i)->onRecordingConfigurationUpdate(event, clientInfo,
-                clientConfig, deviceConfig, patchHandle);
+                clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
     }
 }
 
@@ -291,13 +310,18 @@
 }
 
 void AudioPolicyService::NotificationClient::onRecordingConfigurationUpdate(
-        int event, const record_client_info_t *clientInfo,
-        const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
-        audio_patch_handle_t patchHandle)
+                                            int event,
+                                            const record_client_info_t *clientInfo,
+                                            const audio_config_base_t *clientConfig,
+                                            std::vector<effect_descriptor_t> clientEffects,
+                                            const audio_config_base_t *deviceConfig,
+                                            std::vector<effect_descriptor_t> effects,
+                                            audio_patch_handle_t patchHandle,
+                                            audio_source_t source)
 {
     if (mAudioPolicyServiceClient != 0 && isServiceUid(mUid)) {
         mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, clientInfo,
-                clientConfig, deviceConfig, patchHandle);
+                clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
     }
 }
 
@@ -375,6 +399,12 @@
     bool isAssistantOnTop = false;
     bool isSensitiveActive = false;
 
+    // if Sensor Privacy is enabled then all recordings should be silenced.
+    if (mSensorPrivacyPolicy->isSensorPrivacyEnabled()) {
+        silenceAllRecordings_l();
+        return;
+    }
+
     for (size_t i =0; i < mAudioRecordClients.size(); i++) {
         sp<AudioRecordClient> current = mAudioRecordClients[i];
         if (!current->active) continue;
@@ -445,6 +475,13 @@
     }
 }
 
+void AudioPolicyService::silenceAllRecordings_l() {
+    for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
+        sp<AudioRecordClient> current = mAudioRecordClients[i];
+        setAppState_l(current->uid, APP_STATE_IDLE);
+    }
+}
+
 /* static */
 app_state_t AudioPolicyService::apmStatFromAmState(int amState) {
     switch (amState) {
@@ -858,6 +895,31 @@
     return it != mA11yUids.end();
 }
 
+// -----------  AudioPolicyService::SensorPrivacyPolicy implementation ----------
+void AudioPolicyService::SensorPrivacyPolicy::registerSelf() {
+    SensorPrivacyManager spm;
+    mSensorPrivacyEnabled = spm.isSensorPrivacyEnabled();
+    spm.addSensorPrivacyListener(this);
+}
+
+void AudioPolicyService::SensorPrivacyPolicy::unregisterSelf() {
+    SensorPrivacyManager spm;
+    spm.removeSensorPrivacyListener(this);
+}
+
+bool AudioPolicyService::SensorPrivacyPolicy::isSensorPrivacyEnabled() {
+    return mSensorPrivacyEnabled;
+}
+
+binder::Status AudioPolicyService::SensorPrivacyPolicy::onSensorPrivacyChanged(bool enabled) {
+    mSensorPrivacyEnabled = enabled;
+    sp<AudioPolicyService> service = mService.promote();
+    if (service != nullptr) {
+        service->updateUidStates();
+    }
+    return binder::Status::ok();
+}
+
 // -----------  AudioPolicyService::AudioCommandThread implementation ----------
 
 AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name,
@@ -1026,8 +1088,9 @@
                     }
                     mLock.unlock();
                     svc->doOnRecordingConfigurationUpdate(data->mEvent, &data->mClientInfo,
-                            &data->mClientConfig, &data->mDeviceConfig,
-                            data->mPatchHandle);
+                            &data->mClientConfig, data->mClientEffects,
+                            &data->mDeviceConfig, data->mEffects,
+                            data->mPatchHandle, data->mSource);
                     mLock.lock();
                     } break;
                 default:
@@ -1262,9 +1325,14 @@
 }
 
 void AudioPolicyService::AudioCommandThread::recordingConfigurationUpdateCommand(
-        int event, const record_client_info_t *clientInfo,
-        const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
-        audio_patch_handle_t patchHandle)
+                                                int event,
+                                                const record_client_info_t *clientInfo,
+                                                const audio_config_base_t *clientConfig,
+                                                std::vector<effect_descriptor_t> clientEffects,
+                                                const audio_config_base_t *deviceConfig,
+                                                std::vector<effect_descriptor_t> effects,
+                                                audio_patch_handle_t patchHandle,
+                                                audio_source_t source)
 {
     sp<AudioCommand>command = new AudioCommand();
     command->mCommand = RECORDING_CONFIGURATION_UPDATE;
@@ -1272,8 +1340,11 @@
     data->mEvent = event;
     data->mClientInfo = *clientInfo;
     data->mClientConfig = *clientConfig;
+    data->mClientEffects = clientEffects;
     data->mDeviceConfig = *deviceConfig;
+    data->mEffects = effects;
     data->mPatchHandle = patchHandle;
+    data->mSource = source;
     command->mParam = data;
     ALOGV("AudioCommandThread() adding recording configuration update event %d, source %d uid %u",
             event, clientInfo->source, clientInfo->uid);
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 23c3daa..c44d816 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -33,6 +33,7 @@
 #include <media/AudioPolicy.h>
 #include "AudioPolicyEffects.h"
 #include "managerdefault/AudioPolicyManager.h"
+#include <android/hardware/BnSensorPrivacyListener.h>
 
 #include <unordered_map>
 
@@ -239,12 +240,22 @@
 
             void onDynamicPolicyMixStateUpdate(const String8& regId, int32_t state);
             void doOnDynamicPolicyMixStateUpdate(const String8& regId, int32_t state);
-            void onRecordingConfigurationUpdate(int event, const record_client_info_t *clientInfo,
-                    const audio_config_base_t *clientConfig,
-                    const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
-            void doOnRecordingConfigurationUpdate(int event, const record_client_info_t *clientInfo,
-                    const audio_config_base_t *clientConfig,
-                    const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
+            void onRecordingConfigurationUpdate(int event,
+                                                const record_client_info_t *clientInfo,
+                                                const audio_config_base_t *clientConfig,
+                                                std::vector<effect_descriptor_t> clientEffects,
+                                                const audio_config_base_t *deviceConfig,
+                                                std::vector<effect_descriptor_t> effects,
+                                                audio_patch_handle_t patchHandle,
+                                                audio_source_t source);
+            void doOnRecordingConfigurationUpdate(int event,
+                                                  const record_client_info_t *clientInfo,
+                                                  const audio_config_base_t *clientConfig,
+                                                  std::vector<effect_descriptor_t> clientEffects,
+                                                  const audio_config_base_t *deviceConfig,
+                                                  std::vector<effect_descriptor_t> effects,
+                                                  audio_patch_handle_t patchHandle,
+                                                  audio_source_t source);
 
 private:
                         AudioPolicyService() ANDROID_API;
@@ -279,6 +290,8 @@
     void updateUidStates();
     void updateUidStates_l();
 
+    void silenceAllRecordings_l();
+
     static bool isPrivacySensitive(audio_source_t source);
 
     // If recording we need to make sure the UID is allowed to do that. If the UID is idle
@@ -334,6 +347,27 @@
         std::vector<uid_t> mA11yUids;
     };
 
+    // If sensor privacy is enabled then all apps, including those that are active, should be
+    // prevented from recording. This is handled similarly to idle UIDs: any app that attempts
+    // to record while sensor privacy is enabled will receive buffers filled with zeros. As soon
+    // as sensor privacy is disabled, active apps will receive the expected data when recording.
+    class SensorPrivacyPolicy : public hardware::BnSensorPrivacyListener {
+        public:
+            explicit SensorPrivacyPolicy(wp<AudioPolicyService> service)
+                    : mService(service) {}
+
+            void registerSelf();
+            void unregisterSelf();
+
+            bool isSensorPrivacyEnabled();
+
+            binder::Status onSensorPrivacyChanged(bool enabled);
+
+        private:
+            wp<AudioPolicyService> mService;
+            std::atomic_bool mSensorPrivacyEnabled;
+    };
+
     // Thread used to send audio config commands to audio flinger
     // For audio config commands, it is necessary because audio flinger requires that the calling
     // process (user) has permission to modify audio settings.
@@ -385,13 +419,17 @@
                     void        updateAudioPatchListCommand();
                     status_t    setAudioPortConfigCommand(const struct audio_port_config *config,
                                                           int delayMs);
-                    void        dynamicPolicyMixStateUpdateCommand(const String8& regId, int32_t state);
+                    void        dynamicPolicyMixStateUpdateCommand(const String8& regId,
+                                                                   int32_t state);
                     void        recordingConfigurationUpdateCommand(
-                                                        int event,
-                                                        const record_client_info_t *clientInfo,
-                                                        const audio_config_base_t *clientConfig,
-                                                        const audio_config_base_t *deviceConfig,
-                                                        audio_patch_handle_t patchHandle);
+                                                    int event,
+                                                    const record_client_info_t *clientInfo,
+                                                    const audio_config_base_t *clientConfig,
+                                                    std::vector<effect_descriptor_t> clientEffects,
+                                                    const audio_config_base_t *deviceConfig,
+                                                    std::vector<effect_descriptor_t> effects,
+                                                    audio_patch_handle_t patchHandle,
+                                                    audio_source_t source);
                     void        insertCommand_l(AudioCommand *command, int delayMs = 0);
     private:
         class AudioCommandData;
@@ -476,8 +514,11 @@
             int mEvent;
             record_client_info_t mClientInfo;
             struct audio_config_base mClientConfig;
+            std::vector<effect_descriptor_t> mClientEffects;
             struct audio_config_base mDeviceConfig;
+            std::vector<effect_descriptor_t> mEffects;
             audio_patch_handle_t mPatchHandle;
+            audio_source_t mSource;
         };
 
         Mutex   mLock;
@@ -581,9 +622,13 @@
         virtual void onAudioPatchListUpdate();
         virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
         virtual void onRecordingConfigurationUpdate(int event,
-                        const record_client_info_t *clientInfo,
-                        const audio_config_base_t *clientConfig,
-                        const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
+                                                    const record_client_info_t *clientInfo,
+                                                    const audio_config_base_t *clientConfig,
+                                                    std::vector<effect_descriptor_t> clientEffects,
+                                                    const audio_config_base_t *deviceConfig,
+                                                    std::vector<effect_descriptor_t> effects,
+                                                    audio_patch_handle_t patchHandle,
+                                                    audio_source_t source);
 
         virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
 
@@ -601,12 +646,17 @@
 
                             void      onAudioPortListUpdate();
                             void      onAudioPatchListUpdate();
-                            void      onDynamicPolicyMixStateUpdate(const String8& regId, int32_t state);
+                            void      onDynamicPolicyMixStateUpdate(const String8& regId,
+                                                                    int32_t state);
                             void      onRecordingConfigurationUpdate(
-                                        int event, const record_client_info_t *clientInfo,
-                                        const audio_config_base_t *clientConfig,
-                                        const audio_config_base_t *deviceConfig,
-                                        audio_patch_handle_t patchHandle);
+                                                    int event,
+                                                    const record_client_info_t *clientInfo,
+                                                    const audio_config_base_t *clientConfig,
+                                                    std::vector<effect_descriptor_t> clientEffects,
+                                                    const audio_config_base_t *deviceConfig,
+                                                    std::vector<effect_descriptor_t> effects,
+                                                    audio_patch_handle_t patchHandle,
+                                                    audio_source_t source);
                             void      setAudioPortCallbacksEnabled(bool enabled);
 
                             uid_t uid() {
@@ -718,6 +768,8 @@
     audio_mode_t mPhoneState;
 
     sp<UidPolicy> mUidPolicy;
+    sp<SensorPrivacyPolicy> mSensorPrivacyPolicy;
+
     DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> >   mAudioRecordClients;
     DefaultKeyedVector< audio_port_handle_t, sp<AudioPlaybackClient> >   mAudioPlaybackClients;
 };
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index 2ff7675..6ae354b 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -75,11 +75,14 @@
     void onAudioPatchListUpdate() override { }
     audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t /*use*/) override { return 0; }
     void onDynamicPolicyMixStateUpdate(String8 /*regId*/, int32_t /*state*/) override { }
-    void onRecordingConfigurationUpdate(int /*event*/,
-                                        const record_client_info_t* /*clientInfo*/,
-                                        const struct audio_config_base* /*clientConfig*/,
-                                        const struct audio_config_base* /*deviceConfig*/,
-                                        audio_patch_handle_t /*patchHandle*/) override { }
+    void onRecordingConfigurationUpdate(int event __unused,
+                                        const record_client_info_t *clientInfo __unused,
+                                        const audio_config_base_t *clientConfig __unused,
+                                        std::vector<effect_descriptor_t> clientEffects __unused,
+                                        const audio_config_base_t *deviceConfig __unused,
+                                        std::vector<effect_descriptor_t> effects __unused,
+                                        audio_patch_handle_t patchHandle __unused,
+                                        audio_source_t source __unused) override { }
 };
 
 } // namespace android
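
The hunks above extend onRecordingConfigurationUpdate() so that observers also receive the client-side and device-side effect chains plus the audio source. As a rough illustration of what a consumer of the extra parameters might do, the standalone sketch below simply logs them. It is not the full AOSP signature: the Android types are stubbed so the example compiles on its own, and the effect names are made up.

```cpp
// Standalone sketch (not AOSP code) of a listener consuming the new effect-list and
// source parameters. Stand-ins below replace the real audio_source_t and
// effect_descriptor_t types.
#include <cstdio>
#include <string>
#include <vector>

using audio_source_t = int;                       // stand-in for the real enum
struct effect_descriptor_t { std::string name; }; // stand-in for the real descriptor

void onRecordingConfigurationUpdate(int event,
                                    const std::vector<effect_descriptor_t>& clientEffects,
                                    const std::vector<effect_descriptor_t>& deviceEffects,
                                    audio_source_t source) {
    std::printf("recording config event %d, source %d\n", event, source);
    for (const auto& e : clientEffects) std::printf("  client effect: %s\n", e.name.c_str());
    for (const auto& e : deviceEffects) std::printf("  device effect: %s\n", e.name.c_str());
}

int main() {
    onRecordingConfigurationUpdate(/*event=*/1,
                                   {{"AEC"}, {"NS"}},  // hypothetical client-side effects
                                   {},                 // no device-side effects
                                   /*source=*/1);      // 1 == AUDIO_SOURCE_MIC
    return 0;
}
```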
diff --git a/services/mediacodec/main_swcodecservice.cpp b/services/mediacodec/main_swcodecservice.cpp
index 386abb2..79fea25 100644
--- a/services/mediacodec/main_swcodecservice.cpp
+++ b/services/mediacodec/main_swcodecservice.cpp
@@ -37,6 +37,12 @@
 static const char kVendorSeccompPolicyPath[] =
         "/vendor/etc/seccomp_policy/mediacodec.policy";
 
+// Disable Scudo's deallocation-type mismatch check, as it is being
+// triggered by some third-party code.
+extern "C" const char *__scudo_default_options() {
+  return "DeallocationTypeMismatch=false";
+}
+
 int main(int argc __unused, char** /*argv*/)
 {
     LOG(INFO) << "media swcodec service starting";
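
The __scudo_default_options() definition added above is Scudo's hook for letting a binary supply default allocator options. The standalone sketch below shows the kind of allocation/deallocation mixup the DeallocationTypeMismatch option governs; it only has an effect when Scudo is the process allocator, and with other allocators the hook is simply never called.

```cpp
// Illustration (not AOSP code) of a deallocation-type mismatch and of relaxing the
// check via the same hook used by the service above.
#include <cstdlib>

extern "C" const char *__scudo_default_options() {
    // Same option string as the service: tolerate malloc/new vs free/delete mixups.
    return "DeallocationTypeMismatch=false";
}

int main() {
    int *p = new int(42);
    // Mismatched deallocation: memory obtained with `new` is released with free().
    // With DeallocationTypeMismatch enabled, Scudo can flag or abort on this; with the
    // override it is tolerated, though it remains a bug that should eventually be fixed.
    std::free(p);
    return 0;
}
```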
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 336bbe8..e31eadc 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -38,10 +38,6 @@
 LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaextractor.policy
 LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaextractor.policy
 
-# extractor libraries
-LOCAL_REQUIRED_MODULES += \
-    libmpeg2extractor \
-
 LOCAL_SRC_FILES := main_extractorservice.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
     liblog libbase libicuuc libavservices_minijail
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index ca96f62..bee5d25 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -9,10 +9,8 @@
     shared_libs: [
         "libaudioutils",
         "libbinder",
-        "libcutils",
         "liblog",
         "libmediautils",
-        "libnbaio",
         "libnblog",
         "libutils",
     ],