Merge "Fix potential overflow in Visualizer effect" into klp-dev am: b602286952 am: f59ab362c5 am: d7822b3313 am: 9a5ec6b4c3 am: d959eb23c5 am: 5df594ca92 am: b2c00418d3 am: c37d5f55a5 am: b028a26af0 am: fa1019952f am: 0dcde5355a am: cdabec640b am: 2b33d52dde am: 2b676113cf am: 14d8147645
am: 39c734f5c3

Change-Id: Ia206e90afb46f44a3bba60f081a9d9d2ead650f4
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index 9fd192c..be993e0 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -259,6 +259,11 @@
     const char* format = "%T";
     struct tm tm;
 
+    if (mUseMonotonicTimestamps) {
+        snprintf(buf, bufLen, "%" PRId64, monotonicNsec);
+        return;
+    }
+
     // localtime/strftime is not the fastest way to do this, but a trivial
     // benchmark suggests that the cost is negligible.
     int64_t realTime = mStartRealtimeNsecs +
diff --git a/cmds/screenrecord/Overlay.h b/cmds/screenrecord/Overlay.h
index ee3444d..1d8a569 100644
--- a/cmds/screenrecord/Overlay.h
+++ b/cmds/screenrecord/Overlay.h
@@ -37,7 +37,7 @@
  */
 class Overlay : public GLConsumer::FrameAvailableListener, Thread {
 public:
-    Overlay() : Thread(false),
+    Overlay(bool monotonicTimestamps) : Thread(false),
         mThreadResult(UNKNOWN_ERROR),
         mState(UNINITIALIZED),
         mFrameAvailable(false),
@@ -45,7 +45,8 @@
         mStartMonotonicNsecs(0),
         mStartRealtimeNsecs(0),
         mLastFrameNumber(-1),
-        mTotalDroppedFrames(0)
+        mTotalDroppedFrames(0),
+        mUseMonotonicTimestamps(monotonicTimestamps)
         {}
 
     // Creates a thread that performs the overlay.  Pass in the surface that
@@ -151,6 +152,8 @@
     nsecs_t mLastFrameNumber;
     size_t mTotalDroppedFrames;
 
+    bool mUseMonotonicTimestamps;
+
     static const char* kPropertyNames[];
 };
 
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 59d5661..a7eace1 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -68,6 +68,7 @@
 // Command-line parameters.
 static bool gVerbose = false;           // chatty on stdout
 static bool gRotate = false;            // rotate 90 degrees
+static bool gMonotonicTime = false;     // use system monotonic time for timestamps
 static enum {
     FORMAT_MP4, FORMAT_H264, FORMAT_FRAMES, FORMAT_RAW_FRAMES
 } gOutputFormat = FORMAT_MP4;           // data format for output
@@ -609,7 +610,7 @@
     sp<Overlay> overlay;
     if (gWantFrameTime) {
         // Send virtual display frames to an external texture.
-        overlay = new Overlay();
+        overlay = new Overlay(gMonotonicTime);
         err = overlay->start(encoderInputSurface, &bufferProducer);
         if (err != NO_ERROR) {
             if (encoder != NULL) encoder->release();
@@ -892,6 +893,7 @@
         { "show-frame-time",    no_argument,        NULL, 'f' },
         { "rotate",             no_argument,        NULL, 'r' },
         { "output-format",      required_argument,  NULL, 'o' },
+        { "monotonic-time",     no_argument,        NULL, 'm' },
         { NULL,                 0,                  NULL, 0 }
     };
 
@@ -971,6 +973,9 @@
                 return 2;
             }
             break;
+        case 'm':
+            gMonotonicTime = true;
+            break;
         default:
             if (ic != '?') {
                 fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
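Taken together, the screenrecord changes above add a --monotonic-time option which, combined with the existing --show-frame-time overlay, makes the overlay print raw monotonic-clock nanoseconds instead of wall-clock time. A likely invocation (illustrative, not part of the patch):

    adb shell screenrecord --show-frame-time --monotonic-time /sdcard/capture.mp4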
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 1c39b9c..f971ee2 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -39,6 +39,7 @@
 class IOMXRenderer;
 class NativeHandle;
 class Surface;
+struct omx_message;
 
 class IOMX : public IInterface {
 public:
@@ -179,6 +180,12 @@
             OMX_U32 range_offset, OMX_U32 range_length,
             OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) = 0;
 
+    virtual status_t emptyGraphicBuffer(
+            node_id node,
+            buffer_id buffer,
+            const sp<GraphicBuffer> &graphicBuffer,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) = 0;
+
     virtual status_t getExtensionIndex(
             node_id node,
             const char *parameter_name,
@@ -200,6 +207,8 @@
             InternalOptionType type,
             const void *data,
             size_t size) = 0;
+
+    virtual status_t dispatchMessage(const omx_message &msg) = 0;
 };
 
 struct omx_message {
@@ -219,6 +228,8 @@
             OMX_EVENTTYPE event;
             OMX_U32 data1;
             OMX_U32 data2;
+            OMX_U32 data3;
+            OMX_U32 data4;
         } event_data;
 
         // if type == EMPTY_BUFFER_DONE
diff --git a/include/media/MediaDefs.h b/include/media/MediaDefs.h
new file mode 100644
index 0000000..5f2a32d
--- /dev/null
+++ b/include/media/MediaDefs.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_DEFS_H_
+
+#define MEDIA_DEFS_H_
+
+namespace android {
+
+extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+
+extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
+extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
+extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
+extern const char *MEDIA_MIMETYPE_VIDEO_H263;
+extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
+extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
+extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
+
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG;           // layer III
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
+extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
+extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
+extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
+extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
+extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
+extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
+extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
+extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
+extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
+extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
+extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
+extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
+
+extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
+
+extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
+extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
+extern const char *MEDIA_MIMETYPE_TEXT_VTT;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
+extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
+extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+
+// These are values exported to JAVA API that need to be in sync with
+// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
+// they are not defined in frameworks/av, so defining them here.
+enum AudioEncoding {
+    kAudioEncodingPcm16bit = 2,
+    kAudioEncodingPcm8bit = 3,
+    kAudioEncodingPcmFloat = 4,
+};
+
+}  // namespace android
+
+#endif  // MEDIA_DEFS_H_
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index 120de4f..3fd97ac 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -35,13 +35,16 @@
 
 // In addition to the usual status_t
 enum {
-    NEGOTIATE    = 0x80000010,  // Must (re-)negotiate format.  For negotiate() only, the offeree
-                                // doesn't accept offers, and proposes counter-offers
-    OVERRUN      = 0x80000011,  // availableToRead(), read(), or readVia() detected lost input due
-                                // to overrun; an event is counted and the caller should re-try
-    UNDERRUN     = 0x80000012,  // availableToWrite(), write(), or writeVia() detected a gap in
-                                // output due to underrun (not being called often enough, or with
-                                // enough data); an event is counted and the caller should re-try
+    NEGOTIATE    = (UNKNOWN_ERROR + 0x100),  // Must (re-)negotiate format.  For negotiate() only,
+                                             // the offeree doesn't accept offers, and proposes
+                                             // counter-offers
+    OVERRUN      = (UNKNOWN_ERROR + 0x101),  // availableToRead(), read(), or readVia() detected
+                                             // lost input due to overrun; an event is counted and
+                                             // the caller should re-try
+    UNDERRUN     = (UNKNOWN_ERROR + 0x102),  // availableToWrite(), write(), or writeVia() detected
+                                             // a gap in output due to underrun (not being called
+                                             // often enough, or with enough data); an event is
+                                             // counted and the caller should re-try
 };
 
 // Negotiation of format is based on the data provider and data sink, or the data consumer and
@@ -266,6 +269,17 @@
     //              One or more frames were lost due to overrun, try again to read more recent data.
     virtual ssize_t read(void *buffer, size_t count) = 0;
 
+    // Flush data from buffer.  There is no notion of overrun as all data is dropped.
+    // Flushed frames also count towards frames read.
+    //
+    // Return value:
+    //  >= 0    Number of frames successfully flushed
+    //  < 0     status_t error occurred
+    // Errors:
+    //  NEGOTIATE         (Re-)negotiation is needed.
+    //  INVALID_OPERATION Not implemented
+    virtual ssize_t flush() { return INVALID_OPERATION; }
+
     // Transfer data from source using a series of callbacks.  More suitable for zero-fill,
     // synthesis, and non-contiguous transfers (e.g. circular buffer or readv).
     // Inputs:
diff --git a/include/media/nbaio/PipeReader.h b/include/media/nbaio/PipeReader.h
index 7c733ad..00c2b3c 100644
--- a/include/media/nbaio/PipeReader.h
+++ b/include/media/nbaio/PipeReader.h
@@ -47,6 +47,8 @@
 
     virtual ssize_t read(void *buffer, size_t count);
 
+    virtual ssize_t flush();
+
     // NBAIO_Source end
 
 #if 0   // until necessary
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 7b3e71c..5a0913e 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -94,10 +94,6 @@
         DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
     };
 
-    static bool isFlexibleColorFormat(
-            const sp<IOMX> &omx, IOMX::node_id node,
-            uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent);
-
     // Returns 0 if configuration is not supported.  NOTE: this is treated by
     // some OMX components as auto level, and by others as invalid level.
     static int /* OMX_VIDEO_AVCLEVELTYPE */ getAVCLevelFor(
@@ -342,9 +338,6 @@
             ssize_t *index = NULL);
 
     status_t setComponentRole(bool isEncoder, const char *mime);
-    static const char *getComponentRole(bool isEncoder, const char *mime);
-    static status_t setComponentRole(
-            const sp<IOMX> &omx, IOMX::node_id node, const char *role);
 
     status_t configureCodec(const char *mime, const sp<AMessage> &msg);
 
@@ -552,11 +545,6 @@
             OMX_ERRORTYPE error = OMX_ErrorUndefined,
             status_t internalError = UNKNOWN_ERROR);
 
-    static bool describeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
-    static bool describeColorFormat(
-        const sp<IOMX> &omx, IOMX::node_id node,
-        DescribeColorFormat2Params &describeParams);
-
     status_t requestIDRFrame();
     status_t setParameters(const sp<AMessage> &params);
 
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 5f2a32d..359fb69 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (C) 2016 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,69 +14,18 @@
  * limitations under the License.
  */
 
-#ifndef MEDIA_DEFS_H_
 
-#define MEDIA_DEFS_H_
+#ifndef STAGEFRIGHT_MEDIA_DEFS_H_
+#define STAGEFRIGHT_MEDIA_DEFS_H_
 
-namespace android {
+/*
+ * Please, DO NOT USE!
+ *
+ * This file is here only for legacy reasons. Instead, include directly
+ * the header below.
+ *
+ */
 
-extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+#include <media/MediaDefs.h>
 
-extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
-extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
-extern const char *MEDIA_MIMETYPE_VIDEO_AVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_HEVC;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG4;
-extern const char *MEDIA_MIMETYPE_VIDEO_H263;
-extern const char *MEDIA_MIMETYPE_VIDEO_MPEG2;
-extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
-extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
-
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
-extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG;           // layer III
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
-extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
-extern const char *MEDIA_MIMETYPE_AUDIO_MIDI;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_QCELP;
-extern const char *MEDIA_MIMETYPE_AUDIO_VORBIS;
-extern const char *MEDIA_MIMETYPE_AUDIO_OPUS;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_ALAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_G711_MLAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_RAW;
-extern const char *MEDIA_MIMETYPE_AUDIO_FLAC;
-extern const char *MEDIA_MIMETYPE_AUDIO_AAC_ADTS;
-extern const char *MEDIA_MIMETYPE_AUDIO_MSGSM;
-extern const char *MEDIA_MIMETYPE_AUDIO_AC3;
-extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
-extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
-extern const char *MEDIA_MIMETYPE_CONTAINER_OGG;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MATROSKA;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
-extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
-extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
-
-extern const char *MEDIA_MIMETYPE_CONTAINER_WVM;
-
-extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
-extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
-extern const char *MEDIA_MIMETYPE_TEXT_VTT;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
-extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
-extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
-
-// These are values exported to JAVA API that need to be in sync with
-// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
-// they are not defined in frameworks/av, so defining them here.
-enum AudioEncoding {
-    kAudioEncodingPcm16bit = 2,
-    kAudioEncodingPcm8bit = 3,
-    kAudioEncodingPcmFloat = 4,
-};
-
-}  // namespace android
-
-#endif  // MEDIA_DEFS_H_
+#endif  // STAGEFRIGHT_MEDIA_DEFS_H_
diff --git a/include/media/stagefright/foundation/ColorUtils.h b/include/media/stagefright/foundation/ColorUtils.h
index 2368b82..b889a02 100644
--- a/include/media/stagefright/foundation/ColorUtils.h
+++ b/include/media/stagefright/foundation/ColorUtils.h
@@ -138,6 +138,12 @@
             int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
             ColorAspects &aspects);
 
+    // unpack a uint32_t to a full ColorAspects struct
+    static ColorAspects unpackToColorAspects(uint32_t packed);
+
+    // pack a full ColorAspects struct into a uint32_t
+    static uint32_t packToU32(const ColorAspects &aspects);
+
     // updates Unspecified color aspects to their defaults based on the video size
     static void setDefaultCodecColorAspectsIfNeeded(
             ColorAspects &aspects, int32_t width, int32_t height);
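Based on the manual unpacking removed from ACodec::BaseState::onOMXEvent later in this patch, the packed layout appears to be range / primaries / matrix coefficients / transfer in the four bytes from most to least significant. A sketch of what unpackToColorAspects presumably does under that assumption (not the actual libstagefright_foundation implementation):

    // Sketch only; mirrors the bit layout of the code removed from ACodec below.
    ColorAspects unpackToColorAspectsSketch(uint32_t packed) {
        ColorAspects aspects;
        aspects.mRange        = (ColorAspects::Range)((packed >> 24) & 0xFF);
        aspects.mPrimaries    = (ColorAspects::Primaries)((packed >> 16) & 0xFF);
        aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((packed >> 8) & 0xFF);
        aspects.mTransfer     = (ColorAspects::Transfer)(packed & 0xFF);
        return aspects;
    }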
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 202b5f8..e1e2405 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -37,6 +37,7 @@
     IResourceManagerService.cpp \
     IStreamSource.cpp \
     MediaCodecInfo.cpp \
+    MediaDefs.cpp \
     MediaUtils.cpp \
     Metadata.cpp \
     mediarecorder.cpp \
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index d47bd6a..81f0a97 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -302,6 +302,10 @@
             int32_t offset = data.readInt32();
 
             int32_t numSubSamples = data.readInt32();
+            if (numSubSamples < 0 || numSubSamples > 0xffff) {
+                reply->writeInt32(BAD_VALUE);
+                return OK;
+            }
 
             CryptoPlugin::SubSample *subSamples =
                 new CryptoPlugin::SubSample[numSubSamples];
diff --git a/media/libmedia/IMediaCodecService.cpp b/media/libmedia/IMediaCodecService.cpp
index dcf2b27..2d62419 100644
--- a/media/libmedia/IMediaCodecService.cpp
+++ b/media/libmedia/IMediaCodecService.cpp
@@ -33,7 +33,7 @@
 class BpMediaCodecService : public BpInterface<IMediaCodecService>
 {
 public:
-    BpMediaCodecService(const sp<IBinder>& impl)
+    explicit BpMediaCodecService(const sp<IBinder>& impl)
         : BpInterface<IMediaCodecService>(impl)
     {
     }
diff --git a/media/libmedia/IMediaDrmService.cpp b/media/libmedia/IMediaDrmService.cpp
index 9b6ecfd..84812dc 100644
--- a/media/libmedia/IMediaDrmService.cpp
+++ b/media/libmedia/IMediaDrmService.cpp
@@ -37,7 +37,7 @@
 class BpMediaDrmService: public BpInterface<IMediaDrmService>
 {
 public:
-    BpMediaDrmService(const sp<IBinder>& impl)
+    explicit BpMediaDrmService(const sp<IBinder>& impl)
         : BpInterface<IMediaDrmService>(impl)
     {
     }
diff --git a/media/libmedia/IMediaExtractor.cpp b/media/libmedia/IMediaExtractor.cpp
index e8ad75b..eb88efd 100644
--- a/media/libmedia/IMediaExtractor.cpp
+++ b/media/libmedia/IMediaExtractor.cpp
@@ -43,7 +43,7 @@
 
 class BpMediaExtractor : public BpInterface<IMediaExtractor> {
 public:
-    BpMediaExtractor(const sp<IBinder>& impl)
+    explicit BpMediaExtractor(const sp<IBinder>& impl)
         : BpInterface<IMediaExtractor>(impl)
     {
     }
diff --git a/media/libmedia/IMediaExtractorService.cpp b/media/libmedia/IMediaExtractorService.cpp
index d170c22..8b00d85 100644
--- a/media/libmedia/IMediaExtractorService.cpp
+++ b/media/libmedia/IMediaExtractorService.cpp
@@ -33,7 +33,7 @@
 class BpMediaExtractorService : public BpInterface<IMediaExtractorService>
 {
 public:
-    BpMediaExtractorService(const sp<IBinder>& impl)
+    explicit BpMediaExtractorService(const sp<IBinder>& impl)
         : BpInterface<IMediaExtractorService>(impl)
     {
     }
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index 595bad9..fdbc869 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -67,7 +67,7 @@
 
 class BpMediaSource : public BpInterface<IMediaSource> {
 public:
-    BpMediaSource(const sp<IBinder>& impl)
+    explicit BpMediaSource(const sp<IBinder>& impl)
         : BpInterface<IMediaSource>(impl), mBuffersSinceStop(0)
     {
     }
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index 6d5e7f6..5a3717e 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -55,6 +55,7 @@
     FREE_BUFFER,
     FILL_BUFFER,
     EMPTY_BUFFER,
+    EMPTY_GRAPHIC_BUFFER,
     GET_EXTENSION_INDEX,
     OBSERVER_ON_MSG,
     GET_GRAPHIC_BUFFER_USAGE,
@@ -62,6 +63,7 @@
     UPDATE_GRAPHIC_BUFFER_IN_META,
     CONFIGURE_VIDEO_TUNNEL_MODE,
     UPDATE_NATIVE_HANDLE_IN_META,
+    DISPATCH_MESSAGE,
 };
 
 class BpOMX : public BpInterface<IOMX> {
@@ -585,6 +587,27 @@
         return reply.readInt32();
     }
 
+    virtual status_t emptyGraphicBuffer(
+            node_id node,
+            buffer_id buffer,
+            const sp<GraphicBuffer> &graphicBuffer,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        data.writeInt32((int32_t)node);
+        data.writeInt32((int32_t)buffer);
+        data.write(*graphicBuffer);
+        data.writeInt32(flags);
+        data.writeInt64(timestamp);
+        data.writeInt32(fenceFd >= 0);
+        if (fenceFd >= 0) {
+            data.writeFileDescriptor(fenceFd, true /* takeOwnership */);
+        }
+        remote()->transact(EMPTY_GRAPHIC_BUFFER, data, &reply);
+
+        return reply.readInt32();
+    }
+
     virtual status_t getExtensionIndex(
             node_id node,
             const char *parameter_name,
@@ -623,6 +646,22 @@
 
         return reply.readInt32();
     }
+
+    virtual status_t dispatchMessage(const omx_message &msg) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        data.writeInt32((int32_t)msg.node);
+        data.writeInt32(msg.fenceFd >= 0);
+        if (msg.fenceFd >= 0) {
+            data.writeFileDescriptor(msg.fenceFd, true /* takeOwnership */);
+        }
+        data.writeInt32(msg.type);
+        data.write(&msg.u, sizeof(msg.u));
+
+        remote()->transact(DISPATCH_MESSAGE, data, &reply);
+
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
@@ -1193,6 +1232,24 @@
             return NO_ERROR;
         }
 
+        case EMPTY_GRAPHIC_BUFFER:
+        {
+            CHECK_OMX_INTERFACE(IOMX, data, reply);
+
+            node_id node = (node_id)data.readInt32();
+            buffer_id buffer = (buffer_id)data.readInt32();
+            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
+            data.read(*graphicBuffer);
+            OMX_U32 flags = data.readInt32();
+            OMX_TICKS timestamp = data.readInt64();
+            bool haveFence = data.readInt32();
+            int fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
+            reply->writeInt32(emptyGraphicBuffer(
+                    node, buffer, graphicBuffer, flags, timestamp, fenceFd));
+
+            return NO_ERROR;
+        }
+
         case GET_EXTENSION_INDEX:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
@@ -1218,6 +1275,24 @@
             return OK;
         }
 
+        case DISPATCH_MESSAGE:
+        {
+            CHECK_OMX_INTERFACE(IOMX, data, reply);
+            omx_message msg;
+            msg.node = data.readInt32();
+            int haveFence = data.readInt32();
+            msg.fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
+            msg.type = (typeof(msg.type))data.readInt32();
+            status_t err = data.read(&msg.u, sizeof(msg.u));
+
+            if (err == OK) {
+                err = dispatchMessage(msg);
+            }
+            reply->writeInt32(err);
+
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 1b3b3eb..62a7bdf 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -121,9 +121,11 @@
 }
 
 bool MediaCodecInfo::hasQuirk(const char *name) const {
-    for (size_t ix = 0; ix < mQuirks.size(); ix++) {
-        if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
-            return true;
+    if (name) {
+        for (size_t ix = 0; ix < mQuirks.size(); ix++) {
+            if (mQuirks.itemAt(ix).equalsIgnoreCase(name)) {
+                return true;
+            }
         }
     }
     return false;
@@ -190,9 +192,11 @@
 }
 
 ssize_t MediaCodecInfo::getCapabilityIndex(const char *mime) const {
-    for (size_t ix = 0; ix < mCaps.size(); ix++) {
-        if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
-            return ix;
+    if (mime) {
+        for (size_t ix = 0; ix < mCaps.size(); ix++) {
+            if (mCaps.keyAt(ix).equalsIgnoreCase(mime)) {
+                return ix;
+            }
         }
     }
     return -1;
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libmedia/MediaDefs.cpp
similarity index 98%
rename from media/libstagefright/MediaDefs.cpp
rename to media/libmedia/MediaDefs.cpp
index 845462b..a2110c9 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libmedia/MediaDefs.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include <media/stagefright/MediaDefs.h>
+#include <media/MediaDefs.h>
 
 namespace android {
 
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 6b88404..4554472 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -666,7 +666,7 @@
               timeUs = mVideoLastDequeueTimeUs;
           }
           readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
-          readBuffer(counterpartType, -1, NULL, formatChange);
+          readBuffer(counterpartType, -1, NULL, !formatChange);
           ALOGV("timeUs %lld actualTimeUs %lld", (long long)timeUs, (long long)actualTimeUs);
 
           break;
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 3fffdc1a..db24b33 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -304,8 +304,9 @@
                 notifyVideoSizeChanged();
             }
 
-            uint32_t flags = FLAG_CAN_PAUSE;
+            uint32_t flags = 0;
             if (mLiveSession->isSeekable()) {
+                flags |= FLAG_CAN_PAUSE;
                 flags |= FLAG_CAN_SEEK;
                 flags |= FLAG_CAN_SEEK_BACKWARD;
                 flags |= FLAG_CAN_SEEK_FORWARD;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 978d360..73b07bb 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -72,37 +72,37 @@
 
         if (cc->mData1 >= 0x20 && cc->mData1 <= 0x7f) {
             // 2 basic chars
-            sprintf(tmp, "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
                  && cc->mData2 >= 0x30 && cc->mData2 <= 0x3f) {
             // 1 special char
-            sprintf(tmp, "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 == 0x12 || cc->mData1 == 0x1A)
                  && cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
             // 1 Spanish/French char
-            sprintf(tmp, "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 == 0x13 || cc->mData1 == 0x1B)
                  && cc->mData2 >= 0x20 && cc->mData2 <= 0x3f){
             // 1 Portuguese/German/Danish char
-            sprintf(tmp, "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
                  && cc->mData2 >= 0x20 && cc->mData2 <= 0x2f){
             // Mid-Row Codes (Table 69)
-            sprintf(tmp, "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if (((cc->mData1 == 0x14 || cc->mData1 == 0x1c)
                   && cc->mData2 >= 0x20 && cc->mData2 <= 0x2f)
                   ||
                    ((cc->mData1 == 0x17 || cc->mData1 == 0x1f)
                   && cc->mData2 >= 0x21 && cc->mData2 <= 0x23)){
             // Misc Control Codes (Table 70)
-            sprintf(tmp, "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else if ((cc->mData1 & 0x70) == 0x10
                 && (cc->mData2 & 0x40) == 0x40
                 && ((cc->mData1 & 0x07) || !(cc->mData2 & 0x20)) ) {
             // Preamble Address Codes (Table 71)
-            sprintf(tmp, "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         } else {
-            sprintf(tmp, "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
+            snprintf(tmp, sizeof(tmp), "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
         }
 
         if (out.size() > 0) {
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index a879647..fdea68e 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -45,6 +45,7 @@
     ALOG_ASSERT(readers > 0);
 }
 
+__attribute__((no_sanitize("integer")))
 ssize_t PipeReader::availableToRead()
 {
     if (CC_UNLIKELY(!mNegotiated)) {
@@ -54,16 +55,16 @@
     // read() is not multi-thread safe w.r.t. itself, so no mutex or atomic op needed to read mFront
     size_t avail = rear - mFront;
     if (CC_UNLIKELY(avail > mPipe.mMaxFrames)) {
-        // Discard 1/16 of the most recent data in pipe to avoid another overrun immediately
-        int32_t oldFront = mFront;
-        mFront = rear - mPipe.mMaxFrames + (mPipe.mMaxFrames >> 4);
-        mFramesOverrun += (size_t) (mFront - oldFront);
+        // Discard all data in pipe to avoid another overrun immediately
+        mFront = rear;
+        mFramesOverrun += avail;
         ++mOverruns;
         return OVERRUN;
     }
     return avail;
 }
 
+__attribute__((no_sanitize("integer")))
 ssize_t PipeReader::read(void *buffer, size_t count)
 {
     ssize_t avail = availableToRead();
@@ -97,4 +98,19 @@
     return red;
 }
 
+__attribute__((no_sanitize("integer")))
+ssize_t PipeReader::flush()
+{
+    if (CC_UNLIKELY(!mNegotiated)) {
+        return NEGOTIATE;
+    }
+    const int32_t rear = android_atomic_acquire_load(&mPipe.mRear);
+    const size_t flushed = rear - mFront;
+    // We don't check if flushed > mPipe.mMaxFrames (an overrun occurred) as the
+    // distinction is unimportant; all data is dropped.
+    mFront = rear;
+    mFramesRead += flushed;  // we consider flushed frames as read.
+    return flushed;
+}
+
 }   // namespace android
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index e7057ce..a6e8de3 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1608,11 +1608,11 @@
 
 status_t ACodec::setComponentRole(
         bool isEncoder, const char *mime) {
-    const char *role = getComponentRole(isEncoder, mime);
+    const char *role = GetComponentRole(isEncoder, mime);
     if (role == NULL) {
         return BAD_VALUE;
     }
-    status_t err = setComponentRole(mOMX, mNode, role);
+    status_t err = SetComponentRole(mOMX, mNode, role);
     if (err != OK) {
         ALOGW("[%s] Failed to set standard component role '%s'.",
              mComponentName.c_str(), role);
@@ -1620,98 +1620,6 @@
     return err;
 }
 
-//static
-const char *ACodec::getComponentRole(
-        bool isEncoder, const char *mime) {
-    struct MimeToRole {
-        const char *mime;
-        const char *decoderRole;
-        const char *encoderRole;
-    };
-
-    static const MimeToRole kMimeToRole[] = {
-        { MEDIA_MIMETYPE_AUDIO_MPEG,
-            "audio_decoder.mp3", "audio_encoder.mp3" },
-        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I,
-            "audio_decoder.mp1", "audio_encoder.mp1" },
-        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
-            "audio_decoder.mp2", "audio_encoder.mp2" },
-        { MEDIA_MIMETYPE_AUDIO_AMR_NB,
-            "audio_decoder.amrnb", "audio_encoder.amrnb" },
-        { MEDIA_MIMETYPE_AUDIO_AMR_WB,
-            "audio_decoder.amrwb", "audio_encoder.amrwb" },
-        { MEDIA_MIMETYPE_AUDIO_AAC,
-            "audio_decoder.aac", "audio_encoder.aac" },
-        { MEDIA_MIMETYPE_AUDIO_VORBIS,
-            "audio_decoder.vorbis", "audio_encoder.vorbis" },
-        { MEDIA_MIMETYPE_AUDIO_OPUS,
-            "audio_decoder.opus", "audio_encoder.opus" },
-        { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
-            "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
-        { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
-            "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
-        { MEDIA_MIMETYPE_VIDEO_AVC,
-            "video_decoder.avc", "video_encoder.avc" },
-        { MEDIA_MIMETYPE_VIDEO_HEVC,
-            "video_decoder.hevc", "video_encoder.hevc" },
-        { MEDIA_MIMETYPE_VIDEO_MPEG4,
-            "video_decoder.mpeg4", "video_encoder.mpeg4" },
-        { MEDIA_MIMETYPE_VIDEO_H263,
-            "video_decoder.h263", "video_encoder.h263" },
-        { MEDIA_MIMETYPE_VIDEO_VP8,
-            "video_decoder.vp8", "video_encoder.vp8" },
-        { MEDIA_MIMETYPE_VIDEO_VP9,
-            "video_decoder.vp9", "video_encoder.vp9" },
-        { MEDIA_MIMETYPE_AUDIO_RAW,
-            "audio_decoder.raw", "audio_encoder.raw" },
-        { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
-            "video_decoder.dolby-vision", "video_encoder.dolby-vision" },
-        { MEDIA_MIMETYPE_AUDIO_FLAC,
-            "audio_decoder.flac", "audio_encoder.flac" },
-        { MEDIA_MIMETYPE_AUDIO_MSGSM,
-            "audio_decoder.gsm", "audio_encoder.gsm" },
-        { MEDIA_MIMETYPE_VIDEO_MPEG2,
-            "video_decoder.mpeg2", "video_encoder.mpeg2" },
-        { MEDIA_MIMETYPE_AUDIO_AC3,
-            "audio_decoder.ac3", "audio_encoder.ac3" },
-        { MEDIA_MIMETYPE_AUDIO_EAC3,
-            "audio_decoder.eac3", "audio_encoder.eac3" },
-    };
-
-    static const size_t kNumMimeToRole =
-        sizeof(kMimeToRole) / sizeof(kMimeToRole[0]);
-
-    size_t i;
-    for (i = 0; i < kNumMimeToRole; ++i) {
-        if (!strcasecmp(mime, kMimeToRole[i].mime)) {
-            break;
-        }
-    }
-
-    if (i == kNumMimeToRole) {
-        return NULL;
-    }
-
-    return isEncoder ? kMimeToRole[i].encoderRole
-                  : kMimeToRole[i].decoderRole;
-}
-
-//static
-status_t ACodec::setComponentRole(
-        const sp<IOMX> &omx, IOMX::node_id node, const char *role) {
-    OMX_PARAM_COMPONENTROLETYPE roleParams;
-    InitOMXParams(&roleParams);
-
-    strncpy((char *)roleParams.cRole,
-            role, OMX_MAX_STRINGNAME_SIZE - 1);
-
-    roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
-
-    return omx->setParameter(
-            node, OMX_IndexParamStandardComponentRole,
-            &roleParams, sizeof(roleParams));
-}
-
 status_t ACodec::configureCodec(
         const char *mime, const sp<AMessage> &msg) {
     int32_t encoder;
@@ -2068,7 +1976,7 @@
                 }
                 ALOGD("[%s] Requested output format %#x and got %#x.",
                         mComponentName.c_str(), requestedColorFormat, colorFormat);
-                if (!isFlexibleColorFormat(
+                if (!IsFlexibleColorFormat(
                                 mOMX, mNode, colorFormat, haveNativeWindow, &flexibleEquivalent)
                         || flexibleEquivalent != (OMX_U32)requestedColorFormat) {
                     // device did not handle flex-YUV request for native window, fall back
@@ -3089,7 +2997,7 @@
         // substitute back flexible color format to codec supported format
         OMX_U32 flexibleEquivalent;
         if (compressionFormat == OMX_VIDEO_CodingUnused
-                && isFlexibleColorFormat(
+                && IsFlexibleColorFormat(
                         mOMX, mNode, format.eColorFormat, usingNativeBuffers, &flexibleEquivalent)
                 && colorFormat == flexibleEquivalent) {
             ALOGI("[%s] using color format %#x in place of %#x",
@@ -3193,7 +3101,7 @@
         // find best legacy non-standard format
         OMX_U32 flexibleEquivalent;
         if (legacyFormat.eColorFormat == OMX_COLOR_FormatUnused
-                && isFlexibleColorFormat(
+                && IsFlexibleColorFormat(
                         mOMX, mNode, format.eColorFormat, false /* usingNativeBuffers */,
                         &flexibleEquivalent)
                 && flexibleEquivalent == OMX_COLOR_FormatYUV420Flexible) {
@@ -4747,188 +4655,6 @@
     }
 }
 
-// static
-bool ACodec::describeDefaultColorFormat(DescribeColorFormat2Params &params) {
-    MediaImage2 &image = params.sMediaImage;
-    memset(&image, 0, sizeof(image));
-
-    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
-    image.mNumPlanes = 0;
-
-    const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
-    image.mWidth = params.nFrameWidth;
-    image.mHeight = params.nFrameHeight;
-
-    // only supporting YUV420
-    if (fmt != OMX_COLOR_FormatYUV420Planar &&
-        fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
-        fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
-        fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
-        fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
-        ALOGW("do not know color format 0x%x = %d", fmt, fmt);
-        return false;
-    }
-
-    // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
-    if (params.nStride != 0 && params.nSliceHeight == 0) {
-        ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
-                params.nFrameHeight);
-        params.nSliceHeight = params.nFrameHeight;
-    }
-
-    // we need stride and slice-height to be non-zero and sensible. These values were chosen to
-    // prevent integer overflows further down the line, and do not indicate support for
-    // 32kx32k video.
-    if (params.nStride == 0 || params.nSliceHeight == 0
-            || params.nStride > 32768 || params.nSliceHeight > 32768) {
-        ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
-                fmt, fmt, params.nStride, params.nSliceHeight);
-        return false;
-    }
-
-    // set-up YUV format
-    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
-    image.mNumPlanes = 3;
-    image.mBitDepth = 8;
-    image.mBitDepthAllocated = 8;
-    image.mPlane[image.Y].mOffset = 0;
-    image.mPlane[image.Y].mColInc = 1;
-    image.mPlane[image.Y].mRowInc = params.nStride;
-    image.mPlane[image.Y].mHorizSubsampling = 1;
-    image.mPlane[image.Y].mVertSubsampling = 1;
-
-    switch ((int)fmt) {
-        case HAL_PIXEL_FORMAT_YV12:
-            if (params.bUsingNativeBuffers) {
-                size_t ystride = align(params.nStride, 16);
-                size_t cstride = align(params.nStride / 2, 16);
-                image.mPlane[image.Y].mRowInc = ystride;
-
-                image.mPlane[image.V].mOffset = ystride * params.nSliceHeight;
-                image.mPlane[image.V].mColInc = 1;
-                image.mPlane[image.V].mRowInc = cstride;
-                image.mPlane[image.V].mHorizSubsampling = 2;
-                image.mPlane[image.V].mVertSubsampling = 2;
-
-                image.mPlane[image.U].mOffset = image.mPlane[image.V].mOffset
-                        + (cstride * params.nSliceHeight / 2);
-                image.mPlane[image.U].mColInc = 1;
-                image.mPlane[image.U].mRowInc = cstride;
-                image.mPlane[image.U].mHorizSubsampling = 2;
-                image.mPlane[image.U].mVertSubsampling = 2;
-                break;
-            } else {
-                // fall through as YV12 is used for YUV420Planar by some codecs
-            }
-
-        case OMX_COLOR_FormatYUV420Planar:
-        case OMX_COLOR_FormatYUV420PackedPlanar:
-            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
-            image.mPlane[image.U].mColInc = 1;
-            image.mPlane[image.U].mRowInc = params.nStride / 2;
-            image.mPlane[image.U].mHorizSubsampling = 2;
-            image.mPlane[image.U].mVertSubsampling = 2;
-
-            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
-                    + (params.nStride * params.nSliceHeight / 4);
-            image.mPlane[image.V].mColInc = 1;
-            image.mPlane[image.V].mRowInc = params.nStride / 2;
-            image.mPlane[image.V].mHorizSubsampling = 2;
-            image.mPlane[image.V].mVertSubsampling = 2;
-            break;
-
-        case OMX_COLOR_FormatYUV420SemiPlanar:
-            // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
-        case OMX_COLOR_FormatYUV420PackedSemiPlanar:
-            // NV12
-            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
-            image.mPlane[image.U].mColInc = 2;
-            image.mPlane[image.U].mRowInc = params.nStride;
-            image.mPlane[image.U].mHorizSubsampling = 2;
-            image.mPlane[image.U].mVertSubsampling = 2;
-
-            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
-            image.mPlane[image.V].mColInc = 2;
-            image.mPlane[image.V].mRowInc = params.nStride;
-            image.mPlane[image.V].mHorizSubsampling = 2;
-            image.mPlane[image.V].mVertSubsampling = 2;
-            break;
-
-        default:
-            TRESPASS();
-    }
-    return true;
-}
-
-// static
-bool ACodec::describeColorFormat(
-        const sp<IOMX> &omx, IOMX::node_id node,
-        DescribeColorFormat2Params &describeParams)
-{
-    OMX_INDEXTYPE describeColorFormatIndex;
-    if (omx->getExtensionIndex(
-            node, "OMX.google.android.index.describeColorFormat",
-            &describeColorFormatIndex) == OK) {
-        DescribeColorFormatParams describeParamsV1(describeParams);
-        if (omx->getParameter(
-                node, describeColorFormatIndex,
-                &describeParamsV1, sizeof(describeParamsV1)) == OK) {
-            describeParams.initFromV1(describeParamsV1);
-            return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
-        }
-    } else if (omx->getExtensionIndex(
-            node, "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
-               && omx->getParameter(
-            node, describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
-        return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
-    }
-
-    return describeDefaultColorFormat(describeParams);
-}
-
-// static
-bool ACodec::isFlexibleColorFormat(
-         const sp<IOMX> &omx, IOMX::node_id node,
-         uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
-    DescribeColorFormat2Params describeParams;
-    InitOMXParams(&describeParams);
-    describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
-    // reasonable dummy values
-    describeParams.nFrameWidth = 128;
-    describeParams.nFrameHeight = 128;
-    describeParams.nStride = 128;
-    describeParams.nSliceHeight = 128;
-    describeParams.bUsingNativeBuffers = (OMX_BOOL)usingNativeBuffers;
-
-    CHECK(flexibleEquivalent != NULL);
-
-    if (!describeColorFormat(omx, node, describeParams)) {
-        return false;
-    }
-
-    const MediaImage2 &img = describeParams.sMediaImage;
-    if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
-        if (img.mNumPlanes != 3
-                || img.mPlane[img.Y].mHorizSubsampling != 1
-                || img.mPlane[img.Y].mVertSubsampling != 1) {
-            return false;
-        }
-
-        // YUV 420
-        if (img.mPlane[img.U].mHorizSubsampling == 2
-                && img.mPlane[img.U].mVertSubsampling == 2
-                && img.mPlane[img.V].mHorizSubsampling == 2
-                && img.mPlane[img.V].mVertSubsampling == 2) {
-            // possible flexible YUV420 format
-            if (img.mBitDepth <= 8) {
-               *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
-               return true;
-            }
-        }
-    }
-    return false;
-}
-
 status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
     const char *niceIndex = portIndex == kPortIndexInput ? "input" : "output";
     OMX_PARAM_PORTDEFINITIONTYPE def;
@@ -4969,7 +4695,7 @@
                         describeParams.nSliceHeight = videoDef->nSliceHeight;
                         describeParams.bUsingNativeBuffers = OMX_FALSE;
 
-                        if (describeColorFormat(mOMX, mNode, describeParams)) {
+                        if (DescribeColorFormat(mOMX, mNode, describeParams)) {
                             notify->setBuffer(
                                     "image-data",
                                     ABuffer::CreateAsCopy(
@@ -5800,11 +5526,7 @@
 bool ACodec::BaseState::onOMXEvent(
         OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
     if (event == OMX_EventDataSpaceChanged) {
-        ColorAspects aspects;
-        aspects.mRange = (ColorAspects::Range)((data2 >> 24) & 0xFF);
-        aspects.mPrimaries = (ColorAspects::Primaries)((data2 >> 16) & 0xFF);
-        aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((data2 >> 8) & 0xFF);
-        aspects.mTransfer = (ColorAspects::Transfer)(data2 & 0xFF);
+        ColorAspects aspects = ColorUtils::unpackToColorAspects(data2);
 
         mCodec->onDataSpaceChanged((android_dataspace)data1, aspects);
         return true;
@@ -8079,7 +7801,7 @@
         const AString &name, const AString &mime, bool isEncoder,
         sp<MediaCodecInfo::Capabilities> *caps) {
     (*caps).clear();
-    const char *role = getComponentRole(isEncoder, mime.c_str());
+    const char *role = GetComponentRole(isEncoder, mime.c_str());
     if (role == NULL) {
         return BAD_VALUE;
     }
@@ -8100,7 +7822,7 @@
         return err;
     }
 
-    err = setComponentRole(omx, node, role);
+    err = SetComponentRole(omx, node, role);
     if (err != OK) {
         omx->freeNode(node);
         client.disconnect();
@@ -8149,7 +7871,7 @@
             }
 
             OMX_U32 flexibleEquivalent;
-            if (isFlexibleColorFormat(
+            if (IsFlexibleColorFormat(
                     omx, node, portFormat.eColorFormat, false /* usingNativeWindow */,
                     &flexibleEquivalent)) {
                 bool marked = false;
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 98bcc56..b07d914 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -35,7 +35,6 @@
         MediaCodecList.cpp                \
         MediaCodecListOverrides.cpp       \
         MediaCodecSource.cpp              \
-        MediaDefs.cpp                     \
         MediaExtractor.cpp                \
         MediaSync.cpp                     \
         MidiExtractor.cpp                 \
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index ecbf418..416826a 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -809,6 +809,10 @@
         ALOGE("b/23540914");
         return ERROR_MALFORMED;
     }
+    if (depth > 100) {
+        ALOGE("b/27456299");
+        return ERROR_MALFORMED;
+    }
     uint32_t hdr[2];
     if (mDataSource->readAt(*offset, hdr, 8) < 8) {
         return ERROR_IO;
@@ -2285,6 +2289,12 @@
             return UNKNOWN_ERROR; // stop parsing after sidx
         }
 
+        case FOURCC('a', 'c', '-', '3'):
+        {
+            *offset += chunk_size;
+            return parseAC3SampleEntry(data_offset);
+        }
+
         case FOURCC('f', 't', 'y', 'p'):
         {
             if (chunk_data_size < 8 || depth != 0) {
@@ -2333,6 +2343,99 @@
     return OK;
 }
 
+status_t MPEG4Extractor::parseAC3SampleEntry(off64_t offset) {
+    // skip 16 bytes:
+    //  + 6-byte reserved,
+    //  + 2-byte data reference index,
+    //  + 8-byte reserved
+    offset += 16;
+    uint16_t channelCount;
+    if (!mDataSource->getUInt16(offset, &channelCount)) {
+        return ERROR_MALFORMED;
+    }
+    // skip 8 bytes:
+    //  + 2-byte channelCount,
+    //  + 2-byte sample size,
+    //  + 4-byte reserved
+    offset += 8;
+    uint16_t sampleRate;
+    if (!mDataSource->getUInt16(offset, &sampleRate)) {
+        ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read sample rate");
+        return ERROR_MALFORMED;
+    }
+
+    // skip 4 bytes:
+    //  + 2-byte sampleRate,
+    //  + 2-byte reserved
+    offset += 4;
+    return parseAC3SpecificBox(offset, sampleRate);
+}
+
+status_t MPEG4Extractor::parseAC3SpecificBox(
+        off64_t offset, uint16_t sampleRate) {
+    uint32_t size;
+    // + 4-byte size
+    // + 4-byte type
+    // + 3-byte payload
+    const uint32_t kAC3SpecificBoxSize = 11;
+    if (!mDataSource->getUInt32(offset, &size) || size < kAC3SpecificBoxSize) {
+        ALOGE("MPEG4Extractor: error while reading ac-3 block: cannot read specific box size");
+        return ERROR_MALFORMED;
+    }
+
+    offset += 4;
+    uint32_t type;
+    if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '3')) {
+        ALOGE("MPEG4Extractor: error while reading ac-3 specific block: header not dac3");
+        return ERROR_MALFORMED;
+    }
+
+    offset += 4;
+    const uint32_t kAC3SpecificBoxPayloadSize = 3;
+    uint8_t chunk[kAC3SpecificBoxPayloadSize];
+    if (mDataSource->readAt(offset, chunk, sizeof(chunk)) != sizeof(chunk)) {
+        ALOGE("MPEG4Extractor: error while reading ac-3 specific block: bitstream fields");
+        return ERROR_MALFORMED;
+    }
+
+    ABitReader br(chunk, sizeof(chunk));
+    static const unsigned channelCountTable[] = {2, 1, 2, 3, 3, 4, 4, 5};
+    static const unsigned sampleRateTable[] = {48000, 44100, 32000};
+
+    unsigned fscod = br.getBits(2);
+    if (fscod == 3) {
+        ALOGE("Incorrect fscod (3) in AC3 header");
+        return ERROR_MALFORMED;
+    }
+    unsigned boxSampleRate = sampleRateTable[fscod];
+    if (boxSampleRate != sampleRate) {
+        ALOGE("sample rate mismatch: boxSampleRate = %d, sampleRate = %d",
+            boxSampleRate, sampleRate);
+        return ERROR_MALFORMED;
+    }
+
+    unsigned bsid = br.getBits(5);
+    if (bsid > 8) {
+        ALOGW("Incorrect bsid in AC3 header. Possibly E-AC-3?");
+        return ERROR_MALFORMED;
+    }
+
+    // skip
+    unsigned bsmod __unused = br.getBits(3);
+
+    unsigned acmod = br.getBits(3);
+    unsigned lfeon = br.getBits(1);
+    unsigned channelCount = channelCountTable[acmod] + lfeon;
+
+    if (mLastTrack == NULL) {
+        return ERROR_MALFORMED;
+    }
+    mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AC3);
+    mLastTrack->meta->setInt32(kKeyChannelCount, channelCount);
+    mLastTrack->meta->setInt32(kKeySampleRate, sampleRate);
+    return OK;
+}
+
 status_t MPEG4Extractor::parseSegmentIndex(off64_t offset, size_t size) {
   ALOGV("MPEG4Extractor::parseSegmentIndex");
 
@@ -2877,7 +2980,7 @@
 
     int32_t type = U32_AT(&buffer[0]);
     if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
-            || (type == FOURCC('n', 'c', 'l', 'c' && size >= 10))) {
+            || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
         int32_t primaries = U16_AT(&buffer[4]);
         int32_t transfer = U16_AT(&buffer[6]);
         int32_t coeffs = U16_AT(&buffer[8]);
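As a worked example of the dac3 parsing added above (illustrative values, not taken from the patch): a 48 kHz 5.1 AC-3 track typically signals fscod = 0, acmod = 7 and lfeon = 1, so sampleRateTable[0] = 48000 and channelCountTable[7] + lfeon = 5 + 1 = 6 channels.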
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index c9bcfc3..74eb590 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -3300,7 +3300,12 @@
     // Each character is packed as the difference between its ASCII value and 0x60.
     // For "English", these are 00101, 01110, 00111.
     // XXX: Where is the padding bit located: 0x15C7?
-    mOwner->writeInt16(0);             // language code
+    const char *lang = NULL;
+    int16_t langCode = 0;
+    if (mMeta->findCString(kKeyMediaLanguage, &lang) && lang && strnlen(lang, 3) > 2) {
+        langCode = ((lang[0] & 0x1f) << 10) | ((lang[1] & 0x1f) << 5) | (lang[2] & 0x1f);
+    }
+    mOwner->writeInt16(langCode);      // language code
     mOwner->writeInt16(0);             // predefined
     mOwner->endBox();
 }
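Worked check of the language-code packing above: for the ISO-639-2/T code "eng", 'e' & 0x1f = 0x05, 'n' & 0x1f = 0x0e and 'g' & 0x1f = 0x07, so (0x05 << 10) | (0x0e << 5) | 0x07 = 0x15C7, matching the 0x15C7 value the pre-existing comment asks about.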
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 92ce88c..f2fdbc9 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -94,7 +94,7 @@
     sp<IMemory> mMemory;
     sp<DataSource> mSource;
     String8 mName;
-    RemoteDataSource(const sp<DataSource> &source);
+    explicit RemoteDataSource(const sp<DataSource> &source);
     DISALLOW_EVIL_CONSTRUCTORS(RemoteDataSource);
 };
 
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 276d731..ad27856 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -66,7 +66,7 @@
         const KeyedVector<String8, String8> *headers) {
     Mutex::Autolock autoLock(mLock);
 
-    if (mImpl != NULL) {
+    if (mImpl != NULL || path == NULL) {
         return -EINVAL;
     }
 
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index e40dbcf..16fbcd6 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -148,6 +148,12 @@
             OMX_U32 range_offset, OMX_U32 range_length,
             OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
 
+    virtual status_t emptyGraphicBuffer(
+            node_id node,
+            buffer_id buffer,
+            const sp<GraphicBuffer> &graphicBuffer,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+
     virtual status_t getExtensionIndex(
             node_id node,
             const char *parameter_name,
@@ -160,6 +166,8 @@
             const void *data,
             size_t size);
 
+    virtual status_t dispatchMessage(const omx_message &msg);
+
 private:
     mutable Mutex mLock;
 
@@ -476,6 +484,15 @@
             node, buffer, range_offset, range_length, flags, timestamp, fenceFd);
 }
 
+status_t MuxOMX::emptyGraphicBuffer(
+        node_id node,
+        buffer_id buffer,
+        const sp<GraphicBuffer> &graphicBuffer,
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+    return getOMX(node)->emptyGraphicBuffer(
+            node, buffer, graphicBuffer, flags, timestamp, fenceFd);
+}
+
 status_t MuxOMX::getExtensionIndex(
         node_id node,
         const char *parameter_name,
@@ -492,6 +509,10 @@
     return getOMX(node)->setInternalOption(node, port_index, type, data, size);
 }
 
+status_t MuxOMX::dispatchMessage(const omx_message &msg) {
+    return getOMX(msg.node)->dispatchMessage(msg);
+}
+
 OMXClient::OMXClient() {
     char value[PROPERTY_VALUE_MAX];
     if (property_get("media.stagefright.codecremote", value, NULL)
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 36be7a0..34d7546 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -637,6 +637,11 @@
         msg->setInt32("track-id", trackID);
     }
 
+    const char *lang;
+    if (meta->findCString(kKeyMediaLanguage, &lang)) {
+        msg->setString("language", lang);
+    }
+
     if (!strncasecmp("video/", mime, 6)) {
         int32_t width, height;
         if (!meta->findInt32(kKeyWidth, &width)
@@ -1273,6 +1278,11 @@
         meta->setInt32(kKeyMaxBitRate, maxBitrate);
     }
 
+    AString lang;
+    if (msg->findString("language", &lang)) {
+        meta->setCString(kKeyMediaLanguage, lang.c_str());
+    }
+
     if (mime.startsWith("video/")) {
         int32_t width;
         int32_t height;
@@ -1505,6 +1515,7 @@
     { MEDIA_MIMETYPE_AUDIO_AAC,         AUDIO_FORMAT_AAC },
     { MEDIA_MIMETYPE_AUDIO_VORBIS,      AUDIO_FORMAT_VORBIS },
     { MEDIA_MIMETYPE_AUDIO_OPUS,        AUDIO_FORMAT_OPUS},
+    { MEDIA_MIMETYPE_AUDIO_AC3,         AUDIO_FORMAT_AC3},
     { 0, AUDIO_FORMAT_INVALID }
 };
 
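For orientation, the two Utils.cpp hunks above let a track language stored under kKeyMediaLanguage survive the MetaData-to-AMessage conversion and back. The sketch below is illustrative only and not part of the patch; it assumes the hunks sit in convertMetaDataToMessage()/convertMessageToMetaData() (the conversion helpers declared in media/stagefright/Utils.h) and that an audio track also carries the usual mime, channel-count and sample-rate keys.

    #include <media/stagefright/foundation/ADebug.h>
    #include <media/stagefright/foundation/AMessage.h>
    #include <media/stagefright/MediaDefs.h>
    #include <media/stagefright/MetaData.h>
    #include <media/stagefright/Utils.h>

    using namespace android;

    static void languageRoundTrip() {
        sp<MetaData> meta = new MetaData;
        meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC);
        meta->setInt32(kKeyChannelCount, 2);
        meta->setInt32(kKeySampleRate, 44100);
        meta->setCString(kKeyMediaLanguage, "eng");    // consumed by the first hunk

        sp<AMessage> format;
        convertMetaDataToMessage(meta, &format);       // assumed entry point in Utils.cpp

        AString lang;
        CHECK(format->findString("language", &lang));  // lang == "eng"

        sp<MetaData> meta2 = new MetaData;
        convertMessageToMetaData(format, meta2);       // second hunk writes kKeyMediaLanguage back
    }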
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 1c5e3c6..9fbdb72 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -62,6 +62,7 @@
     OMX_AUDIO_AACObjectHE_PS,
     OMX_AUDIO_AACObjectLD,
     OMX_AUDIO_AACObjectELD,
+    OMX_AUDIO_AACObjectER_Scalable,
 };
 
 SoftAAC2::SoftAAC2(
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index 8ff2f35..2763c35 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -60,7 +60,7 @@
         libstagefright_avcenc
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright \
+        libmedia \
         libstagefright_avc_common \
         libstagefright_enc_common \
         libstagefright_foundation \
diff --git a/media/libstagefright/codecs/avcdec/Android.mk b/media/libstagefright/codecs/avcdec/Android.mk
index ef0dbfd..aa7cb90 100644
--- a/media/libstagefright/codecs/avcdec/Android.mk
+++ b/media/libstagefright/codecs/avcdec/Android.mk
@@ -14,7 +14,7 @@
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_SHARED_LIBRARIES  := libstagefright
+LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
 LOCAL_SHARED_LIBRARIES  += libstagefright_foundation
 LOCAL_SHARED_LIBRARIES  += libutils
diff --git a/media/libstagefright/codecs/avcenc/Android.mk b/media/libstagefright/codecs/avcenc/Android.mk
index 70e531b..30e08e8 100644
--- a/media/libstagefright/codecs/avcenc/Android.mk
+++ b/media/libstagefright/codecs/avcenc/Android.mk
@@ -12,12 +12,10 @@
 LOCAL_C_INCLUDES := $(TOP)/external/libavc/encoder
 LOCAL_C_INCLUDES += $(TOP)/external/libavc/common
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
-LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
-LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/hardware
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_SHARED_LIBRARIES  := libstagefright
+LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
 LOCAL_SHARED_LIBRARIES  += libstagefright_foundation
 LOCAL_SHARED_LIBRARIES  += libutils
diff --git a/media/libstagefright/codecs/flac/enc/Android.mk b/media/libstagefright/codecs/flac/enc/Android.mk
index 7e6e015..1ae1052 100644
--- a/media/libstagefright/codecs/flac/enc/Android.mk
+++ b/media/libstagefright/codecs/flac/enc/Android.mk
@@ -14,7 +14,7 @@
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_STATIC_LIBRARIES := \
         libFLAC \
diff --git a/media/libstagefright/codecs/g711/dec/Android.mk b/media/libstagefright/codecs/g711/dec/Android.mk
index b36c99d..96ddd47 100644
--- a/media/libstagefright/codecs/g711/dec/Android.mk
+++ b/media/libstagefright/codecs/g711/dec/Android.mk
@@ -9,7 +9,7 @@
         frameworks/native/include/media/openmax
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_MODULE := libstagefright_soft_g711dec
 LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/gsm/dec/Android.mk b/media/libstagefright/codecs/gsm/dec/Android.mk
index fe8c830..a8a4d1e 100644
--- a/media/libstagefright/codecs/gsm/dec/Android.mk
+++ b/media/libstagefright/codecs/gsm/dec/Android.mk
@@ -14,7 +14,7 @@
 LOCAL_SANITIZE := signed-integer-overflow unsigned-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_STATIC_LIBRARIES := \
         libgsm
diff --git a/media/libstagefright/codecs/hevcdec/Android.mk b/media/libstagefright/codecs/hevcdec/Android.mk
index 78c4637..5f05a7b 100644
--- a/media/libstagefright/codecs/hevcdec/Android.mk
+++ b/media/libstagefright/codecs/hevcdec/Android.mk
@@ -16,7 +16,7 @@
 LOCAL_CLANG := true
 LOCAL_SANITIZE := signed-integer-overflow
 
-LOCAL_SHARED_LIBRARIES  := libstagefright
+LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
 LOCAL_SHARED_LIBRARIES  += libstagefright_foundation
 LOCAL_SHARED_LIBRARIES  += libutils
diff --git a/media/libstagefright/codecs/m4v_h263/dec/Android.mk b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
index eb39b44..197495e 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/dec/Android.mk
@@ -71,7 +71,7 @@
         libstagefright_m4vh263dec
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_MODULE := libstagefright_soft_mpeg4dec
 LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/m4v_h263/enc/Android.mk b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
index ab079e8..c601af3 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/Android.mk
+++ b/media/libstagefright/codecs/m4v_h263/enc/Android.mk
@@ -64,7 +64,7 @@
         libstagefright_m4vh263enc
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright \
+        libmedia \
         libstagefright_enc_common \
         libstagefright_foundation \
         libstagefright_omx \
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index 11581c1..8422b62 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -81,7 +81,7 @@
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_mp3dec
diff --git a/media/libstagefright/codecs/mpeg2dec/Android.mk b/media/libstagefright/codecs/mpeg2dec/Android.mk
index f1c1719..e7a2ea1 100644
--- a/media/libstagefright/codecs/mpeg2dec/Android.mk
+++ b/media/libstagefright/codecs/mpeg2dec/Android.mk
@@ -14,7 +14,7 @@
 LOCAL_C_INCLUDES += $(TOP)/frameworks/av/media/libstagefright/include
 LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_SHARED_LIBRARIES  := libstagefright
+LOCAL_SHARED_LIBRARIES  := libmedia
 LOCAL_SHARED_LIBRARIES  += libstagefright_omx
 LOCAL_SHARED_LIBRARIES  += libstagefright_foundation
 LOCAL_SHARED_LIBRARIES  += libutils
diff --git a/media/libstagefright/codecs/on2/dec/Android.mk b/media/libstagefright/codecs/on2/dec/Android.mk
index 76f7600..1aa63dd 100644
--- a/media/libstagefright/codecs/on2/dec/Android.mk
+++ b/media/libstagefright/codecs/on2/dec/Android.mk
@@ -15,7 +15,7 @@
         libvpx
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog
 
 LOCAL_MODULE := libstagefright_soft_vpxdec
 LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/on2/enc/Android.mk b/media/libstagefright/codecs/on2/enc/Android.mk
index 1de318a..a165fff 100644
--- a/media/libstagefright/codecs/on2/enc/Android.mk
+++ b/media/libstagefright/codecs/on2/enc/Android.mk
@@ -2,7 +2,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES := \
-        SoftVPXEncoder.cpp
+        SoftVPXEncoder.cpp \
+        SoftVP8Encoder.cpp \
+        SoftVP9Encoder.cpp
 
 LOCAL_C_INCLUDES := \
         $(TOP)/external/libvpx/libvpx \
@@ -18,7 +20,7 @@
         libvpx
 
 LOCAL_SHARED_LIBRARIES := \
-        libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+        libmedia libstagefright_omx libstagefright_foundation libutils liblog \
         libhardware \
 
 LOCAL_MODULE := libstagefright_soft_vpxenc
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp
new file mode 100644
index 0000000..04737a9
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SoftVP8Encoder"
+#include "SoftVP8Encoder.h"
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+#ifndef INT32_MAX
+#define INT32_MAX   2147483647
+#endif
+
+namespace android {
+
+static const CodecProfileLevel kVp8ProfileLevels[] = {
+    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version0 },
+    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version1 },
+    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version2 },
+    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version3 },
+};
+
+SoftVP8Encoder::SoftVP8Encoder(const char *name,
+                               const OMX_CALLBACKTYPE *callbacks,
+                               OMX_PTR appData,
+                               OMX_COMPONENTTYPE **component)
+    : SoftVPXEncoder(
+            name, callbacks, appData, component, "video_encoder.vp8",
+            OMX_VIDEO_CodingVP8, MEDIA_MIMETYPE_VIDEO_VP8, 2,
+            kVp8ProfileLevels, NELEM(kVp8ProfileLevels)),
+      mDCTPartitions(0),
+      mLevel(OMX_VIDEO_VP8Level_Version0) {
+}
+
+void SoftVP8Encoder::setCodecSpecificInterface() {
+    mCodecInterface = vpx_codec_vp8_cx();
+}
+
+void SoftVP8Encoder::setCodecSpecificConfiguration() {
+    switch (mLevel) {
+        case OMX_VIDEO_VP8Level_Version0:
+            mCodecConfiguration->g_profile = 0;
+            break;
+
+        case OMX_VIDEO_VP8Level_Version1:
+            mCodecConfiguration->g_profile = 1;
+            break;
+
+        case OMX_VIDEO_VP8Level_Version2:
+            mCodecConfiguration->g_profile = 2;
+            break;
+
+        case OMX_VIDEO_VP8Level_Version3:
+            mCodecConfiguration->g_profile = 3;
+            break;
+
+        default:
+            mCodecConfiguration->g_profile = 0;
+    }
+}
+
+vpx_codec_err_t SoftVP8Encoder::setCodecSpecificControls() {
+    vpx_codec_err_t codec_return = vpx_codec_control(mCodecContext,
+                                                     VP8E_SET_TOKEN_PARTITIONS,
+                                                     mDCTPartitions);
+    if (codec_return != VPX_CODEC_OK) {
+        ALOGE("Error setting dct partitions for vpx encoder.");
+    }
+    return codec_return;
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalGetParameter(OMX_INDEXTYPE index,
+                                                   OMX_PTR param) {
+    // can include extension index OMX_INDEXEXTTYPE
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
+        case OMX_IndexParamVideoVp8:
+            return internalGetVp8Params(
+                (OMX_VIDEO_PARAM_VP8TYPE *)param);
+
+        default:
+            return SoftVPXEncoder::internalGetParameter(index, param);
+    }
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalSetParameter(OMX_INDEXTYPE index,
+                                                   const OMX_PTR param) {
+    // can include extension index OMX_INDEXEXTTYPE
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
+        case OMX_IndexParamVideoVp8:
+            return internalSetVp8Params(
+                (const OMX_VIDEO_PARAM_VP8TYPE *)param);
+
+        default:
+            return SoftVPXEncoder::internalSetParameter(index, param);
+    }
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalGetVp8Params(
+        OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
+    if (vp8Params->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
+    vp8Params->eLevel = mLevel;
+    vp8Params->bErrorResilientMode = mErrorResilience;
+    vp8Params->nDCTPartitions = mDCTPartitions;
+    return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVP8Encoder::internalSetVp8Params(
+        const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
+    if (vp8Params->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) {
+        return OMX_ErrorBadParameter;
+    }
+
+    if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 ||
+        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 ||
+        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 ||
+        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) {
+        mLevel = vp8Params->eLevel;
+    } else {
+        return OMX_ErrorBadParameter;
+    }
+
+    mErrorResilience = vp8Params->bErrorResilientMode;
+    if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) {
+        mDCTPartitions = vp8Params->nDCTPartitions;
+    } else {
+        return OMX_ErrorBadParameter;
+    }
+    return OMX_ErrorNone;
+}
+
+}  // namespace android
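For reference, this is roughly the parameter block a client would hand to the new component to exercise internalSetVp8Params() above. It is an illustrative sketch, not part of the patch; port index 1 is an assumption (the output port index conventional for these soft encoders), and the OMX version field is left zeroed for brevity.

    #include <string.h>

    #include <OMX_Video.h>  // OMX_VIDEO_PARAM_VP8TYPE

    static void fillVp8EncoderParams(OMX_VIDEO_PARAM_VP8TYPE *p) {
        memset(p, 0, sizeof(*p));
        p->nSize = sizeof(*p);
        p->nPortIndex = 1;                        // assumed output port index
        p->eProfile = OMX_VIDEO_VP8ProfileMain;   // the only profile accepted above
        p->eLevel = OMX_VIDEO_VP8Level_Version2;  // any of Version0..Version3 passes
        p->nDCTPartitions = 2;                    // must not exceed kMaxDCTPartitions (3)
        p->bErrorResilientMode = OMX_TRUE;
    }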
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
new file mode 100644
index 0000000..b4904bf
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP8Encoder.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VP8_ENCODER_H_
+
+#define SOFT_VP8_ENCODER_H_
+
+#include "SoftVPXEncoder.h"
+
+#include <OMX_VideoExt.h>
+#include <OMX_IndexExt.h>
+
+#include <hardware/gralloc.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8cx.h"
+
+namespace android {
+
+// Exposes a vp8 encoder as an OMX Component
+//
+// In addition to the base class settings, only the following encoder settings
+// are available:
+//    - token partitioning
+struct SoftVP8Encoder : public SoftVPXEncoder {
+    SoftVP8Encoder(const char *name,
+                   const OMX_CALLBACKTYPE *callbacks,
+                   OMX_PTR appData,
+                   OMX_COMPONENTTYPE **component);
+
+protected:
+    // Returns current values for requested OMX
+    // parameters
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR param);
+
+    // Validates, extracts and stores relevant OMX
+    // parameters
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR param);
+
+    // Populates |mCodecInterface| with codec specific settings.
+    virtual void setCodecSpecificInterface();
+
+    // Sets codec specific configuration.
+    virtual void setCodecSpecificConfiguration();
+
+    // Initializes codec specific encoder settings.
+    virtual vpx_codec_err_t setCodecSpecificControls();
+
+    // Gets vp8 specific parameters.
+    OMX_ERRORTYPE internalGetVp8Params(
+        OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+
+    // Handles vp8 specific parameters.
+    OMX_ERRORTYPE internalSetVp8Params(
+        const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
+
+private:
+    // Max value supported for DCT partitions
+    static const uint32_t kMaxDCTPartitions = 3;
+
+    // vp8 specific configuration parameter
+    // that enables token partitioning of
+    // the stream into substreams
+    int32_t mDCTPartitions;
+
+    // Encoder profile corresponding to the OMX level parameter
+    //
+    // The inconsistency in the naming comes from the
+    // OMX spec referring to vpx profiles (g_profile)
+    // as "levels" while using the name "profile" for
+    // something else.
+    OMX_VIDEO_VP8LEVELTYPE mLevel;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftVP8Encoder);
+};
+
+}  // namespace android
+
+#endif  // SOFT_VP8_ENCODER_H_
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
new file mode 100644
index 0000000..4c7290d
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "SoftVP9Encoder"
+#include "SoftVP9Encoder.h"
+
+#include <utils/Log.h>
+#include <utils/misc.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/hardware/MetadataBufferType.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+static const CodecProfileLevel kVp9ProfileLevels[] = {
+    { OMX_VIDEO_VP9Profile0, OMX_VIDEO_VP9Level41 },
+};
+
+SoftVP9Encoder::SoftVP9Encoder(
+        const char *name, const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+    : SoftVPXEncoder(
+            name, callbacks, appData, component, "video_encoder.vp9",
+            OMX_VIDEO_CodingVP9, MEDIA_MIMETYPE_VIDEO_VP9, 4,
+            kVp9ProfileLevels, NELEM(kVp9ProfileLevels)),
+      mLevel(OMX_VIDEO_VP9Level1),
+      mTileColumns(0),
+      mFrameParallelDecoding(OMX_FALSE) {
+}
+
+void SoftVP9Encoder::setCodecSpecificInterface() {
+    mCodecInterface = vpx_codec_vp9_cx();
+}
+
+void SoftVP9Encoder::setCodecSpecificConfiguration() {
+    mCodecConfiguration->g_profile = 0;
+}
+
+vpx_codec_err_t SoftVP9Encoder::setCodecSpecificControls() {
+    vpx_codec_err_t codecReturn = vpx_codec_control(
+            mCodecContext, VP9E_SET_TILE_COLUMNS, mTileColumns);
+    if (codecReturn != VPX_CODEC_OK) {
+        ALOGE("Error setting VP9E_SET_TILE_COLUMNS to %d. vpx_codec_control() "
+              "returned %d", mTileColumns, codecReturn);
+        return codecReturn;
+    }
+    codecReturn = vpx_codec_control(
+            mCodecContext, VP9E_SET_FRAME_PARALLEL_DECODING,
+            mFrameParallelDecoding);
+    if (codecReturn != VPX_CODEC_OK) {
+        ALOGE("Error setting VP9E_SET_FRAME_PARALLEL_DECODING to %d."
+              "vpx_codec_control() returned %d", mFrameParallelDecoding,
+              codecReturn);
+        return codecReturn;
+    }
+    // For VP9, we always set CPU_USED to 8 (because the realtime default is 0
+    // which is too slow).
+    codecReturn = vpx_codec_control(mCodecContext, VP8E_SET_CPUUSED, 8);
+    if (codecReturn != VPX_CODEC_OK) {
+        ALOGE("Error setting VP8E_SET_CPUUSED to 8. vpx_codec_control() "
+              "returned %d", codecReturn);
+        return codecReturn;
+    }
+    return codecReturn;
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR param) {
+    // can include extension index OMX_INDEXEXTTYPE
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
+        case OMX_IndexParamVideoVp9:
+            return internalGetVp9Params(
+                    (OMX_VIDEO_PARAM_VP9TYPE *)param);
+
+        default:
+            return SoftVPXEncoder::internalGetParameter(index, param);
+    }
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR param) {
+    // can include extension index OMX_INDEXEXTTYPE
+    const int32_t indexFull = index;
+
+    switch (indexFull) {
+        case OMX_IndexParamVideoVp9:
+            return internalSetVp9Params(
+                    (const OMX_VIDEO_PARAM_VP9TYPE *)param);
+
+        default:
+            return SoftVPXEncoder::internalSetParameter(index, param);
+    }
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalGetVp9Params(
+        OMX_VIDEO_PARAM_VP9TYPE *vp9Params) {
+    if (vp9Params->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    vp9Params->eProfile = OMX_VIDEO_VP9Profile0;
+    vp9Params->eLevel = mLevel;
+    vp9Params->bErrorResilientMode = mErrorResilience;
+    vp9Params->nTileColumns = mTileColumns;
+    vp9Params->bEnableFrameParallelDecoding = mFrameParallelDecoding;
+    return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVP9Encoder::internalSetVp9Params(
+        const OMX_VIDEO_PARAM_VP9TYPE *vp9Params) {
+    if (vp9Params->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    if (vp9Params->eProfile != OMX_VIDEO_VP9Profile0) {
+        return OMX_ErrorBadParameter;
+    }
+
+    if (vp9Params->eLevel == OMX_VIDEO_VP9Level1 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level11 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level2 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level21 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level3 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level31 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level4 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level41 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level5 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level51 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level52 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level6 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level61 ||
+        vp9Params->eLevel == OMX_VIDEO_VP9Level62) {
+        mLevel = vp9Params->eLevel;
+    } else {
+        return OMX_ErrorBadParameter;
+    }
+
+    mErrorResilience = vp9Params->bErrorResilientMode;
+    mTileColumns = vp9Params->nTileColumns;
+    mFrameParallelDecoding = vp9Params->bEnableFrameParallelDecoding;
+    return OMX_ErrorNone;
+}
+
+}  // namespace android
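Similarly, the VP9-specific knobs accepted by internalSetVp9Params() above map onto the libvpx controls issued in setCodecSpecificControls(). A hedged sketch of the corresponding parameter block follows (not part of the patch; port index 1 is again assumed to be the output port).

    #include <string.h>

    #include <OMX_VideoExt.h>  // OMX_VIDEO_PARAM_VP9TYPE

    static void fillVp9EncoderParams(OMX_VIDEO_PARAM_VP9TYPE *p) {
        memset(p, 0, sizeof(*p));
        p->nSize = sizeof(*p);
        p->nPortIndex = 1;                           // assumed output port index
        p->eProfile = OMX_VIDEO_VP9Profile0;         // the only profile accepted above
        p->eLevel = OMX_VIDEO_VP9Level41;
        p->nTileColumns = 4;                         // forwarded to VP9E_SET_TILE_COLUMNS
        p->bEnableFrameParallelDecoding = OMX_TRUE;  // VP9E_SET_FRAME_PARALLEL_DECODING
        p->bErrorResilientMode = OMX_FALSE;
    }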
diff --git a/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
new file mode 100644
index 0000000..85df69a
--- /dev/null
+++ b/media/libstagefright/codecs/on2/enc/SoftVP9Encoder.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VP9_ENCODER_H_
+
+#define SOFT_VP9_ENCODER_H_
+
+#include "SoftVPXEncoder.h"
+
+#include <OMX_VideoExt.h>
+#include <OMX_IndexExt.h>
+
+#include <hardware/gralloc.h>
+
+#include "vpx/vpx_encoder.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vp8cx.h"
+
+namespace android {
+
+// Exposes a VP9 encoder as an OMX Component
+//
+// In addition to the base class settings, only the following encoder settings
+// are available:
+//    - tile rows
+//    - tile columns
+//    - frame parallel mode
+struct SoftVP9Encoder : public SoftVPXEncoder {
+    SoftVP9Encoder(const char *name,
+                   const OMX_CALLBACKTYPE *callbacks,
+                   OMX_PTR appData,
+                   OMX_COMPONENTTYPE **component);
+
+protected:
+    // Returns current values for requested OMX
+    // parameters
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR param);
+
+    // Validates, extracts and stores relevant OMX
+    // parameters
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR param);
+
+    // Populates |mCodecInterface| with codec specific settings.
+    virtual void setCodecSpecificInterface();
+
+    // Sets codec specific configuration.
+    virtual void setCodecSpecificConfiguration();
+
+    // Initializes codec specific encoder settings.
+    virtual vpx_codec_err_t setCodecSpecificControls();
+
+    // Gets vp9 specific parameters.
+    OMX_ERRORTYPE internalGetVp9Params(
+        OMX_VIDEO_PARAM_VP9TYPE* vp9Params);
+
+    // Handles vp9 specific parameters.
+    OMX_ERRORTYPE internalSetVp9Params(
+        const OMX_VIDEO_PARAM_VP9TYPE* vp9Params);
+
+private:
+    // Encoder profile corresponding to the OMX level parameter
+    //
+    // The inconsistency in the naming comes from the
+    // OMX spec referring to vpx profiles (g_profile)
+    // as "levels" while using the name "profile" for
+    // something else.
+    OMX_VIDEO_VP9LEVELTYPE mLevel;
+
+    int32_t mTileColumns;
+
+    OMX_BOOL mFrameParallelDecoding;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftVP9Encoder);
+};
+
+}  // namespace android
+
+#endif  // SOFT_VP9_ENCODER_H_
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 5edfbb5..5609032 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -18,6 +18,9 @@
 #define LOG_TAG "SoftVPXEncoder"
 #include "SoftVPXEncoder.h"
 
+#include "SoftVP8Encoder.h"
+#include "SoftVP9Encoder.h"
+
 #include <utils/Log.h>
 #include <utils/misc.h>
 
@@ -42,7 +45,6 @@
     params->nVersion.s.nStep = 0;
 }
 
-
 static int GetCPUCoreCount() {
     int cpuCoreCount = 1;
 #if defined(_SC_NPROCESSORS_ONLN)
@@ -55,30 +57,26 @@
     return cpuCoreCount;
 }
 
-static const CodecProfileLevel kProfileLevels[] = {
-    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version0 },
-    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version1 },
-    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version2 },
-    { OMX_VIDEO_VP8ProfileMain, OMX_VIDEO_VP8Level_Version3 },
-};
-
 SoftVPXEncoder::SoftVPXEncoder(const char *name,
                                const OMX_CALLBACKTYPE *callbacks,
                                OMX_PTR appData,
-                               OMX_COMPONENTTYPE **component)
+                               OMX_COMPONENTTYPE **component,
+                               const char* role,
+                               OMX_VIDEO_CODINGTYPE codingType,
+                               const char* mimeType,
+                               int32_t minCompressionRatio,
+                               const CodecProfileLevel *profileLevels,
+                               size_t numProfileLevels)
     : SoftVideoEncoderOMXComponent(
-            name, "video_encoder.vp8", OMX_VIDEO_CodingVP8,
-            kProfileLevels, NELEM(kProfileLevels),
+            name, role, codingType, profileLevels, numProfileLevels,
             176 /* width */, 144 /* height */,
             callbacks, appData, component),
       mCodecContext(NULL),
       mCodecConfiguration(NULL),
       mCodecInterface(NULL),
       mBitrateUpdated(false),
-      mBitrateControlMode(VPX_VBR),  // variable bitrate
-      mDCTPartitions(0),
+      mBitrateControlMode(VPX_VBR),
       mErrorResilience(OMX_FALSE),
-      mLevel(OMX_VIDEO_VP8Level_Version0),
       mKeyFrameInterval(0),
       mMinQuantizer(0),
       mMaxQuantizer(0),
@@ -96,10 +94,9 @@
 
     initPorts(
             kNumBuffers, kNumBuffers, kMinOutputBufferSize,
-            MEDIA_MIMETYPE_VIDEO_VP8, 2 /* minCompressionRatio */);
+            mimeType, minCompressionRatio);
 }
 
-
 SoftVPXEncoder::~SoftVPXEncoder() {
     releaseEncoder();
 }
@@ -108,18 +105,18 @@
     vpx_codec_err_t codec_return;
     status_t result = UNKNOWN_ERROR;
 
-    mCodecInterface = vpx_codec_vp8_cx();
+    setCodecSpecificInterface();
     if (mCodecInterface == NULL) {
         goto CLEAN_UP;
     }
-    ALOGD("VP8: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
+    ALOGD("VPx: initEncoder. BRMode: %u. TSLayers: %zu. KF: %u. QP: %u - %u",
           (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
           mMinQuantizer, mMaxQuantizer);
 
     mCodecConfiguration = new vpx_codec_enc_cfg_t;
     codec_return = vpx_codec_enc_config_default(mCodecInterface,
                                                 mCodecConfiguration,
-                                                0);  // Codec specific flags
+                                                0);
 
     if (codec_return != VPX_CODEC_OK) {
         ALOGE("Error populating default configuration for vpx encoder.");
@@ -131,27 +128,6 @@
     mCodecConfiguration->g_threads = GetCPUCoreCount();
     mCodecConfiguration->g_error_resilient = mErrorResilience;
 
-    switch (mLevel) {
-        case OMX_VIDEO_VP8Level_Version0:
-            mCodecConfiguration->g_profile = 0;
-            break;
-
-        case OMX_VIDEO_VP8Level_Version1:
-            mCodecConfiguration->g_profile = 1;
-            break;
-
-        case OMX_VIDEO_VP8Level_Version2:
-            mCodecConfiguration->g_profile = 2;
-            break;
-
-        case OMX_VIDEO_VP8Level_Version3:
-            mCodecConfiguration->g_profile = 3;
-            break;
-
-        default:
-            mCodecConfiguration->g_profile = 0;
-    }
-
     // OMX timebase unit is microsecond
     // g_timebase is in seconds (i.e. 1/1000000 seconds)
     mCodecConfiguration->g_timebase.num = 1;
@@ -253,7 +229,6 @@
             goto CLEAN_UP;
         }
     }
-
     // Set bitrate values for each layer
     for (size_t i = 0; i < mCodecConfiguration->ts_number_layers; i++) {
         mCodecConfiguration->ts_target_bitrate[i] =
@@ -271,7 +246,7 @@
     if (mMaxQuantizer > 0) {
         mCodecConfiguration->rc_max_quantizer = mMaxQuantizer;
     }
-
+    setCodecSpecificConfiguration();
     mCodecContext = new vpx_codec_ctx_t;
     codec_return = vpx_codec_enc_init(mCodecContext,
                                       mCodecInterface,
@@ -283,14 +258,6 @@
         goto CLEAN_UP;
     }
 
-    codec_return = vpx_codec_control(mCodecContext,
-                                     VP8E_SET_TOKEN_PARTITIONS,
-                                     mDCTPartitions);
-    if (codec_return != VPX_CODEC_OK) {
-        ALOGE("Error setting dct partitions for vpx encoder.");
-        goto CLEAN_UP;
-    }
-
     // Extra CBR settings
     if (mBitrateControlMode == VPX_CBR) {
         codec_return = vpx_codec_control(mCodecContext,
@@ -318,6 +285,13 @@
         }
     }
 
+    codec_return = setCodecSpecificControls();
+
+    if (codec_return != VPX_CODEC_OK) {
+        // The codec specific method would have logged the error.
+        goto CLEAN_UP;
+    }
+
     if (mColorFormat != OMX_COLOR_FormatYUV420Planar || mInputDataIsMeta) {
         free(mConversionBuffer);
         mConversionBuffer = NULL;
@@ -338,7 +312,6 @@
     return result;
 }
 
-
 status_t SoftVPXEncoder::releaseEncoder() {
     if (mCodecContext != NULL) {
         vpx_codec_destroy(mCodecContext);
@@ -362,7 +335,6 @@
     return OK;
 }
 
-
 OMX_ERRORTYPE SoftVPXEncoder::internalGetParameter(OMX_INDEXTYPE index,
                                                    OMX_PTR param) {
     // can include extension index OMX_INDEXEXTTYPE
@@ -393,54 +365,15 @@
             return OMX_ErrorNone;
         }
 
-        // VP8 specific parameters that use extension headers
-        case OMX_IndexParamVideoVp8: {
-            OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
-                (OMX_VIDEO_PARAM_VP8TYPE *)param;
-
-            if (!isValidOMXParam(vp8Params)) {
-                return OMX_ErrorBadParameter;
-            }
-
-            if (vp8Params->nPortIndex != kOutputPortIndex) {
-                return OMX_ErrorUnsupportedIndex;
-            }
-
-            vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
-            vp8Params->eLevel = mLevel;
-            vp8Params->nDCTPartitions = mDCTPartitions;
-            vp8Params->bErrorResilientMode = mErrorResilience;
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoAndroidVp8Encoder: {
-            OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
-                (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param;
-
-            if (!isValidOMXParam(vp8AndroidParams)) {
-                return OMX_ErrorBadParameter;
-            }
-
-            if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
-                return OMX_ErrorUnsupportedIndex;
-            }
-
-            vp8AndroidParams->nKeyFrameInterval = mKeyFrameInterval;
-            vp8AndroidParams->eTemporalPattern = mTemporalPatternType;
-            vp8AndroidParams->nTemporalLayerCount = mTemporalLayers;
-            vp8AndroidParams->nMinQuantizer = mMinQuantizer;
-            vp8AndroidParams->nMaxQuantizer = mMaxQuantizer;
-            memcpy(vp8AndroidParams->nTemporalLayerBitrateRatio,
-                   mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
-            return OMX_ErrorNone;
-        }
+        case OMX_IndexParamVideoAndroidVp8Encoder:
+            return internalGetAndroidVpxParams(
+                (OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
 
         default:
             return SoftVideoEncoderOMXComponent::internalGetParameter(index, param);
     }
 }
 
-
 OMX_ERRORTYPE SoftVPXEncoder::internalSetParameter(OMX_INDEXTYPE index,
                                                    const OMX_PTR param) {
     // can include extension index OMX_INDEXEXTTYPE
@@ -458,27 +391,9 @@
             return internalSetBitrateParams(bitRate);
         }
 
-        case OMX_IndexParamVideoVp8: {
-            const OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
-                (const OMX_VIDEO_PARAM_VP8TYPE*) param;
-
-            if (!isValidOMXParam(vp8Params)) {
-                return OMX_ErrorBadParameter;
-            }
-
-            return internalSetVp8Params(vp8Params);
-        }
-
-        case OMX_IndexParamVideoAndroidVp8Encoder: {
-            const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
-                (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE*) param;
-
-            if (!isValidOMXParam(vp8AndroidParams)) {
-                return OMX_ErrorBadParameter;
-            }
-
-            return internalSetAndroidVp8Params(vp8AndroidParams);
-        }
+        case OMX_IndexParamVideoAndroidVp8Encoder:
+            return internalSetAndroidVpxParams(
+                (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
 
         default:
             return SoftVideoEncoderOMXComponent::internalSetParameter(index, param);
@@ -530,77 +445,21 @@
     }
 }
 
-OMX_ERRORTYPE SoftVPXEncoder::internalSetVp8Params(
-        const OMX_VIDEO_PARAM_VP8TYPE* vp8Params) {
-    if (vp8Params->nPortIndex != kOutputPortIndex) {
+OMX_ERRORTYPE SoftVPXEncoder::internalGetBitrateParams(
+        OMX_VIDEO_PARAM_BITRATETYPE* bitrate) {
+    if (bitrate->nPortIndex != kOutputPortIndex) {
         return OMX_ErrorUnsupportedIndex;
     }
 
-    if (vp8Params->eProfile != OMX_VIDEO_VP8ProfileMain) {
-        return OMX_ErrorBadParameter;
-    }
+    bitrate->nTargetBitrate = mBitrate;
 
-    if (vp8Params->eLevel == OMX_VIDEO_VP8Level_Version0 ||
-        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version1 ||
-        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version2 ||
-        vp8Params->eLevel == OMX_VIDEO_VP8Level_Version3) {
-        mLevel = vp8Params->eLevel;
+    if (mBitrateControlMode == VPX_VBR) {
+        bitrate->eControlRate = OMX_Video_ControlRateVariable;
+    } else if (mBitrateControlMode == VPX_CBR) {
+        bitrate->eControlRate = OMX_Video_ControlRateConstant;
     } else {
-        return OMX_ErrorBadParameter;
+        return OMX_ErrorUnsupportedSetting;
     }
-
-    if (vp8Params->nDCTPartitions <= kMaxDCTPartitions) {
-        mDCTPartitions = vp8Params->nDCTPartitions;
-    } else {
-        return OMX_ErrorBadParameter;
-    }
-
-    mErrorResilience = vp8Params->bErrorResilientMode;
-    return OMX_ErrorNone;
-}
-
-OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVp8Params(
-        const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams) {
-    if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
-        return OMX_ErrorUnsupportedIndex;
-    }
-    if (vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
-        vp8AndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
-        return OMX_ErrorBadParameter;
-    }
-    if (vp8AndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
-        return OMX_ErrorBadParameter;
-    }
-    if (vp8AndroidParams->nMinQuantizer > vp8AndroidParams->nMaxQuantizer) {
-        return OMX_ErrorBadParameter;
-    }
-
-    mTemporalPatternType = vp8AndroidParams->eTemporalPattern;
-    if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
-        mTemporalLayers = vp8AndroidParams->nTemporalLayerCount;
-    } else if (vp8AndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
-        mTemporalLayers = 0;
-    }
-    // Check the bitrate distribution between layers is in increasing order
-    if (mTemporalLayers > 1) {
-        for (size_t i = 0; i < mTemporalLayers - 1; i++) {
-            if (vp8AndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
-                    vp8AndroidParams->nTemporalLayerBitrateRatio[i]) {
-                ALOGE("Wrong bitrate ratio - should be in increasing order.");
-                return OMX_ErrorBadParameter;
-            }
-        }
-    }
-    mKeyFrameInterval = vp8AndroidParams->nKeyFrameInterval;
-    mMinQuantizer = vp8AndroidParams->nMinQuantizer;
-    mMaxQuantizer = vp8AndroidParams->nMaxQuantizer;
-    memcpy(mTemporalLayerBitrateRatio, vp8AndroidParams->nTemporalLayerBitrateRatio,
-            sizeof(mTemporalLayerBitrateRatio));
-    ALOGD("VP8: internalSetAndroidVp8Params. BRMode: %u. TS: %zu. KF: %u."
-          " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
-          (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
-          mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
-          mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
     return OMX_ErrorNone;
 }
 
@@ -623,71 +482,134 @@
     return OMX_ErrorNone;
 }
 
+OMX_ERRORTYPE SoftVPXEncoder::internalGetAndroidVpxParams(
+        OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams) {
+    if (vpxAndroidParams->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+
+    vpxAndroidParams->nKeyFrameInterval = mKeyFrameInterval;
+    vpxAndroidParams->eTemporalPattern = mTemporalPatternType;
+    vpxAndroidParams->nTemporalLayerCount = mTemporalLayers;
+    vpxAndroidParams->nMinQuantizer = mMinQuantizer;
+    vpxAndroidParams->nMaxQuantizer = mMaxQuantizer;
+    memcpy(vpxAndroidParams->nTemporalLayerBitrateRatio,
+           mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
+    return OMX_ErrorNone;
+}
+
+OMX_ERRORTYPE SoftVPXEncoder::internalSetAndroidVpxParams(
+        const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams) {
+    if (vpxAndroidParams->nPortIndex != kOutputPortIndex) {
+        return OMX_ErrorUnsupportedIndex;
+    }
+    if (vpxAndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternNone &&
+            vpxAndroidParams->eTemporalPattern != OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+        return OMX_ErrorBadParameter;
+    }
+    if (vpxAndroidParams->nTemporalLayerCount > OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS) {
+        return OMX_ErrorBadParameter;
+    }
+    if (vpxAndroidParams->nMinQuantizer > vpxAndroidParams->nMaxQuantizer) {
+        return OMX_ErrorBadParameter;
+    }
+
+    mTemporalPatternType = vpxAndroidParams->eTemporalPattern;
+    if (vpxAndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternWebRTC) {
+        mTemporalLayers = vpxAndroidParams->nTemporalLayerCount;
+    } else if (vpxAndroidParams->eTemporalPattern == OMX_VIDEO_VPXTemporalLayerPatternNone) {
+        mTemporalLayers = 0;
+    }
+    // Check the bitrate distribution between layers is in increasing order
+    if (mTemporalLayers > 1) {
+        for (size_t i = 0; i < mTemporalLayers - 1; i++) {
+            if (vpxAndroidParams->nTemporalLayerBitrateRatio[i + 1] <=
+                    vpxAndroidParams->nTemporalLayerBitrateRatio[i]) {
+                ALOGE("Wrong bitrate ratio - should be in increasing order.");
+                return OMX_ErrorBadParameter;
+            }
+        }
+    }
+    mKeyFrameInterval = vpxAndroidParams->nKeyFrameInterval;
+    mMinQuantizer = vpxAndroidParams->nMinQuantizer;
+    mMaxQuantizer = vpxAndroidParams->nMaxQuantizer;
+    memcpy(mTemporalLayerBitrateRatio, vpxAndroidParams->nTemporalLayerBitrateRatio,
+            sizeof(mTemporalLayerBitrateRatio));
+    ALOGD("VPx: internalSetAndroidVpxParams. BRMode: %u. TS: %zu. KF: %u."
+            " QP: %u - %u BR0: %u. BR1: %u. BR2: %u",
+            (uint32_t)mBitrateControlMode, mTemporalLayers, mKeyFrameInterval,
+            mMinQuantizer, mMaxQuantizer, mTemporalLayerBitrateRatio[0],
+            mTemporalLayerBitrateRatio[1], mTemporalLayerBitrateRatio[2]);
+    return OMX_ErrorNone;
+}
+
 vpx_enc_frame_flags_t SoftVPXEncoder::getEncodeFlags() {
     vpx_enc_frame_flags_t flags = 0;
-    int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
-    mTemporalPatternIdx++;
-    switch (mTemporalPattern[patternIdx]) {
-        case kTemporalUpdateLast:
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_REF_GF;
-            flags |= VP8_EFLAG_NO_REF_ARF;
-            break;
-        case kTemporalUpdateGoldenWithoutDependency:
-            flags |= VP8_EFLAG_NO_REF_GF;
-            // Deliberately no break here.
-        case kTemporalUpdateGolden:
-            flags |= VP8_EFLAG_NO_REF_ARF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            break;
-        case kTemporalUpdateAltrefWithoutDependency:
-            flags |= VP8_EFLAG_NO_REF_ARF;
-            flags |= VP8_EFLAG_NO_REF_GF;
-            // Deliberately no break here.
-        case kTemporalUpdateAltref:
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            break;
-        case kTemporalUpdateNoneNoRefAltref:
-            flags |= VP8_EFLAG_NO_REF_ARF;
-            // Deliberately no break here.
-        case kTemporalUpdateNone:
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            flags |= VP8_EFLAG_NO_UPD_ENTROPY;
-            break;
-        case kTemporalUpdateNoneNoRefGoldenRefAltRef:
-            flags |= VP8_EFLAG_NO_REF_GF;
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            flags |= VP8_EFLAG_NO_UPD_ENTROPY;
-            break;
-        case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
-            flags |= VP8_EFLAG_NO_REF_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            break;
-        case kTemporalUpdateLastRefAltRef:
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_REF_GF;
-            break;
-        case kTemporalUpdateGoldenRefAltRef:
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_LAST;
-            break;
-        case kTemporalUpdateLastAndGoldenRefAltRef:
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_REF_GF;
-            break;
-        case kTemporalUpdateLastRefAll:
-            flags |= VP8_EFLAG_NO_UPD_ARF;
-            flags |= VP8_EFLAG_NO_UPD_GF;
-            break;
+    if (mTemporalPatternLength > 0) {
+      int patternIdx = mTemporalPatternIdx % mTemporalPatternLength;
+      mTemporalPatternIdx++;
+      switch (mTemporalPattern[patternIdx]) {
+          case kTemporalUpdateLast:
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_REF_GF;
+              flags |= VP8_EFLAG_NO_REF_ARF;
+              break;
+          case kTemporalUpdateGoldenWithoutDependency:
+              flags |= VP8_EFLAG_NO_REF_GF;
+              // Deliberately no break here.
+          case kTemporalUpdateGolden:
+              flags |= VP8_EFLAG_NO_REF_ARF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              break;
+          case kTemporalUpdateAltrefWithoutDependency:
+              flags |= VP8_EFLAG_NO_REF_ARF;
+              flags |= VP8_EFLAG_NO_REF_GF;
+              // Deliberately no break here.
+          case kTemporalUpdateAltref:
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              break;
+          case kTemporalUpdateNoneNoRefAltref:
+              flags |= VP8_EFLAG_NO_REF_ARF;
+              // Deliberately no break here.
+          case kTemporalUpdateNone:
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+              break;
+          case kTemporalUpdateNoneNoRefGoldenRefAltRef:
+              flags |= VP8_EFLAG_NO_REF_GF;
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              flags |= VP8_EFLAG_NO_UPD_ENTROPY;
+              break;
+          case kTemporalUpdateGoldenWithoutDependencyRefAltRef:
+              flags |= VP8_EFLAG_NO_REF_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              break;
+          case kTemporalUpdateLastRefAltRef:
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_REF_GF;
+              break;
+          case kTemporalUpdateGoldenRefAltRef:
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_LAST;
+              break;
+          case kTemporalUpdateLastAndGoldenRefAltRef:
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_REF_GF;
+              break;
+          case kTemporalUpdateLastRefAll:
+              flags |= VP8_EFLAG_NO_UPD_ARF;
+              flags |= VP8_EFLAG_NO_UPD_GF;
+              break;
+      }
     }
     return flags;
 }
@@ -765,10 +687,7 @@
         vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
                      kInputBufferAlignment, (uint8_t *)source);
 
-        vpx_enc_frame_flags_t flags = 0;
-        if (mTemporalPatternLength > 0) {
-            flags = getEncodeFlags();
-        }
+        vpx_enc_frame_flags_t flags = getEncodeFlags();
         if (mKeyFrameRequested) {
             flags |= VPX_EFLAG_FORCE_KF;
             mKeyFrameRequested = false;
@@ -779,7 +698,7 @@
             vpx_codec_err_t res = vpx_codec_enc_config_set(mCodecContext,
                                                            mCodecConfiguration);
             if (res != VPX_CODEC_OK) {
-                ALOGE("vp8 encoder failed to update bitrate: %s",
+                ALOGE("vpx encoder failed to update bitrate: %s",
                       vpx_codec_err_to_string(res));
                 notify(OMX_EventError,
                        OMX_ErrorUndefined,
@@ -849,9 +768,15 @@
 
 }  // namespace android
 
-
 android::SoftOMXComponent *createSoftOMXComponent(
         const char *name, const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData, OMX_COMPONENTTYPE **component) {
-    return new android::SoftVPXEncoder(name, callbacks, appData, component);
+  if (!strcmp(name, "OMX.google.vp8.encoder")) {
+      return new android::SoftVP8Encoder(name, callbacks, appData, component);
+  } else if (!strcmp(name, "OMX.google.vp9.encoder")) {
+      return new android::SoftVP9Encoder(name, callbacks, appData, component);
+  } else {
+      CHECK(!"Unknown component");
+  }
+  return NULL;
 }
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index cd0a0cf..86e71da 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -31,18 +31,18 @@
 
 namespace android {
 
-// Exposes a vpx encoder as an OMX Component
+// Base class for a VPX Encoder OMX Component
 //
 // Boilerplate for callback bindings are taken care
 // by the base class SimpleSoftOMXComponent and its
 // parent SoftOMXComponent.
 //
-// Only following encoder settings are available
+// Only the following encoder settings are available (codec specific settings might
+// be available in the sub-classes):
 //    - target bitrate
 //    - rate control (constant / variable)
 //    - frame rate
 //    - error resilience
-//    - token partitioning
 //    - reconstruction & loop filters (g_profile)
 //
 // Only following color formats are recognized
@@ -54,7 +54,7 @@
 //    - encoding deadline is realtime
 //    - multithreaded encoding utilizes a number of threads equal
 // to online cpu's available
-//    - the algorithm interface for encoder is vp8
+//    - the algorithm interface for encoder is decided by the sub-class in use
 //    - fractional bits of frame rate is discarded
 //    - OMX timestamps are in microseconds, therefore
 // encoder timebase is fixed to 1/1000000
@@ -63,7 +63,13 @@
     SoftVPXEncoder(const char *name,
                    const OMX_CALLBACKTYPE *callbacks,
                    OMX_PTR appData,
-                   OMX_COMPONENTTYPE **component);
+                   OMX_COMPONENTTYPE **component,
+                   const char* role,
+                   OMX_VIDEO_CODINGTYPE codingType,
+                   const char* mimeType,
+                   int32_t minCompressionRatio,
+                   const CodecProfileLevel *profileLevels,
+                   size_t numProfileLevels);
 
 protected:
     virtual ~SoftVPXEncoder();
@@ -87,7 +93,44 @@
     // encoding of the frame
     virtual void onQueueFilled(OMX_U32 portIndex);
 
-private:
+    // Initializes vpx encoder with available settings.
+    status_t initEncoder();
+
+    // Populates mCodecInterface with codec specific settings.
+    virtual void setCodecSpecificInterface() = 0;
+
+    // Sets codec specific configuration.
+    virtual void setCodecSpecificConfiguration() = 0;
+
+    // Sets codec specific encoder controls.
+    virtual vpx_codec_err_t setCodecSpecificControls() = 0;
+
+    // Get current encode flags.
+    virtual vpx_enc_frame_flags_t getEncodeFlags();
+
+    // Releases vpx encoder instance, with its associated
+    // data structures.
+    //
+    // Unless called earlier, this is handled by the
+    // dtor.
+    status_t releaseEncoder();
+
+    // Get bitrate parameters.
+    virtual OMX_ERRORTYPE internalGetBitrateParams(
+        OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
+
+    // Updates bitrate to reflect port settings.
+    virtual OMX_ERRORTYPE internalSetBitrateParams(
+        const OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
+
+    // Gets Android vpx specific parameters.
+    OMX_ERRORTYPE internalGetAndroidVpxParams(
+            OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams);
+
+    // Handles Android vpx specific parameters.
+    OMX_ERRORTYPE internalSetAndroidVpxParams(
+            const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vpxAndroidParams);
+
     enum TemporalReferences {
         // For 1 layer case: reference all (last, golden, and alt ref), but only
         // update last.
@@ -137,9 +180,6 @@
     static const uint32_t kInputBufferAlignment = 1;
     static const uint32_t kOutputBufferAlignment = 2;
 
-    // Max value supported for DCT partitions
-    static const uint32_t kMaxDCTPartitions = 3;
-
     // Number of supported input color formats
     static const uint32_t kNumberOfSupportedColorFormats = 3;
 
@@ -161,23 +201,10 @@
     // Bitrate control mode, either constant or variable
     vpx_rc_mode mBitrateControlMode;
 
-    // vp8 specific configuration parameter
-    // that enables token partitioning of
-    // the stream into substreams
-    int32_t mDCTPartitions;
-
     // Parameter that denotes whether error resilience
     // is enabled in encoder
     OMX_BOOL mErrorResilience;
 
-    // Encoder profile corresponding to OMX level parameter
-    //
-    // The inconsistency in the naming is caused by
-    // OMX spec referring vpx profiles (g_profile)
-    // as "levels" whereas using the name "profile" for
-    // something else.
-    OMX_VIDEO_VP8LEVELTYPE mLevel;
-
     // Key frame interval in frames
     uint32_t mKeyFrameInterval;
 
@@ -216,31 +243,6 @@
 
     bool mKeyFrameRequested;
 
-    // Initializes vpx encoder with available settings.
-    status_t initEncoder();
-
-    // Releases vpx encoder instance, with it's associated
-    // data structures.
-    //
-    // Unless called earlier, this is handled by the
-    // dtor.
-    status_t releaseEncoder();
-
-    // Get current encode flags
-    vpx_enc_frame_flags_t getEncodeFlags();
-
-    // Updates bitrate to reflect port settings.
-    OMX_ERRORTYPE internalSetBitrateParams(
-        const OMX_VIDEO_PARAM_BITRATETYPE* bitrate);
-
-    // Handles vp8 specific parameters.
-    OMX_ERRORTYPE internalSetVp8Params(
-        const OMX_VIDEO_PARAM_VP8TYPE* vp8Params);
-
-    // Handles Android vp8 specific parameters.
-    OMX_ERRORTYPE internalSetAndroidVp8Params(
-        const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE* vp8AndroidParams);
-
     DISALLOW_EVIL_CONSTRUCTORS(SoftVPXEncoder);
 };
 
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index 7159674..9f2c055 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -99,7 +99,7 @@
 LOCAL_SANITIZE := signed-integer-overflow
 
 LOCAL_SHARED_LIBRARIES := \
-	libstagefright libstagefright_omx libstagefright_foundation libutils liblog \
+	libmedia libstagefright_omx libstagefright_foundation libutils liblog \
 
 LOCAL_MODULE := libstagefright_soft_h264dec
 
diff --git a/media/libstagefright/codecs/opus/dec/Android.mk b/media/libstagefright/codecs/opus/dec/Android.mk
index f272763..894c4da 100644
--- a/media/libstagefright/codecs/opus/dec/Android.mk
+++ b/media/libstagefright/codecs/opus/dec/Android.mk
@@ -10,7 +10,7 @@
         frameworks/native/include/media/openmax \
 
 LOCAL_SHARED_LIBRARIES := \
-        libopus libstagefright libstagefright_omx \
+        libopus libmedia libstagefright_omx \
         libstagefright_foundation libutils liblog
 
 LOCAL_CLANG := true
diff --git a/media/libstagefright/codecs/vorbis/dec/Android.mk b/media/libstagefright/codecs/vorbis/dec/Android.mk
index 039be6f..7af2993 100644
--- a/media/libstagefright/codecs/vorbis/dec/Android.mk
+++ b/media/libstagefright/codecs/vorbis/dec/Android.mk
@@ -10,7 +10,7 @@
         frameworks/native/include/media/openmax \
 
 LOCAL_SHARED_LIBRARIES := \
-        libvorbisidec libstagefright libstagefright_omx \
+        libvorbisidec libmedia libstagefright_omx \
         libstagefright_foundation libutils liblog
 
 LOCAL_MODULE := libstagefright_soft_vorbisdec
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index b03c769..ce164a2 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -101,5 +101,12 @@
             <Limit name="bitrate" range="1-40000000" />
             <Feature name="bitrate-modes" value="VBR,CBR" />
         </MediaCodec>
+        <MediaCodec name="OMX.google.vp9.encoder" type="video/x-vnd.on2.vp9">
+            <!-- profiles and levels:  ProfileMain : Level_Version0-3 -->
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="bitrate-modes" value="VBR,CBR" />
+        </MediaCodec>
     </Encoders>
 </Included>
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 5f11fb6..8a7c3eb 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -100,7 +100,7 @@
     }
 }
 
-static void makeFourCC(uint32_t fourcc, char *s) {
+static void makeFourCC(uint32_t fourcc, char *s, size_t bufsz) {
     s[0] = (fourcc >> 24) & 0xff;
     if (s[0]) {
         s[1] = (fourcc >> 16) & 0xff;
@@ -108,7 +108,7 @@
         s[3] = fourcc & 0xff;
         s[4] = 0;
     } else {
-        sprintf(s, "%u", fourcc);
+        snprintf(s, bufsz, "%u", fourcc);
     }
 }
 
@@ -146,7 +146,7 @@
                 if (verboseStats) {
                     for (size_t j = 0; j < handler->mMessages.size(); j++) {
                         char fourcc[15];
-                        makeFourCC(handler->mMessages.keyAt(j), fourcc);
+                        makeFourCC(handler->mMessages.keyAt(j), fourcc, sizeof(fourcc));
                         s.appendFormat("\n    %s: %u",
                                 fourcc,
                                 handler->mMessages.valueAt(j));
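
The ALooperRoster hunk above bounds the fourcc formatting with an explicit buffer size. Below is a minimal standalone sketch of that pattern; formatFourCC, the bufsz < 5 guard, and the main() driver are illustrative additions and not part of the patch.

#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static void formatFourCC(uint32_t fourcc, char *s, size_t bufsz) {
    if (bufsz < 5) {                  // need four characters plus the terminating NUL
        if (bufsz > 0) s[0] = 0;
        return;
    }
    s[0] = (fourcc >> 24) & 0xff;
    if (s[0]) {                       // printable tag: emit the four bytes directly
        s[1] = (fourcc >> 16) & 0xff;
        s[2] = (fourcc >> 8) & 0xff;
        s[3] = fourcc & 0xff;
        s[4] = 0;
    } else {                          // otherwise print the numeric value, bounded
        snprintf(s, bufsz, "%" PRIu32, fourcc);
    }
}

int main() {
    char fourcc[15];
    formatFourCC(0x74696f75u, fourcc, sizeof(fourcc));  // bytes 't','i','o','u'
    printf("%s\n", fourcc);                             // prints "tiou"
    return 0;
}

Passing sizeof(fourcc) at the call site, as the patched caller does, keeps the destination size and the formatting bound in one place.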
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index b167543..04fac19 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+#define LOG_TAG "AString"
+#include <utils/Log.h>
+
 #include <ctype.h>
 #include <stdarg.h>
 #include <stdio.h>
@@ -40,14 +43,24 @@
     : mData(NULL),
       mSize(0),
       mAllocSize(1) {
-    setTo(s);
+    if (!s) {
+        ALOGW("ctor got NULL, using empty string instead");
+        clear();
+    } else {
+        setTo(s);
+    }
 }
 
 AString::AString(const char *s, size_t size)
     : mData(NULL),
       mSize(0),
       mAllocSize(1) {
-    setTo(s, size);
+    if (!s) {
+        ALOGW("ctor got NULL, using empty string instead");
+        clear();
+    } else {
+        setTo(s, size);
+    }
 }
 
 AString::AString(const String8 &from)
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index d7439b2..88a8351 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -343,6 +343,23 @@
 }
 
 // static
+ColorAspects ColorUtils::unpackToColorAspects(uint32_t packed) {
+    ColorAspects aspects;
+    aspects.mRange        = (ColorAspects::Range)((packed >> 24) & 0xFF);
+    aspects.mPrimaries    = (ColorAspects::Primaries)((packed >> 16) & 0xFF);
+    aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((packed >> 8) & 0xFF);
+    aspects.mTransfer     = (ColorAspects::Transfer)(packed & 0xFF);
+
+    return aspects;
+}
+
+// static
+uint32_t ColorUtils::packToU32(const ColorAspects &aspects) {
+    return (aspects.mRange << 24) | (aspects.mPrimaries << 16)
+            | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer;
+}
+
+// static
 void ColorUtils::setDefaultCodecColorAspectsIfNeeded(
         ColorAspects &aspects, int32_t width, int32_t height) {
     ColorAspects::MatrixCoeffs coeffs;
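
packToU32 and unpackToColorAspects above store one aspect per byte of a uint32_t. A self-contained round-trip sketch of that layout follows; Aspects is a simplified stand-in for the real ColorAspects type.

#include <cassert>
#include <cstdint>

struct Aspects {                       // simplified stand-in for ColorAspects
    uint8_t range, primaries, matrixCoeffs, transfer;
};

static uint32_t pack(const Aspects &a) {
    return (uint32_t(a.range) << 24) | (uint32_t(a.primaries) << 16)
            | (uint32_t(a.matrixCoeffs) << 8) | a.transfer;
}

static Aspects unpack(uint32_t packed) {
    return Aspects{ uint8_t((packed >> 24) & 0xFF), uint8_t((packed >> 16) & 0xFF),
                    uint8_t((packed >> 8) & 0xFF),  uint8_t(packed & 0xFF) };
}

int main() {
    Aspects in{2, 1, 6, 3};            // arbitrary per-byte enum values
    Aspects out = unpack(pack(in));    // round-trip through a single uint32_t
    assert(out.range == in.range && out.primaries == in.primaries &&
           out.matrixCoeffs == in.matrixCoeffs && out.transfer == in.transfer);
    return 0;
}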
diff --git a/media/libstagefright/foundation/hexdump.cpp b/media/libstagefright/foundation/hexdump.cpp
index a44d832..872c5f3 100644
--- a/media/libstagefright/foundation/hexdump.cpp
+++ b/media/libstagefright/foundation/hexdump.cpp
@@ -49,7 +49,7 @@
         appendIndent(&line, indent);
 
         char tmp[32];
-        sprintf(tmp, "%08lx:  ", (unsigned long)offset);
+        snprintf(tmp, sizeof(tmp), "%08lx:  ", (unsigned long)offset);
 
         line.append(tmp);
 
@@ -60,7 +60,7 @@
             if (offset + i >= size) {
                 line.append("   ");
             } else {
-                sprintf(tmp, "%02x ", data[offset + i]);
+                snprintf(tmp, sizeof(tmp), "%02x ", data[offset + i]);
                 line.append(tmp);
             }
         }
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 3942158..9af9918 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -517,7 +517,7 @@
         if (mOffset == 126 || mOffset == 127) {
             // Special treatment for the track number and genre.
             char tmp[16];
-            sprintf(tmp, "%d", (int)*frameData);
+            snprintf(tmp, sizeof(tmp), "%d", (int)*frameData);
 
             id->setTo(tmp);
             return;
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index b7ce189..6b57e6f 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -141,6 +141,9 @@
 
     Track *findTrackByMimePrefix(const char *mimePrefix);
 
+    status_t parseAC3SampleEntry(off64_t offset);
+    status_t parseAC3SpecificBox(off64_t offset, uint16_t sampleRate);
+
     MPEG4Extractor(const MPEG4Extractor &);
     MPEG4Extractor &operator=(const MPEG4Extractor &);
 };
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index 6c073f0..f7b6ab6 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -132,6 +132,12 @@
             OMX_U32 range_offset, OMX_U32 range_length,
             OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
 
+    virtual status_t emptyGraphicBuffer(
+            node_id node,
+            buffer_id buffer,
+            const sp<GraphicBuffer> &graphicBuffer,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
+
     virtual status_t getExtensionIndex(
             node_id node,
             const char *parameter_name,
@@ -144,6 +150,8 @@
             const void *data,
             size_t size);
 
+    virtual status_t dispatchMessage(const omx_message &msg);
+
     virtual void binderDied(const wp<IBinder> &the_late_who);
 
     virtual bool isSecure(IOMX::node_id node);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 6411267..3753cb0 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -99,8 +99,6 @@
 
     status_t signalEndOfInputStream();
 
-    void signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
-
     status_t allocateSecureBuffer(
             OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
             void **buffer_data, sp<NativeHandle> *native_handle);
@@ -119,7 +117,7 @@
             OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
 
     status_t emptyGraphicBuffer(
-            OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &buffer,
+            OMX::buffer_id buffer, const sp<GraphicBuffer> &graphicBuffer,
             OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
 
     status_t getExtensionIndex(
@@ -137,7 +135,6 @@
 
     // handles messages and removes them from the list
     void onMessages(std::list<omx_message> &messages);
-    void onMessage(const omx_message &msg);
     void onObserverDied(OMXMaster *master);
     void onGetHandleFailed();
     void onEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
@@ -163,7 +160,6 @@
     // Access this through getGraphicBufferSource().
     sp<GraphicBufferSource> mGraphicBufferSource;
 
-
     struct ActiveBuffer {
         OMX_U32 mPortIndex;
         OMX::buffer_id mID;
@@ -266,6 +262,8 @@
     // |msg| does not need to be sent to the event listener.
     bool handleMessage(omx_message &msg);
 
+    bool handleDataSpaceChanged(omx_message &msg);
+
     OMXNodeInstance(const OMXNodeInstance &);
     OMXNodeInstance &operator=(const OMXNodeInstance &);
 };
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 8e82486..b060628 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -605,16 +605,27 @@
     int64_t timeUs = mBlockIter.blockTimeUs();
 
     for (int i = 0; i < block->GetFrameCount(); ++i) {
+        MatroskaExtractor::TrackInfo *trackInfo = &mExtractor->mTracks.editItemAt(mTrackIndex);
         const mkvparser::Block::Frame &frame = block->GetFrame(i);
+        size_t len = frame.len;
+        if (SIZE_MAX - len < trackInfo->mHeaderLen) {
+            return ERROR_MALFORMED;
+        }
 
-        MediaBuffer *mbuf = new MediaBuffer(frame.len);
+        len += trackInfo->mHeaderLen;
+        MediaBuffer *mbuf = new MediaBuffer(len);
+        uint8_t *data = static_cast<uint8_t *>(mbuf->data());
+        if (trackInfo->mHeader) {
+            memcpy(data, trackInfo->mHeader, trackInfo->mHeaderLen);
+        }
+
         mbuf->meta_data()->setInt64(kKeyTime, timeUs);
         mbuf->meta_data()->setInt32(kKeyIsSyncFrame, block->IsKey());
 
-        status_t err = frame.Read(mExtractor->mReader, static_cast<uint8_t *>(mbuf->data()));
+        status_t err = frame.Read(mExtractor->mReader, data + trackInfo->mHeaderLen);
         if (err == OK
                 && mExtractor->mIsWebm
-                && mExtractor->mTracks.itemAt(mTrackIndex).mEncrypted) {
+                && trackInfo->mEncrypted) {
             err = setWebmBlockCryptoInfo(mbuf);
         }
 
@@ -1164,6 +1175,42 @@
     }
 }
 
+status_t MatroskaExtractor::initTrackInfo(
+        const mkvparser::Track *track, const sp<MetaData> &meta, TrackInfo *trackInfo) {
+    trackInfo->mTrackNum = track->GetNumber();
+    trackInfo->mMeta = meta;
+    trackInfo->mExtractor = this;
+    trackInfo->mEncrypted = false;
+    trackInfo->mHeader = NULL;
+    trackInfo->mHeaderLen = 0;
+
+    for(size_t i = 0; i < track->GetContentEncodingCount(); i++) {
+        const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
+        for(size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
+            const mkvparser::ContentEncoding::ContentEncryption *encryption;
+            encryption = encoding->GetEncryptionByIndex(j);
+            trackInfo->mMeta->setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
+            trackInfo->mEncrypted = true;
+            break;
+        }
+
+        for(size_t j = 0; j < encoding->GetCompressionCount(); j++) {
+            const mkvparser::ContentEncoding::ContentCompression *compression;
+            compression = encoding->GetCompressionByIndex(j);
+            ALOGV("compression algo %llu settings_len %lld",
+                compression->algo, compression->settings_len);
+            if (compression->algo == 3
+                    && compression->settings
+                    && compression->settings_len > 0) {
+                trackInfo->mHeader = compression->settings;
+                trackInfo->mHeaderLen = compression->settings_len;
+            }
+        }
+    }
+
+    return OK;
+}
+
 void MatroskaExtractor::addTracks() {
     const mkvparser::Tracks *tracks = mSegment->GetTracks();
 
@@ -1288,21 +1335,7 @@
         mTracks.push();
         size_t n = mTracks.size() - 1;
         TrackInfo *trackInfo = &mTracks.editItemAt(n);
-        trackInfo->mTrackNum = track->GetNumber();
-        trackInfo->mMeta = meta;
-        trackInfo->mExtractor = this;
-
-        trackInfo->mEncrypted = false;
-        for(size_t i = 0; i < track->GetContentEncodingCount() && !trackInfo->mEncrypted; i++) {
-            const mkvparser::ContentEncoding *encoding = track->GetContentEncodingByIndex(i);
-            for(size_t j = 0; j < encoding->GetEncryptionCount(); j++) {
-                const mkvparser::ContentEncoding::ContentEncryption *encryption;
-                encryption = encoding->GetEncryptionByIndex(j);
-                meta->setData(kKeyCryptoKey, 0, encryption->key_id, encryption->key_id_len);
-                trackInfo->mEncrypted = true;
-                break;
-            }
-        }
+        initTrackInfo(track, meta, trackInfo);
 
         if (!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) {
             // Attempt to recover from AVC track without codec private data
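
The MatroskaExtractor change checks against SIZE_MAX before prepending the stripped header to each frame buffer. A standalone sketch of that overflow-checked sizing pattern, with illustrative names and std::vector standing in for MediaBuffer:

#include <cstdint>
#include <cstring>
#include <limits>
#include <vector>

// Copies an optional stripped header followed by the frame payload into one buffer,
// rejecting length combinations whose sum would wrap around size_t before anything
// is allocated (the same check the hunk above performs).
static bool buildSample(const uint8_t *hdr, size_t hdrLen,
                        const uint8_t *frame, size_t frameLen,
                        std::vector<uint8_t> *out) {
    if (std::numeric_limits<size_t>::max() - frameLen < hdrLen) {
        return false;                                             // treat as malformed
    }
    out->resize(hdrLen + frameLen);
    if (hdrLen)   memcpy(out->data(), hdr, hdrLen);               // header first
    if (frameLen) memcpy(out->data() + hdrLen, frame, frameLen);  // then the frame
    return true;
}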
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
index 588bd39..19775ce 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ b/media/libstagefright/matroska/MatroskaExtractor.h
@@ -63,6 +63,12 @@
         const MatroskaExtractor *mExtractor;
         Vector<const mkvparser::CuePoint*> mCuePoints;
 
+        // mHeader points to memory managed by mkvparser;
+        // it is deleted along with mSegment in
+        // ~MatroskaExtractor, so it must not be freed here.
+        unsigned char *mHeader;
+        size_t mHeaderLen;
+
         const mkvparser::Track* getTrack() const;
         const mkvparser::CuePoint::TrackPosition *find(long long timeNs) const;
     };
@@ -79,6 +85,7 @@
     int64_t mSeekPreRollNs;
 
     status_t synthesizeAVCC(TrackInfo *trackInfo, size_t index);
+    status_t initTrackInfo(const mkvparser::Track *track, const sp<MetaData> &meta, TrackInfo *trackInfo);
     void addTracks();
     void findThumbnails();
     void getColorInformation(const mkvparser::VideoTrack *vtrack, sp<MetaData> &meta);
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 1d819b5..8557a28 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -24,9 +24,6 @@
 
 #include "GraphicBufferSource.h"
 #include "OMXUtils.h"
-
-#include <OMX_Core.h>
-#include <OMX_IndexExt.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/ColorUtils.h>
@@ -41,10 +38,6 @@
 
 namespace android {
 
-static const bool EXTRA_CHECK = true;
-
-static const OMX_U32 kPortIndexInput = 0;
-
 GraphicBufferSource::PersistentProxyListener::PersistentProxyListener(
         const wp<IGraphicBufferConsumer> &consumer,
         const wp<ConsumerListener>& consumerListener) :
@@ -113,14 +106,16 @@
 }
 
 GraphicBufferSource::GraphicBufferSource(
-        OMXNodeInstance* nodeInstance,
+        const sp<IOMX> &omx,
+        IOMX::node_id nodeID,
         uint32_t bufferWidth,
         uint32_t bufferHeight,
         uint32_t bufferCount,
         uint32_t consumerUsage,
         const sp<IGraphicBufferConsumer> &consumer) :
     mInitCheck(UNKNOWN_ERROR),
-    mNodeInstance(nodeInstance),
+    mOMX(omx),
+    mNodeID(nodeID),
     mExecuting(false),
     mSuspended(false),
     mLastDataSpace(HAL_DATASPACE_UNKNOWN),
@@ -307,7 +302,7 @@
     mExecuting = false;
 }
 
-void GraphicBufferSource::addCodecBuffer(OMX_BUFFERHEADERTYPE* header) {
+void GraphicBufferSource::addCodecBuffer(IOMX::buffer_id bufferID) {
     Mutex::Autolock autoLock(mMutex);
 
     if (mExecuting) {
@@ -317,32 +312,33 @@
         return;
     }
 
-    ALOGV("addCodecBuffer h=%p size=%" PRIu32 " p=%p",
-            header, header->nAllocLen, header->pBuffer);
+    ALOGV("addCodecBuffer id=%u", bufferID);
+
     CodecBuffer codecBuffer;
-    codecBuffer.mHeader = header;
+    codecBuffer.mBufferID = bufferID;
     mCodecBuffers.add(codecBuffer);
 }
 
-void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd) {
+void GraphicBufferSource::codecBufferEmptied(const omx_message &msg) {
+    IOMX::buffer_id bufferID = msg.u.buffer_data.buffer;
+    int fenceFd = msg.fenceFd;
+
     Mutex::Autolock autoLock(mMutex);
     if (!mExecuting) {
         return;
     }
 
-    int cbi = findMatchingCodecBuffer_l(header);
+    int cbi = findMatchingCodecBuffer_l(bufferID);
     if (cbi < 0) {
         // This should never happen.
-        ALOGE("codecBufferEmptied: buffer not recognized (h=%p)", header);
+        ALOGE("codecBufferEmptied: buffer not recognized (id=%u)", bufferID);
         if (fenceFd >= 0) {
             ::close(fenceFd);
         }
         return;
     }
 
-    ALOGV("codecBufferEmptied h=%p size=%" PRIu32 " filled=%" PRIu32 " p=%p",
-            header, header->nAllocLen, header->nFilledLen,
-            header->pBuffer);
+    ALOGV("codecBufferEmptied id=%u", bufferID);
     CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
 
     // header->nFilledLen may not be the original value, so we can't compare
@@ -364,32 +360,6 @@
         return;
     }
 
-    if (EXTRA_CHECK && header->nAllocLen >= sizeof(MetadataBufferType)) {
-        // Pull the graphic buffer handle back out of the buffer, and confirm
-        // that it matches expectations.
-        OMX_U8* data = header->pBuffer;
-        MetadataBufferType type = *(MetadataBufferType *)data;
-        if (type == kMetadataBufferTypeGrallocSource
-                && header->nAllocLen >= sizeof(VideoGrallocMetadata)) {
-            VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)data;
-            if (grallocMeta.pHandle != codecBuffer.mGraphicBuffer->handle) {
-                // should never happen
-                ALOGE("codecBufferEmptied: buffer's handle is %p, expected %p",
-                        grallocMeta.pHandle, codecBuffer.mGraphicBuffer->handle);
-                CHECK(!"codecBufferEmptied: mismatched buffer");
-            }
-        } else if (type == kMetadataBufferTypeANWBuffer
-                && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
-            VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)data;
-            if (nativeMeta.pBuffer != codecBuffer.mGraphicBuffer->getNativeBuffer()) {
-                // should never happen
-                ALOGE("codecBufferEmptied: buffer is %p, expected %p",
-                        nativeMeta.pBuffer, codecBuffer.mGraphicBuffer->getNativeBuffer());
-                CHECK(!"codecBufferEmptied: mismatched buffer");
-            }
-        }
-    }
-
     // Find matching entry in our cached copy of the BufferQueue slots.
     // If we find a match, release that slot.  If we don't, the BufferQueue
     // has dropped that GraphicBuffer, and there's nothing for us to release.
@@ -442,22 +412,24 @@
     return;
 }
 
-void GraphicBufferSource::codecBufferFilled(OMX_BUFFERHEADERTYPE* header) {
+void GraphicBufferSource::codecBufferFilled(omx_message &msg) {
     Mutex::Autolock autoLock(mMutex);
 
+    OMX_U32 &flags = msg.u.extended_buffer_data.flags;
+    OMX_TICKS &timestamp = msg.u.extended_buffer_data.timestamp;
+
     if (mMaxTimestampGapUs > 0ll
-            && !(header->nFlags & OMX_BUFFERFLAG_CODECCONFIG)) {
-        ssize_t index = mOriginalTimeUs.indexOfKey(header->nTimeStamp);
+            && !(flags & OMX_BUFFERFLAG_CODECCONFIG)) {
+        ssize_t index = mOriginalTimeUs.indexOfKey(timestamp);
         if (index >= 0) {
             ALOGV("OUT timestamp: %lld -> %lld",
-                    static_cast<long long>(header->nTimeStamp),
+                    static_cast<long long>(timestamp),
                     static_cast<long long>(mOriginalTimeUs[index]));
-            header->nTimeStamp = mOriginalTimeUs[index];
+            timestamp = mOriginalTimeUs[index];
             mOriginalTimeUs.removeItemsAt(index);
         } else {
             // giving up the effort as encoder doesn't appear to preserve pts
-            ALOGW("giving up limiting timestamp gap (pts = %lld)",
-                    header->nTimeStamp);
+            ALOGW("giving up limiting timestamp gap (pts = %lld)", timestamp);
             mMaxTimestampGapUs = -1ll;
         }
         if (mOriginalTimeUs.size() > BufferQueue::NUM_BUFFER_SLOTS) {
@@ -516,67 +488,16 @@
     mLastDataSpace = dataSpace;
 
     if (ColorUtils::convertDataSpaceToV0(dataSpace)) {
-        ColorAspects aspects = mColorAspects; // initially requested aspects
+        omx_message msg;
+        msg.type = omx_message::EVENT;
+        msg.node = mNodeID;
+        msg.fenceFd = -1;
+        msg.u.event_data.event = OMX_EventDataSpaceChanged;
+        msg.u.event_data.data1 = mLastDataSpace;
+        msg.u.event_data.data2 = ColorUtils::packToU32(mColorAspects);
+        msg.u.event_data.data3 = pixelFormat;
 
-        // request color aspects to encode
-        OMX_INDEXTYPE index;
-        status_t err = mNodeInstance->getExtensionIndex(
-                "OMX.google.android.index.describeColorAspects", &index);
-        if (err == OK) {
-            // V0 dataspace
-            DescribeColorAspectsParams params;
-            InitOMXParams(&params);
-            params.nPortIndex = kPortIndexInput;
-            params.nDataSpace = mLastDataSpace;
-            params.nPixelFormat = pixelFormat;
-            params.bDataSpaceChanged = OMX_TRUE;
-            params.sAspects = mColorAspects;
-
-            err = mNodeInstance->getConfig(index, &params, sizeof(params));
-            if (err == OK) {
-                aspects = params.sAspects;
-                ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
-                        params.sAspects.mRange, asString(params.sAspects.mRange),
-                        params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
-                        params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
-                        params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
-                        err, asString(err));
-            } else {
-                params.sAspects = aspects;
-                err = OK;
-            }
-            params.bDataSpaceChanged = OMX_FALSE;
-            for (int triesLeft = 2; --triesLeft >= 0; ) {
-                status_t err = mNodeInstance->setConfig(index, &params, sizeof(params));
-                if (err == OK) {
-                    err = mNodeInstance->getConfig(index, &params, sizeof(params));
-                }
-                if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
-                        params.sAspects, aspects)) {
-                    // if we can't set or get color aspects, still communicate dataspace to client
-                    break;
-                }
-
-                ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
-            }
-        }
-
-        ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
-                aspects.mRange, asString(aspects.mRange),
-                aspects.mPrimaries, asString(aspects.mPrimaries),
-                aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
-                aspects.mTransfer, asString(aspects.mTransfer),
-                err, asString(err));
-
-        // signal client that the dataspace has changed; this will update the output format
-        // TODO: we should tie this to an output buffer somehow, and signal the change
-        // just before the output buffer is returned to the client, but there are many
-        // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
-
-        mNodeInstance->signalEvent(
-                OMX_EventDataSpaceChanged, dataSpace,
-                (aspects.mRange << 24) | (aspects.mPrimaries << 16)
-                        | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer);
+        mOMX->dispatchMessage(msg);
     }
 }
 
@@ -862,19 +783,21 @@
     codecBuffer.mSlot = item.mSlot;
     codecBuffer.mFrameNumber = item.mFrameNumber;
 
-    OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
-    sp<GraphicBuffer> buffer = codecBuffer.mGraphicBuffer;
-    status_t err = mNodeInstance->emptyGraphicBuffer(
-            header, buffer, OMX_BUFFERFLAG_ENDOFFRAME, timeUs,
-            item.mFence->isValid() ? item.mFence->dup() : -1);
+    IOMX::buffer_id bufferID = codecBuffer.mBufferID;
+    const sp<GraphicBuffer> &buffer = codecBuffer.mGraphicBuffer;
+    int fenceID = item.mFence->isValid() ? item.mFence->dup() : -1;
+
+    status_t err = mOMX->emptyGraphicBuffer(
+            mNodeID, bufferID, buffer, OMX_BUFFERFLAG_ENDOFFRAME, timeUs, fenceID);
+
     if (err != OK) {
-        ALOGW("WARNING: emptyNativeWindowBuffer failed: 0x%x", err);
+        ALOGW("WARNING: emptyGraphicBuffer failed: 0x%x", err);
         codecBuffer.mGraphicBuffer = NULL;
         return err;
     }
 
-    ALOGV("emptyNativeWindowBuffer succeeded, h=%p p=%p buf=%p bufhandle=%p",
-            header, header->pBuffer, buffer->getNativeBuffer(), buffer->handle);
+    ALOGV("emptyGraphicBuffer succeeded, id=%u buf=%p bufhandle=%p",
+            bufferID, buffer->getNativeBuffer(), buffer->handle);
     return OK;
 }
 
@@ -895,16 +818,17 @@
     // to stick a placeholder into codecBuffer.mGraphicBuffer to mark it as
     // in-use.
     CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
+    IOMX::buffer_id bufferID = codecBuffer.mBufferID;
 
-    OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
-    status_t err = mNodeInstance->emptyGraphicBuffer(
-            header, NULL /* buffer */, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS,
+    status_t err = mOMX->emptyGraphicBuffer(
+            mNodeID, bufferID, NULL /* buffer */,
+            OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS,
             0 /* timestamp */, -1 /* fenceFd */);
     if (err != OK) {
         ALOGW("emptyDirectBuffer EOS failed: 0x%x", err);
     } else {
-        ALOGV("submitEndOfInputStream_l: buffer submitted, header=%p cbi=%d",
-                header, cbi);
+        ALOGV("submitEndOfInputStream_l: buffer submitted, id=%u cbi=%d",
+                bufferID, cbi);
         mEndOfStreamSent = true;
     }
 }
@@ -920,10 +844,9 @@
     return -1;
 }
 
-int GraphicBufferSource::findMatchingCodecBuffer_l(
-        const OMX_BUFFERHEADERTYPE* header) {
+int GraphicBufferSource::findMatchingCodecBuffer_l(IOMX::buffer_id bufferID) {
     for (int i = (int)mCodecBuffers.size() - 1; i>= 0; --i) {
-        if (mCodecBuffers[i].mHeader == header) {
+        if (mCodecBuffers[i].mBufferID == bufferID) {
             return i;
         }
     }
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index aa4ceb3..7ce9f98 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -52,7 +52,8 @@
 class GraphicBufferSource : public BufferQueue::ConsumerListener {
 public:
     GraphicBufferSource(
-            OMXNodeInstance* nodeInstance,
+            const sp<IOMX> &omx,
+            IOMX::node_id nodeID,
             uint32_t bufferWidth,
             uint32_t bufferHeight,
             uint32_t bufferCount,
@@ -94,15 +95,15 @@
     // A "codec buffer", i.e. a buffer that can be used to pass data into
     // the encoder, has been allocated.  (This call does not call back into
     // OMXNodeInstance.)
-    void addCodecBuffer(OMX_BUFFERHEADERTYPE* header);
+    void addCodecBuffer(IOMX::buffer_id bufferID);
 
     // Called from OnEmptyBufferDone.  If we have a BQ buffer available,
     // fill it with a new frame of data; otherwise, just mark it as available.
-    void codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd);
+    void codecBufferEmptied(const omx_message &msg);
 
     // Called when omx_message::FILL_BUFFER_DONE is received. (Currently the
     // buffer source will fix timestamp in the header if needed.)
-    void codecBufferFilled(OMX_BUFFERHEADERTYPE* header);
+    void codecBufferFilled(omx_message &msg);
 
     // This is called after the last input frame has been submitted.  We
     // need to submit an empty buffer with the EOS flag set.  If we don't
@@ -203,7 +204,7 @@
     // Keep track of codec input buffers.  They may either be available
     // (mGraphicBuffer == NULL) or in use by the codec.
     struct CodecBuffer {
-        OMX_BUFFERHEADERTYPE* mHeader;
+        IOMX::buffer_id mBufferID;
 
         // buffer producer's frame-number for buffer
         uint64_t mFrameNumber;
@@ -224,7 +225,7 @@
     }
 
     // Finds the mCodecBuffers entry that matches.  Returns -1 if not found.
-    int findMatchingCodecBuffer_l(const OMX_BUFFERHEADERTYPE* header);
+    int findMatchingCodecBuffer_l(IOMX::buffer_id bufferID);
 
     // Fills a codec buffer with a frame from the BufferQueue.  This must
     // only be called when we know that a frame of data is ready (i.e. we're
@@ -261,8 +262,9 @@
     // Used to report constructor failure.
     status_t mInitCheck;
 
-    // Pointer back to the object that contains us.  We send buffers here.
-    OMXNodeInstance* mNodeInstance;
+    // Pointer back to the IOMX that created us.  We send buffers here.
+    sp<IOMX> mOMX;
+    IOMX::node_id mNodeID;
 
     // Set by omxExecuting() / omxIdling().
     bool mExecuting;
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index f7058d7..36bae4a 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -606,6 +606,21 @@
             buffer, range_offset, range_length, flags, timestamp, fenceFd);
 }
 
+status_t OMX::emptyGraphicBuffer(
+        node_id node,
+        buffer_id buffer,
+        const sp<GraphicBuffer> &graphicBuffer,
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+    OMXNodeInstance *instance = findInstance(node);
+
+    if (instance == NULL) {
+        return NAME_NOT_FOUND;
+    }
+
+    return instance->emptyGraphicBuffer(
+            buffer, graphicBuffer, flags, timestamp, fenceFd);
+}
+
 status_t OMX::getExtensionIndex(
         node_id node,
         const char *parameter_name,
@@ -635,6 +650,18 @@
     return instance->setInternalOption(port_index, type, data, size);
 }
 
+status_t OMX::dispatchMessage(const omx_message &msg) {
+    sp<OMX::CallbackDispatcher> dispatcher = findDispatcher(msg.node);
+
+    if (dispatcher == NULL) {
+        return OMX_ErrorComponentNotFound;
+    }
+
+    dispatcher->post(msg, true /*realTime*/);
+
+    return OMX_ErrorNone;
+}
+
 OMX_ERRORTYPE OMX::OnEvent(
         node_id node,
         OMX_IN OMX_EVENTTYPE eEvent,
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index 6132a2c..ac9b0c3 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -32,26 +32,23 @@
 OMXMaster::OMXMaster()
     : mVendorLibHandle(NULL) {
 
-    mProcessName[0] = 0;
-    if (mProcessName[0] == 0) {
-        pid_t pid = getpid();
-        char filename[20];
-        snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
-        int fd = open(filename, O_RDONLY);
-        if (fd < 0) {
-            ALOGW("couldn't determine process name");
-            sprintf(mProcessName, "<unknown>");
-        } else {
-            ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
-            if (len < 2) {
-                ALOGW("couldn't determine process name");
-                sprintf(mProcessName, "<unknown>");
-            } else {
-                // the name is newline terminated, so erase the newline
-                mProcessName[len - 1] = 0;
-            }
-            close(fd);
-        }
+    pid_t pid = getpid();
+    char filename[20];
+    snprintf(filename, sizeof(filename), "/proc/%d/comm", pid);
+    int fd = open(filename, O_RDONLY);
+    if (fd < 0) {
+      ALOGW("couldn't determine process name");
+      strlcpy(mProcessName, "<unknown>", sizeof(mProcessName));
+    } else {
+      ssize_t len = read(fd, mProcessName, sizeof(mProcessName));
+      if (len < 2) {
+        ALOGW("couldn't determine process name");
+        strlcpy(mProcessName, "<unknown>", sizeof(mProcessName));
+      } else {
+        // the name is newline terminated, so erase the newline
+        mProcessName[len - 1] = 0;
+      }
+      close(fd);
     }
 
     addVendorPlugin();
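
The OMXMaster constructor above reads the process name from /proc/<pid>/comm and falls back to a bounded "<unknown>" string. A simplified sketch of the same read-with-fallback pattern, assuming size >= 2 and using snprintf where the patch uses strlcpy:

#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

// Fills `name` with the current process name, or "<unknown>" if it cannot be read.
static void readProcessName(char *name, size_t size) {
    char path[32];
    snprintf(path, sizeof(path), "/proc/%d/comm", getpid());
    int fd = open(path, O_RDONLY);
    ssize_t len = (fd < 0) ? -1 : read(fd, name, size - 1);
    if (len < 2) {
        snprintf(name, size, "<unknown>");   // bounded fallback
    } else {
        name[len - 1] = 0;                   // comm is newline-terminated; strip it
    }
    if (fd >= 0) close(fd);
}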
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 8c10310..2e13398 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -35,6 +35,7 @@
 #include <HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/stagefright/MediaErrors.h>
 #include <utils/misc.h>
 #include <utils/NativeHandle.h>
@@ -845,7 +846,7 @@
 
     sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
     if (bufferSource != NULL && portIndex == kPortIndexInput) {
-        bufferSource->addCodecBuffer(header);
+        bufferSource->addCodecBuffer(*buffer);
     }
 
     CLOG_BUFFER(useBuffer, NEW_BUFFER_FMT(
@@ -1124,7 +1125,9 @@
         usageBits = 0;
     }
 
-    sp<GraphicBufferSource> bufferSource = new GraphicBufferSource(this,
+    sp<GraphicBufferSource> bufferSource = new GraphicBufferSource(
+            mOwner,
+            mNodeID,
             def.format.video.nFrameWidth,
             def.format.video.nFrameHeight,
             def.nBufferCountActual,
@@ -1198,10 +1201,6 @@
     return createGraphicBufferSource(portIndex, bufferConsumer, type);
 }
 
-void OMXNodeInstance::signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
-    mOwner->OnEvent(mNodeID, event, arg1, arg2, NULL);
-}
-
 status_t OMXNodeInstance::signalEndOfInputStream() {
     // For non-Surface input, the MediaCodec should convert the call to a
     // pair of requests (dequeue input buffer, queue input buffer with EOS
@@ -1263,7 +1262,7 @@
 
     sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
     if (bufferSource != NULL && portIndex == kPortIndexInput) {
-        bufferSource->addCodecBuffer(header);
+        bufferSource->addCodecBuffer(*buffer);
     }
     CLOG_BUFFER(allocateSecureBuffer, NEW_BUFFER_FMT(
             *buffer, portIndex, "%zu@%p:%p", size, *buffer_data,
@@ -1317,7 +1316,7 @@
 
     sp<GraphicBufferSource> bufferSource(getGraphicBufferSource());
     if (bufferSource != NULL && portIndex == kPortIndexInput) {
-        bufferSource->addCodecBuffer(header);
+        bufferSource->addCodecBuffer(*buffer);
     }
 
     CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p :> %u@%p",
@@ -1538,15 +1537,16 @@
 
 // like emptyBuffer, but the data is already in header->pBuffer
 status_t OMXNodeInstance::emptyGraphicBuffer(
-        OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &graphicBuffer,
+        OMX::buffer_id buffer, const sp<GraphicBuffer> &graphicBuffer,
         OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
+    Mutex::Autolock autoLock(mLock);
+
+    OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer, kPortIndexInput);
     if (header == NULL) {
         ALOGE("b/25884056");
         return BAD_VALUE;
     }
 
-    Mutex::Autolock autoLock(mLock);
-    OMX::buffer_id buffer = findBufferID(header);
     status_t err = updateGraphicBufferInMeta_l(
             kPortIndexInput, graphicBuffer, buffer, header,
             true /* updateCodecBuffer */);
@@ -1735,9 +1735,7 @@
 
         if (bufferSource != NULL) {
             // fix up the buffer info (especially timestamp) if needed
-            bufferSource->codecBufferFilled(buffer);
-
-            msg.u.extended_buffer_data.timestamp = buffer->nTimeStamp;
+            bufferSource->codecBufferFilled(msg);
         }
     } else if (msg.type == omx_message::EMPTY_BUFFER_DONE) {
         OMX_BUFFERHEADERTYPE *buffer =
@@ -1760,14 +1758,92 @@
             // Don't dispatch a message back to ACodec, since it doesn't
             // know that anyone asked to have the buffer emptied and will
             // be very confused.
-            bufferSource->codecBufferEmptied(buffer, msg.fenceFd);
+            bufferSource->codecBufferEmptied(msg);
             return true;
         }
+    } else if (msg.type == omx_message::EVENT &&
+            msg.u.event_data.event == OMX_EventDataSpaceChanged) {
+        handleDataSpaceChanged(msg);
     }
 
     return false;
 }
 
+bool OMXNodeInstance::handleDataSpaceChanged(omx_message &msg) {
+    android_dataspace dataSpace = (android_dataspace) msg.u.event_data.data1;
+    android_dataspace origDataSpace = dataSpace;
+
+    if (!ColorUtils::convertDataSpaceToV0(dataSpace)) {
+        // Do not process the data space change, don't notify client either
+        return true;
+    }
+
+    android_pixel_format pixelFormat = (android_pixel_format)msg.u.event_data.data3;
+
+    ColorAspects requestedAspects = ColorUtils::unpackToColorAspects(msg.u.event_data.data2);
+    ColorAspects aspects = requestedAspects; // initially requested aspects
+
+    // request color aspects to encode
+    OMX_INDEXTYPE index;
+    status_t err = getExtensionIndex(
+            "OMX.google.android.index.describeColorAspects", &index);
+    if (err == OK) {
+        // V0 dataspace
+        DescribeColorAspectsParams params;
+        InitOMXParams(&params);
+        params.nPortIndex = kPortIndexInput;
+        params.nDataSpace = origDataSpace;
+        params.nPixelFormat = pixelFormat;
+        params.bDataSpaceChanged = OMX_TRUE;
+        params.sAspects = requestedAspects;
+
+        err = getConfig(index, &params, sizeof(params));
+        if (err == OK) {
+            aspects = params.sAspects;
+            ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+                    params.sAspects.mRange, asString(params.sAspects.mRange),
+                    params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+                    params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+                    params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+                    err, asString(err));
+        } else {
+            params.sAspects = aspects;
+            err = OK;
+        }
+        params.bDataSpaceChanged = OMX_FALSE;
+        for (int triesLeft = 2; --triesLeft >= 0; ) {
+            status_t err = setConfig(index, &params, sizeof(params));
+            if (err == OK) {
+                err = getConfig(index, &params, sizeof(params));
+            }
+            if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+                    params.sAspects, aspects)) {
+                // if we can't set or get color aspects, still communicate dataspace to client
+                break;
+            }
+
+            ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
+        }
+    }
+
+    ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+            aspects.mRange, asString(aspects.mRange),
+            aspects.mPrimaries, asString(aspects.mPrimaries),
+            aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+            aspects.mTransfer, asString(aspects.mTransfer),
+            err, asString(err));
+
+    // signal client that the dataspace has changed; this will update the output format
+    // TODO: we should tie this to an output buffer somehow, and signal the change
+    // just before the output buffer is returned to the client, but there are many
+    // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
+
+    msg.u.event_data.data1 = (OMX_U32) dataSpace;
+    msg.u.event_data.data2 = (OMX_U32) ColorUtils::packToU32(aspects);
+
+    return false;
+}
+
 void OMXNodeInstance::onMessages(std::list<omx_message> &messages) {
     for (std::list<omx_message>::iterator it = messages.begin(); it != messages.end(); ) {
         if (handleMessage(*it)) {
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
index 799696c..c5879b8 100644
--- a/media/libstagefright/omx/OMXUtils.cpp
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -20,7 +20,10 @@
 #include <string.h>
 
 #include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/MediaDefs.h>
 #include "OMXUtils.h"
 
 namespace android {
@@ -101,5 +104,273 @@
 
 /**************************************************************************************************/
 
+const char *GetComponentRole(bool isEncoder, const char *mime) {
+    struct MimeToRole {
+        const char *mime;
+        const char *decoderRole;
+        const char *encoderRole;
+    };
+
+    static const MimeToRole kMimeToRole[] = {
+        { MEDIA_MIMETYPE_AUDIO_MPEG,
+            "audio_decoder.mp3", "audio_encoder.mp3" },
+        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I,
+            "audio_decoder.mp1", "audio_encoder.mp1" },
+        { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II,
+            "audio_decoder.mp2", "audio_encoder.mp2" },
+        { MEDIA_MIMETYPE_AUDIO_AMR_NB,
+            "audio_decoder.amrnb", "audio_encoder.amrnb" },
+        { MEDIA_MIMETYPE_AUDIO_AMR_WB,
+            "audio_decoder.amrwb", "audio_encoder.amrwb" },
+        { MEDIA_MIMETYPE_AUDIO_AAC,
+            "audio_decoder.aac", "audio_encoder.aac" },
+        { MEDIA_MIMETYPE_AUDIO_VORBIS,
+            "audio_decoder.vorbis", "audio_encoder.vorbis" },
+        { MEDIA_MIMETYPE_AUDIO_OPUS,
+            "audio_decoder.opus", "audio_encoder.opus" },
+        { MEDIA_MIMETYPE_AUDIO_G711_MLAW,
+            "audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
+        { MEDIA_MIMETYPE_AUDIO_G711_ALAW,
+            "audio_decoder.g711alaw", "audio_encoder.g711alaw" },
+        { MEDIA_MIMETYPE_VIDEO_AVC,
+            "video_decoder.avc", "video_encoder.avc" },
+        { MEDIA_MIMETYPE_VIDEO_HEVC,
+            "video_decoder.hevc", "video_encoder.hevc" },
+        { MEDIA_MIMETYPE_VIDEO_MPEG4,
+            "video_decoder.mpeg4", "video_encoder.mpeg4" },
+        { MEDIA_MIMETYPE_VIDEO_H263,
+            "video_decoder.h263", "video_encoder.h263" },
+        { MEDIA_MIMETYPE_VIDEO_VP8,
+            "video_decoder.vp8", "video_encoder.vp8" },
+        { MEDIA_MIMETYPE_VIDEO_VP9,
+            "video_decoder.vp9", "video_encoder.vp9" },
+        { MEDIA_MIMETYPE_AUDIO_RAW,
+            "audio_decoder.raw", "audio_encoder.raw" },
+        { MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+            "video_decoder.dolby-vision", "video_encoder.dolby-vision" },
+        { MEDIA_MIMETYPE_AUDIO_FLAC,
+            "audio_decoder.flac", "audio_encoder.flac" },
+        { MEDIA_MIMETYPE_AUDIO_MSGSM,
+            "audio_decoder.gsm", "audio_encoder.gsm" },
+        { MEDIA_MIMETYPE_VIDEO_MPEG2,
+            "video_decoder.mpeg2", "video_encoder.mpeg2" },
+        { MEDIA_MIMETYPE_AUDIO_AC3,
+            "audio_decoder.ac3", "audio_encoder.ac3" },
+        { MEDIA_MIMETYPE_AUDIO_EAC3,
+            "audio_decoder.eac3", "audio_encoder.eac3" },
+    };
+
+    static const size_t kNumMimeToRole =
+        sizeof(kMimeToRole) / sizeof(kMimeToRole[0]);
+
+    size_t i;
+    for (i = 0; i < kNumMimeToRole; ++i) {
+        if (!strcasecmp(mime, kMimeToRole[i].mime)) {
+            break;
+        }
+    }
+
+    if (i == kNumMimeToRole) {
+        return NULL;
+    }
+
+    return isEncoder ? kMimeToRole[i].encoderRole
+                  : kMimeToRole[i].decoderRole;
+}
+
+status_t SetComponentRole(const sp<IOMX> &omx, IOMX::node_id node, const char *role) {
+    OMX_PARAM_COMPONENTROLETYPE roleParams;
+    InitOMXParams(&roleParams);
+
+    strncpy((char *)roleParams.cRole,
+            role, OMX_MAX_STRINGNAME_SIZE - 1);
+
+    roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
+
+    return omx->setParameter(
+            node, OMX_IndexParamStandardComponentRole,
+            &roleParams, sizeof(roleParams));
+}
+
+bool DescribeDefaultColorFormat(DescribeColorFormat2Params &params) {
+    MediaImage2 &image = params.sMediaImage;
+    memset(&image, 0, sizeof(image));
+
+    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+    image.mNumPlanes = 0;
+
+    const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
+    image.mWidth = params.nFrameWidth;
+    image.mHeight = params.nFrameHeight;
+
+    // only supporting YUV420
+    if (fmt != OMX_COLOR_FormatYUV420Planar &&
+        fmt != OMX_COLOR_FormatYUV420PackedPlanar &&
+        fmt != OMX_COLOR_FormatYUV420SemiPlanar &&
+        fmt != OMX_COLOR_FormatYUV420PackedSemiPlanar &&
+        fmt != (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YV12) {
+        ALOGW("do not know color format 0x%x = %d", fmt, fmt);
+        return false;
+    }
+
+    // TEMPORARY FIX for some vendors that advertise sliceHeight as 0
+    if (params.nStride != 0 && params.nSliceHeight == 0) {
+        ALOGW("using sliceHeight=%u instead of what codec advertised (=0)",
+                params.nFrameHeight);
+        params.nSliceHeight = params.nFrameHeight;
+    }
+
+    // we need stride and slice-height to be non-zero and sensible. These values were chosen to
+    // prevent integer overflows further down the line, and do not indicate support for
+    // 32kx32k video.
+    if (params.nStride == 0 || params.nSliceHeight == 0
+            || params.nStride > 32768 || params.nSliceHeight > 32768) {
+        ALOGW("cannot describe color format 0x%x = %d with stride=%u and sliceHeight=%u",
+                fmt, fmt, params.nStride, params.nSliceHeight);
+        return false;
+    }
+
+    // set-up YUV format
+    image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
+    image.mNumPlanes = 3;
+    image.mBitDepth = 8;
+    image.mBitDepthAllocated = 8;
+    image.mPlane[image.Y].mOffset = 0;
+    image.mPlane[image.Y].mColInc = 1;
+    image.mPlane[image.Y].mRowInc = params.nStride;
+    image.mPlane[image.Y].mHorizSubsampling = 1;
+    image.mPlane[image.Y].mVertSubsampling = 1;
+
+    switch ((int)fmt) {
+        case HAL_PIXEL_FORMAT_YV12:
+            if (params.bUsingNativeBuffers) {
+                size_t ystride = align(params.nStride, 16);
+                size_t cstride = align(params.nStride / 2, 16);
+                image.mPlane[image.Y].mRowInc = ystride;
+
+                image.mPlane[image.V].mOffset = ystride * params.nSliceHeight;
+                image.mPlane[image.V].mColInc = 1;
+                image.mPlane[image.V].mRowInc = cstride;
+                image.mPlane[image.V].mHorizSubsampling = 2;
+                image.mPlane[image.V].mVertSubsampling = 2;
+
+                image.mPlane[image.U].mOffset = image.mPlane[image.V].mOffset
+                        + (cstride * params.nSliceHeight / 2);
+                image.mPlane[image.U].mColInc = 1;
+                image.mPlane[image.U].mRowInc = cstride;
+                image.mPlane[image.U].mHorizSubsampling = 2;
+                image.mPlane[image.U].mVertSubsampling = 2;
+                break;
+            } else {
+                // fall through as YV12 is used for YUV420Planar by some codecs
+            }
+
+        case OMX_COLOR_FormatYUV420Planar:
+        case OMX_COLOR_FormatYUV420PackedPlanar:
+            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+            image.mPlane[image.U].mColInc = 1;
+            image.mPlane[image.U].mRowInc = params.nStride / 2;
+            image.mPlane[image.U].mHorizSubsampling = 2;
+            image.mPlane[image.U].mVertSubsampling = 2;
+
+            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset
+                    + (params.nStride * params.nSliceHeight / 4);
+            image.mPlane[image.V].mColInc = 1;
+            image.mPlane[image.V].mRowInc = params.nStride / 2;
+            image.mPlane[image.V].mHorizSubsampling = 2;
+            image.mPlane[image.V].mVertSubsampling = 2;
+            break;
+
+        case OMX_COLOR_FormatYUV420SemiPlanar:
+            // FIXME: NV21 for sw-encoder, NV12 for decoder and hw-encoder
+        case OMX_COLOR_FormatYUV420PackedSemiPlanar:
+            // NV12
+            image.mPlane[image.U].mOffset = params.nStride * params.nSliceHeight;
+            image.mPlane[image.U].mColInc = 2;
+            image.mPlane[image.U].mRowInc = params.nStride;
+            image.mPlane[image.U].mHorizSubsampling = 2;
+            image.mPlane[image.U].mVertSubsampling = 2;
+
+            image.mPlane[image.V].mOffset = image.mPlane[image.U].mOffset + 1;
+            image.mPlane[image.V].mColInc = 2;
+            image.mPlane[image.V].mRowInc = params.nStride;
+            image.mPlane[image.V].mHorizSubsampling = 2;
+            image.mPlane[image.V].mVertSubsampling = 2;
+            break;
+
+        default:
+            TRESPASS();
+    }
+    return true;
+}
+
+bool DescribeColorFormat(
+        const sp<IOMX> &omx, IOMX::node_id node,
+        DescribeColorFormat2Params &describeParams)
+{
+    OMX_INDEXTYPE describeColorFormatIndex;
+    if (omx->getExtensionIndex(
+            node, "OMX.google.android.index.describeColorFormat",
+            &describeColorFormatIndex) == OK) {
+        DescribeColorFormatParams describeParamsV1(describeParams);
+        if (omx->getParameter(
+                node, describeColorFormatIndex,
+                &describeParamsV1, sizeof(describeParamsV1)) == OK) {
+            describeParams.initFromV1(describeParamsV1);
+            return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+        }
+    } else if (omx->getExtensionIndex(
+            node, "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
+               && omx->getParameter(
+            node, describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
+        return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+    }
+
+    return DescribeDefaultColorFormat(describeParams);
+}
+
+// static
+bool IsFlexibleColorFormat(
+         const sp<IOMX> &omx, IOMX::node_id node,
+         uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
+    DescribeColorFormat2Params describeParams;
+    InitOMXParams(&describeParams);
+    describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
+    // reasonable dummy values
+    describeParams.nFrameWidth = 128;
+    describeParams.nFrameHeight = 128;
+    describeParams.nStride = 128;
+    describeParams.nSliceHeight = 128;
+    describeParams.bUsingNativeBuffers = (OMX_BOOL)usingNativeBuffers;
+
+    CHECK(flexibleEquivalent != NULL);
+
+    if (!DescribeColorFormat(omx, node, describeParams)) {
+        return false;
+    }
+
+    const MediaImage2 &img = describeParams.sMediaImage;
+    if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
+        if (img.mNumPlanes != 3
+                || img.mPlane[img.Y].mHorizSubsampling != 1
+                || img.mPlane[img.Y].mVertSubsampling != 1) {
+            return false;
+        }
+
+        // YUV 420
+        if (img.mPlane[img.U].mHorizSubsampling == 2
+                && img.mPlane[img.U].mVertSubsampling == 2
+                && img.mPlane[img.V].mHorizSubsampling == 2
+                && img.mPlane[img.V].mVertSubsampling == 2) {
+            // possible flexible YUV420 format
+            if (img.mBitDepth <= 8) {
+               *flexibleEquivalent = OMX_COLOR_FormatYUV420Flexible;
+               return true;
+            }
+        }
+    }
+    return false;
+}
+
 }  // namespace android
 
diff --git a/media/libstagefright/omx/OMXUtils.h b/media/libstagefright/omx/OMXUtils.h
index 0c5e537..315f118 100644
--- a/media/libstagefright/omx/OMXUtils.h
+++ b/media/libstagefright/omx/OMXUtils.h
@@ -17,6 +17,8 @@
 #ifndef OMX_UTILS_H_
 #define OMX_UTILS_H_
 
+#include <media/IOMX.h>
+
 /***** DO NOT USE THIS INCLUDE!!! INTERNAL ONLY!!! UNLESS YOU RESIDE IN media/libstagefright *****/
 
 // OMXUtils contains omx-specific utility functions for stagefright/omx library
@@ -36,6 +38,19 @@
 
 status_t StatusFromOMXError(OMX_ERRORTYPE err);
 
+const char *GetComponentRole(bool isEncoder, const char *mime);
+status_t SetComponentRole(const sp<IOMX> &omx, IOMX::node_id node, const char *role);
+
+struct DescribeColorFormat2Params;
+
+bool IsFlexibleColorFormat(
+        const sp<IOMX> &omx, IOMX::node_id node,
+        uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent);
+bool DescribeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
+bool DescribeColorFormat(
+        const sp<IOMX> &omx, IOMX::node_id node,
+        DescribeColorFormat2Params &describeParams);
+
 }  // namespace android
 
 #endif
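
DescribeDefaultColorFormat, declared above, lays out semi-planar YUV420 with the interleaved chroma plane starting right after stride * sliceHeight bytes of luma. A tiny sketch of those offsets for an illustrative geometry:

#include <cstdio>

int main() {
    unsigned stride = 1280, sliceHeight = 720;   // illustrative buffer geometry
    unsigned uOffset = stride * sliceHeight;     // chroma starts after the full Y plane
    unsigned vOffset = uOffset + 1;              // V is interleaved one byte after U
    printf("Y@0  U@%u  V@%u  (chroma colInc 2, rowInc %u)\n", uOffset, vOffset, stride);
    return 0;
}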
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 13afd45..f523b97 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -77,19 +77,34 @@
     switch (index) {
         case OMX_IndexParamPortDefinition:
         {
-            portIndex = ((OMX_PARAM_PORTDEFINITIONTYPE *)params)->nPortIndex;
+            const OMX_PARAM_PORTDEFINITIONTYPE *portDefs =
+                    (const OMX_PARAM_PORTDEFINITIONTYPE *) params;
+            if (!isValidOMXParam(portDefs)) {
+                return false;
+            }
+            portIndex = portDefs->nPortIndex;
             break;
         }
 
         case OMX_IndexParamAudioPcm:
         {
-            portIndex = ((OMX_AUDIO_PARAM_PCMMODETYPE *)params)->nPortIndex;
+            const OMX_AUDIO_PARAM_PCMMODETYPE *pcmMode =
+                    (const OMX_AUDIO_PARAM_PCMMODETYPE *) params;
+            if (!isValidOMXParam(pcmMode)) {
+                return false;
+            }
+            portIndex = pcmMode->nPortIndex;
             break;
         }
 
         case OMX_IndexParamAudioAac:
         {
-            portIndex = ((OMX_AUDIO_PARAM_AACPROFILETYPE *)params)->nPortIndex;
+            const OMX_AUDIO_PARAM_AACPROFILETYPE *aacMode =
+                    (const OMX_AUDIO_PARAM_AACPROFILETYPE *) params;
+            if (!isValidOMXParam(aacMode)) {
+                return false;
+            }
+            portIndex = aacMode->nPortIndex;
             break;
         }
 
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 0f9c00c..a773ca2 100755
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -56,6 +56,7 @@
     { "OMX.google.vp8.decoder", "vpxdec", "video_decoder.vp8" },
     { "OMX.google.vp9.decoder", "vpxdec", "video_decoder.vp9" },
     { "OMX.google.vp8.encoder", "vpxenc", "video_encoder.vp8" },
+    { "OMX.google.vp9.encoder", "vpxenc", "video_encoder.vp9" },
     { "OMX.google.raw.decoder", "rawdec", "audio_decoder.raw" },
     { "OMX.google.flac.encoder", "flacenc", "audio_encoder.flac" },
     { "OMX.google.gsm.decoder", "gsmdec", "audio_decoder.gsm" },
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 8b0331a..325084c 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -17,7 +17,6 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ASessionDescription"
 #include <utils/Log.h>
-#include <cutils/log.h>
 
 #include "ASessionDescription.h"
 
@@ -212,7 +211,7 @@
 
     *PT = x;
 
-    char key[32];
+    char key[20];
     snprintf(key, sizeof(key), "a=rtpmap:%lu", x);
     if (findAttribute(index, key, desc)) {
         snprintf(key, sizeof(key), "a=fmtp:%lu", x);
@@ -231,11 +230,8 @@
     *width = 0;
     *height = 0;
 
-    char key[33];
+    char key[20];
     snprintf(key, sizeof(key), "a=framesize:%lu", PT);
-    if (PT > 9999999) {
-        android_errorWriteLog(0x534e4554, "25747670");
-    }
     AString value;
     if (!findAttribute(index, key, &value)) {
         return false;
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 76e2e6e..5505aa4 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -25,6 +25,7 @@
 #endif
 
 #include <utils/Log.h>
+#include <cutils/properties.h> // for property_get
 
 #include "APacketSource.h"
 #include "ARTPConnection.h"
@@ -807,11 +808,7 @@
                         result = UNKNOWN_ERROR;
                     } else {
                         parsePlayResponse(response);
-
-                        sp<AMessage> timeout = new AMessage('tiou', this);
-                        mCheckTimeoutGeneration++;
-                        timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
-                        timeout->post(kStartupTimeoutUs);
+                        postTimeout();
                     }
                 }
 
@@ -1153,10 +1150,7 @@
 
                         // Post new timeout in order to make sure to use
                         // fake timestamps if no new Sender Reports arrive
-                        sp<AMessage> timeout = new AMessage('tiou', this);
-                        mCheckTimeoutGeneration++;
-                        timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
-                        timeout->post(kStartupTimeoutUs);
+                        postTimeout();
                     }
                 }
 
@@ -1248,10 +1242,7 @@
 
                 // Start a new timeout generation to avoid getting a timeout
                 // before the PLAY response arrives
-                sp<AMessage> timeout = new AMessage('tiou', this);
-                mCheckTimeoutGeneration++;
-                timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
-                timeout->post(kStartupTimeoutUs);
+                postTimeout();
 
                 int64_t timeUs;
                 CHECK(msg->findInt64("time", &timeUs));
@@ -1305,10 +1296,7 @@
 
                         // Post new timeout in order to make sure to use
                         // fake timestamps if no new Sender Reports arrive
-                        sp<AMessage> timeout = new AMessage('tiou', this);
-                        mCheckTimeoutGeneration++;
-                        timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
-                        timeout->post(kStartupTimeoutUs);
+                        postTimeout();
 
                         ssize_t i = response->mHeaders.indexOfKey("rtp-info");
                         CHECK_GE(i, 0);
@@ -1964,6 +1952,16 @@
         msg->post();
     }
 
+    void postTimeout() {
+        sp<AMessage> timeout = new AMessage('tiou', this);
+        mCheckTimeoutGeneration++;
+        timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
+
+        int64_t startupTimeoutUs;
+        startupTimeoutUs = property_get_int64("media.rtsp.timeout-us", kStartupTimeoutUs);
+        timeout->post(startupTimeoutUs);
+    }
+
     DISALLOW_EVIL_CONSTRUCTORS(MyHandler);
 };
 
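The new postTimeout() helper replaces four copies of the same 'tiou' message
posting and, via property_get_int64(), lets the startup timeout be overridden
by the "media.rtsp.timeout-us" system property. A minimal sketch of the lookup,
assuming the 10-second kStartupTimeoutUs default used in this file:

    #include <stdint.h>
    #include <cutils/properties.h>   // property_get_int64

    static const int64_t kDefaultStartupTimeoutUs = 10000000ll;  // assumed 10 s

    int64_t resolveStartupTimeoutUs() {
        // Returns the default when the property is unset or not an integer.
        return property_get_int64("media.rtsp.timeout-us",
                                  kDefaultStartupTimeoutUs);
    }

A device-side override such as "adb shell setprop media.rtsp.timeout-us 30000000"
then takes effect without rebuilding.
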
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index 4be44cf..c034c13 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -100,7 +100,7 @@
     bool                    sendObject(MtpObjectHandle handle, int size, int srcFD);
     bool                    deleteObject(MtpObjectHandle handle);
     MtpObjectHandle         getParent(MtpObjectHandle handle);
-    MtpObjectHandle         getStorageID(MtpObjectHandle handle);
+    MtpStorageID            getStorageID(MtpObjectHandle handle);
 
     MtpObjectPropertyList*  getObjectPropsSupported(MtpObjectFormat format);
 
diff --git a/media/mtp/MtpDeviceInfo.cpp b/media/mtp/MtpDeviceInfo.cpp
index 3e1dff7..3d5cb06 100644
--- a/media/mtp/MtpDeviceInfo.cpp
+++ b/media/mtp/MtpDeviceInfo.cpp
@@ -69,6 +69,7 @@
 
     if (!packet.getString(string)) return false;
     mVendorExtensionDesc = strdup((const char *)string);
+    if (!mVendorExtensionDesc) return false;
 
     if (!packet.getUInt16(mFunctionalMode)) return false;
     mOperations = packet.getAUInt16();
@@ -84,12 +85,16 @@
 
     if (!packet.getString(string)) return false;
     mManufacturer = strdup((const char *)string);
+    if (!mManufacturer) return false;
     if (!packet.getString(string)) return false;
     mModel = strdup((const char *)string);
+    if (!mModel) return false;
     if (!packet.getString(string)) return false;
     mVersion = strdup((const char *)string);
+    if (!mVersion) return false;
     if (!packet.getString(string)) return false;
     mSerial = strdup((const char *)string);
+    if (!mSerial) return false;
 
     return true;
 }
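
The added checks matter because strdup() returns NULL on allocation failure and
the parsed fields are dereferenced later without further guards; the same idiom
recurs below in MtpObjectInfo and MtpStorageInfo. A condensed sketch of the
pattern, using a hypothetical dupField() helper that is not part of the patch:

    #include <string.h>

    // Illustrative only: copy a parsed string and abort parsing on failure.
    static bool dupField(const char *src, char **dst) {
        *dst = strdup(src);
        return *dst != nullptr;
    }

Each "mField = strdup(...); if (!mField) return false;" pair above is equivalent
to "if (!dupField((const char *)string, &mField)) return false;".
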
diff --git a/media/mtp/MtpObjectInfo.cpp b/media/mtp/MtpObjectInfo.cpp
index 0573104..43b745f 100644
--- a/media/mtp/MtpObjectInfo.cpp
+++ b/media/mtp/MtpObjectInfo.cpp
@@ -77,6 +77,7 @@
 
     if (!packet.getString(string)) return false;
     mName = strdup((const char *)string);
+    if (!mName) return false;
 
     if (!packet.getString(string)) return false;
     if (parseDateTime((const char*)string, time))
@@ -88,6 +89,7 @@
 
     if (!packet.getString(string)) return false;
     mKeywords = strdup((const char *)string);
+    if (!mKeywords) return false;
 
     return true;
 }
diff --git a/media/mtp/MtpPacket.cpp b/media/mtp/MtpPacket.cpp
index 35ecb4f..3dd4248 100644
--- a/media/mtp/MtpPacket.cpp
+++ b/media/mtp/MtpPacket.cpp
@@ -70,8 +70,8 @@
     char* bufptr = buffer;
 
     for (size_t i = 0; i < mPacketSize; i++) {
-        sprintf(bufptr, "%02X ", mBuffer[i]);
-        bufptr += strlen(bufptr);
+        bufptr += snprintf(bufptr, sizeof(buffer) - (bufptr - buffer), "%02X ",
+                           mBuffer[i]);
         if (i % DUMP_BYTES_PER_ROW == (DUMP_BYTES_PER_ROW - 1)) {
             ALOGV("%s", buffer);
             bufptr = buffer;
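
The switch from sprintf() to snprintf() above bounds every write to the space
remaining in the row buffer and advances the cursor by the return value instead
of re-scanning with strlen(). A self-contained sketch of the same idiom, with a
buffer geometry chosen purely for illustration:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    void dumpRowHex(const uint8_t *data, size_t count) {
        char buffer[16 * 3 + 1] = "";       // 16 bytes as "XX " plus NUL
        char *bufptr = buffer;
        for (size_t i = 0; i < count && i < 16; i++) {
            // snprintf never writes past the buffer; with this buffer size no
            // truncation can occur, so the return value is the appended length.
            bufptr += snprintf(bufptr, sizeof(buffer) - (bufptr - buffer),
                               "%02X ", data[i]);
        }
        printf("%s\n", buffer);             // e.g. "DE AD BE EF "
    }
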
diff --git a/media/mtp/MtpStorageInfo.cpp b/media/mtp/MtpStorageInfo.cpp
index 5d4ebbf..8801a38 100644
--- a/media/mtp/MtpStorageInfo.cpp
+++ b/media/mtp/MtpStorageInfo.cpp
@@ -58,8 +58,10 @@
 
     if (!packet.getString(string)) return false;
     mStorageDescription = strdup((const char *)string);
+    if (!mStorageDescription) return false;
     if (!packet.getString(string)) return false;
     mVolumeIdentifier = strdup((const char *)string);
+    if (!mVolumeIdentifier) return false;
 
     return true;
 }
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index be71f43..8d34ab0 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -99,11 +99,12 @@
             break;
         default:
             ALOGE("Invalid event DrmPlugin::EventType %d, ignored", (int)eventType);
-            return;
+            goto cleanup;
     }
 
     (*mListener)(mObj, &sessionId, ndkEventType, extra, data, dataSize);
 
+ cleanup:
     delete [] sessionId.ptr;
     delete [] data;
 }
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index e7fc381..068b917 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -27,6 +27,10 @@
     AudioHwDevice.cpp           \
     AudioStreamOut.cpp          \
     SpdifStreamOut.cpp          \
+    DeviceHalLocal.cpp          \
+    DevicesFactoryHalLocal.cpp  \
+    EffectHalLocal.cpp          \
+    EffectsFactoryHalLocal.cpp  \
     Effects.cpp                 \
     AudioMixer.cpp.arm          \
     BufferProviders.cpp         \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 60093cc..b5ab782 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -44,11 +44,15 @@
 
 #include "AudioMixer.h"
 #include "AudioFlinger.h"
+#include "DeviceHalInterface.h"
+#include "DevicesFactoryHalInterface.h"
+#include "EffectsFactoryHalInterface.h"
 #include "ServiceUtilities.h"
+// FIXME: Remove after streams HAL is componentized
+#include "DeviceHalLocal.h"
 
 #include <media/AudioResamplerPublic.h>
 
-#include <media/EffectsFactoryApi.h>
 #include <audio_effects/effect_visualizer.h>
 #include <audio_effects/effect_ns.h>
 #include <audio_effects/effect_aec.h>
@@ -85,6 +89,7 @@
 static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
 static const char kHardwareLockedString[] = "Hardware lock is taken\n";
 static const char kClientLockedString[] = "Client lock is taken\n";
+static const char kNoEffectsFactory[] = "Effects Factory is absent\n";
 
 
 nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
@@ -141,35 +146,6 @@
     return "unknown";
 }
 
-static int load_audio_interface(const char *if_name, audio_hw_device_t **dev)
-{
-    const hw_module_t *mod;
-    int rc;
-
-    rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
-    ALOGE_IF(rc, "%s couldn't load audio hw module %s.%s (%s)", __func__,
-                 AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
-    if (rc) {
-        goto out;
-    }
-    rc = audio_hw_device_open(mod, dev);
-    ALOGE_IF(rc, "%s couldn't open audio hw device in %s.%s (%s)", __func__,
-                 AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
-    if (rc) {
-        goto out;
-    }
-    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
-        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
-        rc = BAD_VALUE;
-        goto out;
-    }
-    return 0;
-
-out:
-    *dev = NULL;
-    return rc;
-}
-
 // ----------------------------------------------------------------------------
 
 AudioFlinger::AudioFlinger()
@@ -205,6 +181,9 @@
     // in bad state, reset the state upon service start.
     BatteryNotifier::getInstance().noteResetAudio();
 
+    mDevicesFactoryHal = DevicesFactoryHalInterface::create();
+    mEffectsFactoryHal = EffectsFactoryHalInterface::create();
+
 #ifdef TEE_SINK
     char value[PROPERTY_VALUE_MAX];
     (void) property_get("ro.debuggable", value, "0");
@@ -263,7 +242,6 @@
 
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
         // no mHardwareLock needed, as there are no other references to this
-        audio_hw_device_close(mAudioHwDevs.valueAt(i)->hwDevice());
         delete mAudioHwDevs.valueAt(i);
     }
 
@@ -302,10 +280,12 @@
         // then try to find a module supporting the requested device.
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
             AudioHwDevice *audioHwDevice = mAudioHwDevs.valueAt(i);
-            audio_hw_device_t *dev = audioHwDevice->hwDevice();
-            if ((dev->get_supported_devices != NULL) &&
-                    (dev->get_supported_devices(dev) & devices) == devices)
+            sp<DeviceHalInterface> dev = audioHwDevice->hwDevice();
+            uint32_t supportedDevices;
+            if (dev->getSupportedDevices(&supportedDevices) == OK &&
+                    (supportedDevices & devices) == devices) {
                 return audioHwDevice;
+            }
         }
     } else {
         // check a match for the requested module handle
@@ -419,7 +399,12 @@
             write(fd, result.string(), result.size());
         }
 
-        EffectDumpEffects(fd);
+        if (mEffectsFactoryHal.get() != NULL) {
+            mEffectsFactoryHal->dumpEffects(fd);
+        } else {
+            String8 result(kNoEffectsFactory);
+            write(fd, result.string(), result.size());
+        }
 
         dumpClients(fd, args);
         if (clientLocked) {
@@ -447,8 +432,8 @@
         }
         // dump all hardware devs
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-            audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-            dev->dump(dev, fd);
+            sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+            dev->dump(fd);
         }
 
 #ifdef TEE_SINK
@@ -810,7 +795,7 @@
 
         mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
         if (dev->canSetMasterVolume()) {
-            dev->hwDevice()->set_master_volume(dev->hwDevice(), value);
+            dev->hwDevice()->setMasterVolume(value);
         }
         mHardwareStatus = AUDIO_HW_IDLE;
     }
@@ -847,9 +832,9 @@
 
     { // scope for the lock
         AutoMutex lock(mHardwareLock);
-        audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+        sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
         mHardwareStatus = AUDIO_HW_SET_MODE;
-        ret = dev->set_mode(dev, mode);
+        ret = dev->setMode(mode);
         mHardwareStatus = AUDIO_HW_IDLE;
     }
 
@@ -878,8 +863,8 @@
     AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_SET_MIC_MUTE;
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-        audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-        status_t result = dev->set_mic_mute(dev, state);
+        sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+        status_t result = dev->setMicMute(state);
         if (result != NO_ERROR) {
             ret = result;
         }
@@ -899,8 +884,8 @@
     AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_GET_MIC_MUTE;
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-        audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-        status_t result = dev->get_mic_mute(dev, &state);
+        sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+        status_t result = dev->getMicMute(&state);
         if (result == NO_ERROR) {
             mute = mute && state;
         }
@@ -932,7 +917,7 @@
 
         mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
         if (dev->canSetMasterMute()) {
-            dev->hwDevice()->set_master_mute(dev->hwDevice(), muted);
+            dev->hwDevice()->setMasterMute(muted);
         }
         mHardwareStatus = AUDIO_HW_IDLE;
     }
@@ -1110,8 +1095,8 @@
             AutoMutex lock(mHardwareLock);
             mHardwareStatus = AUDIO_HW_SET_PARAMETER;
             for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-                audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-                status_t result = dev->set_parameters(dev, keyValuePairs.string());
+                sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+                status_t result = dev->setParameters(keyValuePairs);
                 // return success if at least one audio device accepts the parameters as not all
                 // HALs are required to support all parameters. If no audio device supports the
                 // requested parameters, the last error is reported.
@@ -1192,16 +1177,16 @@
         String8 out_s8;
 
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
-            char *s;
+            String8 s;
+            status_t result;
             {
             AutoMutex lock(mHardwareLock);
             mHardwareStatus = AUDIO_HW_GET_PARAMETER;
-            audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
-            s = dev->get_parameters(dev, keys.string());
+            sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+            result = dev->getParameters(keys, &s);
             mHardwareStatus = AUDIO_HW_IDLE;
             }
-            out_s8 += String8(s ? s : "");
-            free(s);
+            if (result == OK) out_s8 += s;
         }
         return out_s8;
     }
@@ -1238,14 +1223,14 @@
     proposed.channel_mask = channelMask;
     proposed.format = format;
 
-    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+    sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
     size_t frames;
     for (;;) {
         // Note: config is currently a const parameter for get_input_buffer_size()
         // but we use a copy from proposed in case config changes from the call.
         config = proposed;
-        frames = dev->get_input_buffer_size(dev, &config);
-        if (frames != 0) {
+        status_t result = dev->getInputBufferSize(&config, &frames);
+        if (result == OK && frames != 0) {
             break; // hal success, config is the result
         }
         // change one parameter of the configuration each iteration to a more "common" value
@@ -1292,9 +1277,9 @@
     }
 
     AutoMutex lock(mHardwareLock);
-    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+    sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
     mHardwareStatus = AUDIO_HW_SET_VOICE_VOLUME;
-    ret = dev->set_voice_volume(dev, value);
+    ret = dev->setVoiceVolume(value);
     mHardwareStatus = AUDIO_HW_IDLE;
 
     return ret;
@@ -1632,16 +1617,16 @@
         }
     }
 
-    audio_hw_device_t *dev;
+    sp<DeviceHalInterface> dev;
 
-    int rc = load_audio_interface(name, &dev);
+    int rc = mDevicesFactoryHal->openDevice(name, &dev);
     if (rc) {
         ALOGE("loadHwModule() error %d loading module %s", rc, name);
         return AUDIO_MODULE_HANDLE_NONE;
     }
 
     mHardwareStatus = AUDIO_HW_INIT;
-    rc = dev->init_check(dev);
+    rc = dev->initCheck();
     mHardwareStatus = AUDIO_HW_IDLE;
     if (rc) {
         ALOGE("loadHwModule() init check error %d for module %s", rc, name);
@@ -1659,32 +1644,26 @@
 
         if (0 == mAudioHwDevs.size()) {
             mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
-            if (NULL != dev->get_master_volume) {
-                float mv;
-                if (OK == dev->get_master_volume(dev, &mv)) {
-                    mMasterVolume = mv;
-                }
+            float mv;
+            if (OK == dev->getMasterVolume(&mv)) {
+                mMasterVolume = mv;
             }
 
             mHardwareStatus = AUDIO_HW_GET_MASTER_MUTE;
-            if (NULL != dev->get_master_mute) {
-                bool mm;
-                if (OK == dev->get_master_mute(dev, &mm)) {
-                    mMasterMute = mm;
-                }
+            bool mm;
+            if (OK == dev->getMasterMute(&mm)) {
+                mMasterMute = mm;
             }
         }
 
         mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
-        if ((NULL != dev->set_master_volume) &&
-            (OK == dev->set_master_volume(dev, mMasterVolume))) {
+        if (OK == dev->setMasterVolume(mMasterVolume)) {
             flags = static_cast<AudioHwDevice::Flags>(flags |
                     AudioHwDevice::AHWD_CAN_SET_MASTER_VOLUME);
         }
 
         mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
-        if ((NULL != dev->set_master_mute) &&
-            (OK == dev->set_master_mute(dev, mMasterMute))) {
+        if (OK == dev->setMasterMute(mMasterMute)) {
             flags = static_cast<AudioHwDevice::Flags>(flags |
                     AudioHwDevice::AHWD_CAN_SET_MASTER_MUTE);
         }
@@ -1695,8 +1674,7 @@
     audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
     mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
 
-    ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
-          name, dev->common.module->name, dev->common.module->id, handle);
+    ALOGI("loadHwModule() Loaded %s audio interface, handle %d", name, handle);
 
     return handle;
 
@@ -1746,13 +1724,15 @@
         return mHwAvSyncIds.valueAt(index);
     }
 
-    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+    sp<DeviceHalInterface> dev = mPrimaryHardwareDev->hwDevice();
     if (dev == NULL) {
         return AUDIO_HW_SYNC_INVALID;
     }
-    char *reply = dev->get_parameters(dev, AUDIO_PARAMETER_HW_AV_SYNC);
-    AudioParameter param = AudioParameter(String8(reply));
-    free(reply);
+    String8 reply;
+    AudioParameter param;
+    if (dev->getParameters(String8(AUDIO_PARAMETER_HW_AV_SYNC), &reply) == OK) {
+        param = AudioParameter(reply);
+    }
 
     int value;
     if (param.getInt(String8(AUDIO_PARAMETER_HW_AV_SYNC), value) != NO_ERROR) {
@@ -1937,7 +1917,7 @@
 
             AutoMutex lock(mHardwareLock);
             mHardwareStatus = AUDIO_HW_SET_MODE;
-            mPrimaryHardwareDev->hwDevice()->set_mode(mPrimaryHardwareDev->hwDevice(), mMode);
+            mPrimaryHardwareDev->hwDevice()->setMode(mMode);
             mHardwareStatus = AUDIO_HW_IDLE;
         }
         return NO_ERROR;
@@ -2033,7 +2013,7 @@
     AudioStreamOut *out = thread->clearOutput();
     ALOG_ASSERT(out != NULL, "out shouldn't be NULL");
     // from now on thread->mOutput is NULL
-    out->hwDev()->close_output_stream(out->hwDev(), out->stream);
+    static_cast<DeviceHalLocal*>(out->hwDev().get())->closeOutputStream(out->stream);
     delete out;
 }
 
@@ -2128,10 +2108,10 @@
     }
 
     audio_config_t halconfig = *config;
-    audio_hw_device_t *inHwHal = inHwDev->hwDevice();
+    sp<DeviceHalInterface> inHwHal = inHwDev->hwDevice();
     audio_stream_in_t *inStream = NULL;
-    status_t status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
-                                        &inStream, flags, address.string(), source);
+    status_t status = static_cast<DeviceHalLocal*>(inHwHal.get())->openInputStream(
+            *input, devices, &halconfig, flags, address.string(), source, &inStream);
     ALOGV("openInput_l() openInputStream returned input %p, SamplingRate %d"
            ", Format %#x, Channels %x, flags %#x, status %d addr %s",
             inStream,
@@ -2152,8 +2132,8 @@
         // FIXME describe the change proposed by HAL (save old values so we can log them here)
         ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
-        status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
-                                            &inStream, flags, address.string(), source);
+        status = static_cast<DeviceHalLocal*>(inHwHal.get())->openInputStream(
+                *input, devices, &halconfig, flags, address.string(), source, &inStream);
         // FIXME log this new status; HAL should not propose any further changes
     }
 
@@ -2304,7 +2284,7 @@
     AudioStreamIn *in = thread->clearInput();
     ALOG_ASSERT(in != NULL, "in shouldn't be NULL");
     // from now on thread->mInput is NULL
-    in->hwDev()->close_input_stream(in->hwDev(), in->stream);
+    static_cast<DeviceHalLocal*>(in->hwDev().get())->closeInputStream(in->stream);
     delete in;
 }
 
@@ -2596,24 +2576,39 @@
 //  Effect management
 // ----------------------------------------------------------------------------
 
+sp<EffectsFactoryHalInterface> AudioFlinger::getEffectsFactory() {
+    return mEffectsFactoryHal;
+}
 
 status_t AudioFlinger::queryNumberEffects(uint32_t *numEffects) const
 {
     Mutex::Autolock _l(mLock);
-    return EffectQueryNumberEffects(numEffects);
+    if (mEffectsFactoryHal.get()) {
+        return mEffectsFactoryHal->queryNumberEffects(numEffects);
+    } else {
+        return -ENODEV;
+    }
 }
 
 status_t AudioFlinger::queryEffect(uint32_t index, effect_descriptor_t *descriptor) const
 {
     Mutex::Autolock _l(mLock);
-    return EffectQueryEffect(index, descriptor);
+    if (mEffectsFactoryHal.get()) {
+        return mEffectsFactoryHal->getDescriptor(index, descriptor);
+    } else {
+        return -ENODEV;
+    }
 }
 
 status_t AudioFlinger::getEffectDescriptor(const effect_uuid_t *pUuid,
         effect_descriptor_t *descriptor) const
 {
     Mutex::Autolock _l(mLock);
-    return EffectGetDescriptor(pUuid, descriptor);
+    if (mEffectsFactoryHal.get()) {
+        return mEffectsFactoryHal->getDescriptor(pUuid, descriptor);
+    } else {
+        return -ENODEV;
+    }
 }
 
 
@@ -2633,8 +2628,8 @@
     effect_descriptor_t desc;
 
     pid_t pid = IPCThreadState::self()->getCallingPid();
-    ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d",
-            pid, effectClient.get(), priority, sessionId, io);
+    ALOGV("createEffect pid %d, effectClient %p, priority %d, sessionId %d, io %d, factory %p",
+            pid, effectClient.get(), priority, sessionId, io, mEffectsFactoryHal.get());
 
     if (pDesc == NULL) {
         lStatus = BAD_VALUE;
@@ -2654,10 +2649,15 @@
         goto Exit;
     }
 
+    if (mEffectsFactoryHal.get() == NULL) {
+        lStatus = NO_INIT;
+        goto Exit;
+    }
+
     {
-        if (!EffectIsNullUuid(&pDesc->uuid)) {
+        if (!EffectsFactoryHalInterface::isNullUuid(&pDesc->uuid)) {
             // if uuid is specified, request effect descriptor
-            lStatus = EffectGetDescriptor(&pDesc->uuid, &desc);
+            lStatus = mEffectsFactoryHal->getDescriptor(&pDesc->uuid, &desc);
             if (lStatus < 0) {
                 ALOGW("createEffect() error %d from EffectGetDescriptor", lStatus);
                 goto Exit;
@@ -2665,7 +2665,7 @@
         } else {
             // if uuid is not specified, look for an available implementation
             // of the required type in effect factory
-            if (EffectIsNullUuid(&pDesc->type)) {
+            if (EffectsFactoryHalInterface::isNullUuid(&pDesc->type)) {
                 ALOGW("createEffect() no effect type");
                 lStatus = BAD_VALUE;
                 goto Exit;
@@ -2675,13 +2675,13 @@
             d.flags = 0; // prevent compiler warning
             bool found = false;
 
-            lStatus = EffectQueryNumberEffects(&numEffects);
+            lStatus = mEffectsFactoryHal->queryNumberEffects(&numEffects);
             if (lStatus < 0) {
                 ALOGW("createEffect() error %d from EffectQueryNumberEffects", lStatus);
                 goto Exit;
             }
             for (uint32_t i = 0; i < numEffects; i++) {
-                lStatus = EffectQueryEffect(i, &desc);
+                lStatus = mEffectsFactoryHal->getDescriptor(i, &desc);
                 if (lStatus < 0) {
                     ALOGW("createEffect() error %d from EffectQueryEffect", lStatus);
                     continue;
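
Taken together, the AudioFlinger.cpp changes route every audio_hw_device_t
access through the new HAL wrapper classes, and the removed
load_audio_interface() helper reappears inside DevicesFactoryHalLocal. A
minimal sketch of the resulting open path, with "primary" used as an example
module name and the headers included at the top of this file assumed:

    sp<DevicesFactoryHalInterface> factory = DevicesFactoryHalInterface::create();
    sp<DeviceHalInterface> dev;
    if (factory->openDevice("primary", &dev) == OK && dev->initCheck() == OK) {
        // Calls map one-to-one onto the legacy C hooks via DeviceHalLocal.
        dev->setMode(AUDIO_MODE_NORMAL);
        dev->setMasterVolume(1.0f);
    }
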
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index c4b89f8..1890c94 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -71,6 +71,9 @@
 class AudioMixer;
 class AudioBuffer;
 class AudioResampler;
+class DeviceHalInterface;
+class DevicesFactoryHalInterface;
+class EffectsFactoryHalInterface;
 class FastMixer;
 class PassthruBufferProvider;
 class ServerProxy;
@@ -271,6 +274,7 @@
 
     sp<NBLog::Writer>   newWriter_l(size_t size, const char *name);
     void                unregisterWriter(const sp<NBLog::Writer>& writer);
+    sp<EffectsFactoryHalInterface> getEffectsFactory();
 private:
     static const size_t kLogMemorySize = 40 * 1024;
     sp<MemoryDealer>    mLogMemoryDealer;   // == 0 when NBLog is disabled
@@ -614,7 +618,7 @@
         audio_stream_in_t* const stream;
         audio_input_flags_t flags;
 
-        audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
+        sp<DeviceHalInterface> hwDev() const { return audioHwDev->hwDevice(); }
 
         AudioStreamIn(AudioHwDevice *dev, audio_stream_in_t *in, audio_input_flags_t flags) :
             audioHwDev(dev), stream(in), flags(flags) {}
@@ -645,6 +649,8 @@
                 AudioHwDevice*                      mPrimaryHardwareDev; // mAudioHwDevs[0] or NULL
                 DefaultKeyedVector<audio_module_handle_t, AudioHwDevice*>  mAudioHwDevs;
 
+                sp<DevicesFactoryHalInterface> mDevicesFactoryHal;
+
     // for dump, indicates which hardware operation is currently in progress (but not stream ops)
     enum hardware_call_state {
         AUDIO_HW_IDLE = 0,              // no operation in progress
@@ -760,6 +766,7 @@
     nsecs_t mGlobalEffectEnableTime;  // when a global effect was last enabled
 
     sp<PatchPanel> mPatchPanel;
+    sp<EffectsFactoryHalInterface> mEffectsFactoryHal;
 
     bool        mSystemReady;
 };
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 7494930..ee10f8d 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -93,5 +93,10 @@
     return status;
 }
 
+uint32_t AudioHwDevice::version() const
+{
+    uint32_t result;
+    return mHwDevice->getVersion(&result) == OK ? result : 0;
+}
 
 }; // namespace android
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index b9f65c1..54e1d59 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -26,6 +26,7 @@
 #include <utils/Errors.h>
 #include <system/audio.h>
 
+#include "DeviceHalInterface.h"
 
 namespace android {
 
@@ -40,7 +41,7 @@
 
     AudioHwDevice(audio_module_handle_t handle,
                   const char *moduleName,
-                  audio_hw_device_t *hwDevice,
+                  sp<DeviceHalInterface> hwDevice,
                   Flags flags)
         : mHandle(handle)
         , mModuleName(strdup(moduleName))
@@ -58,8 +59,8 @@
 
     audio_module_handle_t handle() const { return mHandle; }
     const char *moduleName() const { return mModuleName; }
-    audio_hw_device_t *hwDevice() const { return mHwDevice; }
-    uint32_t version() const { return mHwDevice->common.version; }
+    sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
+    uint32_t version() const;
 
     /** This method creates and opens the audio hardware output stream.
      * The "address" parameter qualifies the "devices" audio device type if needed.
@@ -79,7 +80,7 @@
 private:
     const audio_module_handle_t mHandle;
     const char * const          mModuleName;
-    audio_hw_device_t * const   mHwDevice;
+    sp<DeviceHalInterface>      mHwDevice;
     const Flags                 mFlags;
 };
 
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index b7ca5d9..21914b9 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -149,6 +149,15 @@
 }
 
 template<typename TC, typename TI, typename TO>
+void AudioResamplerDyn<TC, TI, TO>::InBuffer::reset()
+{
+    // clear resampler state
+    if (mState != nullptr) {
+        memset(mState, 0, mStateCount * sizeof(TI));
+    }
+}
+
+template<typename TC, typename TI, typename TO>
 void AudioResamplerDyn<TC, TI, TO>::Constants::set(
         int L, int halfNumCoefs, int inSampleRate, int outSampleRate)
 {
@@ -528,6 +537,9 @@
             mBuffer.frameCount = inFrameCount;
             provider->getNextBuffer(&mBuffer);
             if (mBuffer.raw == NULL) {
+                // We are either at the end of playback or in an underrun situation.
+                // Reset buffer to prevent pop noise at the next buffer.
+                mInBuffer.reset();
                 goto resample_exit;
             }
             inFrameCount -= mBuffer.frameCount;
diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h
index 3b1c381..a5ea821 100644
--- a/services/audioflinger/AudioResamplerDyn.h
+++ b/services/audioflinger/AudioResamplerDyn.h
@@ -96,6 +96,8 @@
         inline void readAdvance(TI*& impulse, const int halfNumCoefs,
                 const TI* const in, const size_t inputIndex);
 
+        void reset();
+
     private:
         // tuning parameter guidelines: 2 <= multiple <= 8
         static const int kStateSizeMultipleOfFilterLength = 4;
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index 6026bbb..4839480 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -23,6 +23,10 @@
 
 #include "AudioHwDevice.h"
 #include "AudioStreamOut.h"
+#include "DeviceHalInterface.h"
+
+// FIXME: Remove after streams HAL is componentized
+#include "DeviceHalLocal.h"
 
 namespace android {
 
@@ -40,7 +44,7 @@
 {
 }
 
-audio_hw_device_t *AudioStreamOut::hwDev() const
+sp<DeviceHalInterface> AudioStreamOut::hwDev() const
 {
     return audioHwDev->hwDevice();
 }
@@ -121,14 +125,13 @@
                 ? (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO)
                 : flags;
 
-    int status = hwDev()->open_output_stream(
-            hwDev(),
+    int status = static_cast<DeviceHalLocal*>(hwDev().get())->openOutputStream(
             handle,
             devices,
             customFlags,
             config,
-            &outStream,
-            address);
+            address,
+            &outStream);
     ALOGV("AudioStreamOut::open(), HAL returned "
             " stream %p, sampleRate %d, Format %#x, "
             "channelMask %#x, status %d",
@@ -144,14 +147,13 @@
         struct audio_config customConfig = *config;
         customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
 
-        status = hwDev()->open_output_stream(
-                hwDev(),
+        status = static_cast<DeviceHalLocal*>(hwDev().get())->openOutputStream(
                 handle,
                 devices,
                 customFlags,
                 &customConfig,
-                &outStream,
-                address);
+                address,
+                &outStream);
         ALOGV("AudioStreamOut::open(), treat IEC61937 as PCM, status = %d", status);
     }
 
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index 768f537..d132f6f 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -23,11 +23,10 @@
 
 #include <system/audio.h>
 
-#include "AudioStreamOut.h"
-
 namespace android {
 
 class AudioHwDevice;
+class DeviceHalInterface;
 
 /**
  * Managed access to a HAL output stream.
@@ -41,7 +40,7 @@
     audio_stream_out_t *stream;
     const audio_output_flags_t flags;
 
-    audio_hw_device_t *hwDev() const;
+    sp<DeviceHalInterface> hwDev() const;
 
     AudioStreamOut(AudioHwDevice *dev, audio_output_flags_t flags);
 
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
index 7b6dfcb..c1ff1af 100644
--- a/services/audioflinger/BufferProviders.cpp
+++ b/services/audioflinger/BufferProviders.cpp
@@ -21,12 +21,13 @@
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
 #include <media/AudioResamplerPublic.h>
-#include <media/EffectsFactoryApi.h>
 
 #include <utils/Log.h>
 
 #include "Configuration.h"
 #include "BufferProviders.h"
+#include "EffectHalInterface.h"
+#include "EffectsFactoryHalInterface.h"
 
 #ifndef ARRAY_SIZE
 #define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
@@ -145,13 +146,22 @@
     ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
             this, inputChannelMask, outputChannelMask, format,
             sampleRate, sessionId);
-    if (!sIsMultichannelCapable
-            || EffectCreate(&sDwnmFxDesc.uuid,
-                    sessionId,
-                    SESSION_ID_INVALID_AND_IGNORED,
-                    &mDownmixHandle) != 0) {
+    if (!sIsMultichannelCapable) {
+        ALOGE("DownmixerBufferProvider() error: not multichannel capable");
+        return;
+    }
+    mEffectsFactory = EffectsFactoryHalInterface::create();
+    if (mEffectsFactory.get() == NULL) {
+        ALOGE("DownmixerBufferProvider() error: could not obtain the effects factory");
+        return;
+    }
+    if (mEffectsFactory->createEffect(&sDwnmFxDesc.uuid,
+                                      sessionId,
+                                      SESSION_ID_INVALID_AND_IGNORED,
+                                      &mDownmixInterface) != 0) {
          ALOGE("DownmixerBufferProvider() error creating downmixer effect");
-         mDownmixHandle = NULL;
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
          return;
      }
      // channel input configuration will be overridden per-track
@@ -173,28 +183,28 @@
      uint32_t replySize = sizeof(int);
 
      // Configure downmixer
-     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
+     status_t status = mDownmixInterface->command(
              EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
              &mDownmixConfig /*pCmdData*/,
              &replySize, &cmdStatus /*pReplyData*/);
      if (status != 0 || cmdStatus != 0) {
          ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
                  status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
          return;
      }
 
      // Enable downmixer
      replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
+     status = mDownmixInterface->command(
              EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
              &replySize, &cmdStatus /*pReplyData*/);
      if (status != 0 || cmdStatus != 0) {
          ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
                  status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
          return;
      }
 
@@ -211,15 +221,15 @@
      param->vsize = sizeof(downmix_type_t);
      memcpy(param->data + psizePadded, &downmixType, param->vsize);
      replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
+     status = mDownmixInterface->command(
              EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
              param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
      free(param);
      if (status != 0 || cmdStatus != 0) {
          ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
                  status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
+         mDownmixInterface.clear();
+         mEffectsFactory.clear();
          return;
      }
      ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
@@ -228,8 +238,6 @@
 DownmixerBufferProvider::~DownmixerBufferProvider()
 {
     ALOGV("~DownmixerBufferProvider (%p)", this);
-    EffectRelease(mDownmixHandle);
-    mDownmixHandle = NULL;
 }
 
 void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
@@ -239,7 +247,7 @@
     mDownmixConfig.outputCfg.buffer.frameCount = frames;
     mDownmixConfig.outputCfg.buffer.raw = dst;
     // may be in-place if src == dst.
-    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
+    status_t res = mDownmixInterface->process(
             &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
     ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
 }
@@ -248,8 +256,13 @@
 /*static*/ status_t DownmixerBufferProvider::init()
 {
     // find multichannel downmix effect if we have to play multichannel content
+    sp<EffectsFactoryHalInterface> effectsFactory = EffectsFactoryHalInterface::create();
+    if (effectsFactory.get() == NULL) {
+        ALOGE("AudioMixer() error: could not obtain the effects factory");
+        return NO_INIT;
+    }
     uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
+    int ret = effectsFactory->queryNumberEffects(&numEffects);
     if (ret != 0) {
         ALOGE("AudioMixer() error %d querying number of effects", ret);
         return NO_INIT;
@@ -257,7 +270,7 @@
     ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
 
     for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+        if (effectsFactory->getDescriptor(i, &sDwnmFxDesc) == 0) {
             ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
             if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
                 ALOGI("found effect \"%s\" from %s",
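
With this change DownmixerBufferProvider holds the downmix effect through an
sp<EffectHalInterface> obtained from the effects factory, so releasing the
effect is left to the HAL wrapper's reference counting rather than an explicit
EffectRelease(). A condensed sketch of the create-and-enable sequence used
above (session id 0 is a placeholder value):

    // Illustrative only; mirrors the calls made through the new HAL interfaces.
    sp<EffectsFactoryHalInterface> factory = EffectsFactoryHalInterface::create();
    sp<EffectHalInterface> effect;
    if (factory.get() != NULL &&
            factory->createEffect(&sDwnmFxDesc.uuid, 0 /*sessionId*/,
                                  SESSION_ID_INVALID_AND_IGNORED, &effect) == 0) {
        int cmdStatus = 0;
        uint32_t replySize = sizeof(cmdStatus);
        effect->command(EFFECT_CMD_ENABLE, 0 /*cmdSize*/, NULL /*pCmdData*/,
                        &replySize, &cmdStatus /*pReplyData*/);
    }
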
diff --git a/services/audioflinger/BufferProviders.h b/services/audioflinger/BufferProviders.h
index abd43c6..cb13689 100644
--- a/services/audioflinger/BufferProviders.h
+++ b/services/audioflinger/BufferProviders.h
@@ -24,9 +24,13 @@
 #include <media/AudioBufferProvider.h>
 #include <system/audio.h>
 #include <sonic.h>
+#include <utils/StrongPointer.h>
 
 namespace android {
 
+class EffectHalInterface;
+class EffectsFactoryHalInterface;
+
 // ----------------------------------------------------------------------------
 
 class PassthruBufferProvider : public AudioBufferProvider {
@@ -97,12 +101,13 @@
     //Overrides
     virtual void copyFrames(void *dst, const void *src, size_t frames);
 
-    bool isValid() const { return mDownmixHandle != NULL; }
+    bool isValid() const { return mDownmixInterface.get() != NULL; }
     static status_t init();
     static bool isMultichannelCapable() { return sIsMultichannelCapable; }
 
 protected:
-    effect_handle_t    mDownmixHandle;
+    sp<EffectsFactoryHalInterface> mEffectsFactory;
+    sp<EffectHalInterface> mDownmixInterface;
     effect_config_t    mDownmixConfig;
 
     // effect descriptor for the downmixer used by the mixer
diff --git a/services/audioflinger/DeviceHalInterface.h b/services/audioflinger/DeviceHalInterface.h
new file mode 100644
index 0000000..ea682f0
--- /dev/null
+++ b/services/audioflinger/DeviceHalInterface.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
+
+#include <hardware/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class DeviceHalInterface : public RefBase
+{
+  public:
+    // The destructor automatically closes the device.
+    virtual ~DeviceHalInterface() {}
+
+    // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+    virtual status_t getSupportedDevices(uint32_t *devices) = 0;
+
+    // Get the hardware module version.
+    virtual status_t getVersion(uint32_t *version) = 0;
+
+    // Check to see if the audio hardware interface has been initialized.
+    virtual status_t initCheck() = 0;
+
+    // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+    virtual status_t setVoiceVolume(float volume) = 0;
+
+    // Set the audio volume for all audio activities other than voice call.
+    virtual status_t setMasterVolume(float volume) = 0;
+
+    // Get the current master volume value for the HAL.
+    virtual status_t getMasterVolume(float *volume) = 0;
+
+    // Called when the audio mode changes.
+    virtual status_t setMode(audio_mode_t mode) = 0;
+
+    // Muting control.
+    virtual status_t setMicMute(bool state) = 0;
+    virtual status_t getMicMute(bool *state) = 0;
+    virtual status_t setMasterMute(bool state) = 0;
+    virtual status_t getMasterMute(bool *state) = 0;
+
+    // Set global audio parameters.
+    virtual status_t setParameters(const String8& kvPairs) = 0;
+
+    // Get global audio parameters.
+    virtual status_t getParameters(const String8& keys, String8 *values) = 0;
+
+    // Returns audio input buffer size according to parameters passed.
+    virtual status_t getInputBufferSize(const struct audio_config *config,
+            size_t *size) = 0;
+
+    // Creates and opens the audio hardware output stream. The stream is closed
+    // by releasing all references to the returned object.
+    // FIXME: Enable when StreamOutHalInterface is introduced.
+    // virtual status_t openOutputStream(
+    //         audio_io_handle_t handle,
+    //         audio_devices_t devices,
+    //         audio_output_flags_t flags,
+    //         struct audio_config *config,
+    //         const char *address,
+    //         sp<StreamOutHalInterface> *outStream) = 0;
+
+    // Creates and opens the audio hardware input stream. The stream is closed
+    // by releasing all references to the returned object.
+    // FIXME: Enable when StreamInHalInterface is introduced.
+    // virtual status_t openInputStream(
+    //         audio_io_handle_t handle,
+    //         audio_devices_t devices,
+    //         struct audio_config *config,
+    //         audio_input_flags_t flags,
+    //         const char *address,
+    //         audio_source_t source,
+    //         sp<StreamInHalInterface> *inStream) = 0;
+
+    // Creates an audio patch between several source and sink ports.
+    virtual status_t createAudioPatch(
+            unsigned int num_sources,
+            const struct audio_port_config *sources,
+            unsigned int num_sinks,
+            const struct audio_port_config *sinks,
+            audio_patch_handle_t *patch) = 0;
+
+    // Releases an audio patch.
+    virtual status_t releaseAudioPatch(audio_patch_handle_t patch) = 0;
+
+    // Fills the list of supported attributes for a given audio port.
+    virtual status_t getAudioPort(struct audio_port *port) = 0;
+
+    // Set audio port configuration.
+    virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
+
+    virtual status_t dump(int fd) = 0;
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    DeviceHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
diff --git a/services/audioflinger/DeviceHalLocal.cpp b/services/audioflinger/DeviceHalLocal.cpp
new file mode 100644
index 0000000..b602f16
--- /dev/null
+++ b/services/audioflinger/DeviceHalLocal.cpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioFlinger::DeviceHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+
+namespace android {
+
+DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
+        : mDev(dev) {
+}
+
+DeviceHalLocal::~DeviceHalLocal() {
+    int status = audio_hw_device_close(mDev);
+    ALOGW_IF(status, "Error closing audio hw device %p: %s", mDev, strerror(-status));
+    mDev = 0;
+}
+
+status_t DeviceHalLocal::getSupportedDevices(uint32_t *devices) {
+    if (mDev->get_supported_devices == NULL) return INVALID_OPERATION;
+    *devices = mDev->get_supported_devices(mDev);
+    return OK;
+}
+
+status_t DeviceHalLocal::getVersion(uint32_t *version) {
+    *version = mDev->common.version;
+    return OK;
+}
+
+status_t DeviceHalLocal::initCheck() {
+    return mDev->init_check(mDev);
+}
+
+status_t DeviceHalLocal::setVoiceVolume(float volume) {
+    return mDev->set_voice_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMasterVolume(float volume) {
+    if (mDev->set_master_volume == NULL) return INVALID_OPERATION;
+    return mDev->set_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::getMasterVolume(float *volume) {
+    if (mDev->get_master_volume == NULL) return INVALID_OPERATION;
+    return mDev->get_master_volume(mDev, volume);
+}
+
+status_t DeviceHalLocal::setMode(audio_mode_t mode) {
+    return mDev->set_mode(mDev, mode);
+}
+
+status_t DeviceHalLocal::setMicMute(bool state) {
+    return mDev->set_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMicMute(bool *state) {
+    return mDev->get_mic_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setMasterMute(bool state) {
+    if (mDev->set_master_mute == NULL) return INVALID_OPERATION;
+    return mDev->set_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::getMasterMute(bool *state) {
+    if (mDev->get_master_mute == NULL) return INVALID_OPERATION;
+    return mDev->get_master_mute(mDev, state);
+}
+
+status_t DeviceHalLocal::setParameters(const String8& kvPairs) {
+    return mDev->set_parameters(mDev, kvPairs.string());
+}
+
+status_t DeviceHalLocal::getParameters(const String8& keys, String8 *values) {
+    char *c_values = mDev->get_parameters(mDev, keys.string());
+    if (c_values != NULL) {
+        values->setTo(c_values);
+        free(c_values);
+    } else {
+        values->clear();
+    }
+    return OK;
+}
+
+status_t DeviceHalLocal::getInputBufferSize(
+        const struct audio_config *config, size_t *size) {
+    *size = mDev->get_input_buffer_size(mDev, config);
+    return OK;
+}
+
+status_t DeviceHalLocal::createAudioPatch(
+        unsigned int num_sources,
+        const struct audio_port_config *sources,
+        unsigned int num_sinks,
+        const struct audio_port_config *sinks,
+        audio_patch_handle_t *patch) {
+    return mDev->create_audio_patch(mDev, num_sources, sources, num_sinks, sinks, patch);
+}
+
+status_t DeviceHalLocal::releaseAudioPatch(audio_patch_handle_t patch) {
+    return mDev->release_audio_patch(mDev, patch);
+}
+
+status_t DeviceHalLocal::getAudioPort(struct audio_port *port) {
+    return mDev->get_audio_port(mDev, port);
+}
+
+status_t DeviceHalLocal::setAudioPortConfig(const struct audio_port_config *config) {
+    return mDev->set_audio_port_config(mDev, config);
+}
+
+status_t DeviceHalLocal::dump(int fd) {
+    return mDev->dump(mDev, fd);
+}
+
+status_t DeviceHalLocal::openOutputStream(
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        audio_output_flags_t flags,
+        struct audio_config *config,
+        const char *address,
+        struct audio_stream_out **stream_out) {
+    return mDev->open_output_stream(mDev, handle, devices, flags, config, stream_out, address);
+}
+
+status_t DeviceHalLocal::closeOutputStream(struct audio_stream_out *stream_out) {
+    mDev->close_output_stream(mDev, stream_out);
+    return OK;
+}
+
+status_t DeviceHalLocal::openInputStream(
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        struct audio_config *config,
+        audio_input_flags_t flags,
+        const char *address,
+        audio_source_t source,
+        struct audio_stream_in **stream_in) {
+    return mDev->open_input_stream(
+            mDev, handle, devices, config, stream_in, flags, address, source);
+}
+
+status_t DeviceHalLocal::closeInputStream(struct audio_stream_in *stream_in) {
+    mDev->close_input_stream(mDev, stream_in);
+    return OK;
+}
+
+} // namespace android
diff --git a/services/audioflinger/DeviceHalLocal.h b/services/audioflinger/DeviceHalLocal.h
new file mode 100644
index 0000000..cc53e9c
--- /dev/null
+++ b/services/audioflinger/DeviceHalLocal.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
+#define ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
+
+#include "DeviceHalInterface.h"
+
+namespace android {
+
+class DeviceHalLocal : public DeviceHalInterface
+{
+  public:
+    // The destructor automatically closes the device.
+    virtual ~DeviceHalLocal();
+
+    // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+    virtual status_t getSupportedDevices(uint32_t *devices);
+
+    // Get the hardware module version.
+    virtual status_t getVersion(uint32_t *version);
+
+    // Check to see if the audio hardware interface has been initialized.
+    virtual status_t initCheck();
+
+    // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+    virtual status_t setVoiceVolume(float volume);
+
+    // Set the audio volume for all audio activities other than voice call.
+    virtual status_t setMasterVolume(float volume);
+
+    // Get the current master volume value for the HAL.
+    virtual status_t getMasterVolume(float *volume);
+
+    // Called when the audio mode changes.
+    virtual status_t setMode(audio_mode_t mode);
+
+    // Muting control.
+    virtual status_t setMicMute(bool state);
+    virtual status_t getMicMute(bool *state);
+    virtual status_t setMasterMute(bool state);
+    virtual status_t getMasterMute(bool *state);
+
+    // Set global audio parameters.
+    virtual status_t setParameters(const String8& kvPairs);
+
+    // Get global audio parameters.
+    virtual status_t getParameters(const String8& keys, String8 *values);
+
+    // Returns audio input buffer size according to parameters passed.
+    virtual status_t getInputBufferSize(const struct audio_config *config,
+            size_t *size);
+
+    // Creates and opens the audio hardware output stream. The stream is closed
+    // by releasing all references to the returned object.
+    // FIXME: Enable when StreamOutHalInterface is introduced.
+    // virtual status_t openOutputStream(
+    //         audio_io_handle_t handle,
+    //         audio_devices_t devices,
+    //         audio_output_flags_t flags,
+    //         struct audio_config *config,
+    //         const char *address,
+    //         sp<StreamOutHalInterface> *outStream);
+
+    // Creates and opens the audio hardware input stream. The stream is closed
+    // by releasing all references to the returned object.
+    // FIXME: Enable when StreamInHalInterface is introduced.
+    // virtual status_t openInputStream(
+    //         audio_io_handle_t handle,
+    //         audio_devices_t devices,
+    //         struct audio_config *config,
+    //         audio_input_flags_t flags,
+    //         const char *address,
+    //         audio_source_t source,
+    //         sp<StreamInHalInterface> *inStream);
+
+    // Creates an audio patch between several source and sink ports.
+    virtual status_t createAudioPatch(
+            unsigned int num_sources,
+            const struct audio_port_config *sources,
+            unsigned int num_sinks,
+            const struct audio_port_config *sinks,
+            audio_patch_handle_t *patch);
+
+    // Releases an audio patch.
+    virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+
+    // Fills the list of supported attributes for a given audio port.
+    virtual status_t getAudioPort(struct audio_port *port);
+
+    // Set audio port configuration.
+    virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+    virtual status_t dump(int fd);
+
+    // FIXME: Remove when StreamOutHalInterface is introduced.
+    status_t openOutputStream(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            audio_output_flags_t flags,
+            struct audio_config *config,
+            const char *address,
+            struct audio_stream_out **stream_out);
+
+    // FIXME: Remove when StreamOutHalInterface is introduced.
+    status_t closeOutputStream(struct audio_stream_out *stream_out);
+
+    // FIXME: Remove when StreamInHalInterface is introduced.
+    status_t openInputStream(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            struct audio_config *config,
+            audio_input_flags_t flags,
+            const char *address,
+            audio_source_t source,
+            struct audio_stream_in **stream_in);
+
+    // FIXME: Remove when StreamInHalInterface is introduced.
+    status_t closeInputStream(struct audio_stream_in *stream_in);
+
+  private:
+    audio_hw_device_t *mDev;
+
+    friend class DevicesFactoryHalLocal;
+
+    // Can not be constructed directly by clients.
+    explicit DeviceHalLocal(audio_hw_device_t *dev);
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/services/audioflinger/DevicesFactoryHalInterface.h b/services/audioflinger/DevicesFactoryHalInterface.h
new file mode 100644
index 0000000..70c8260
--- /dev/null
+++ b/services/audioflinger/DevicesFactoryHalInterface.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalInterface.h"
+
+namespace android {
+
+class DevicesFactoryHalInterface : public RefBase
+{
+  public:
+    virtual ~DevicesFactoryHalInterface() {}
+
+    // Opens a device with the specified name. To close the device, it is
+    // necessary to release references to the returned object.
+    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device) = 0;
+
+    static sp<DevicesFactoryHalInterface> create();
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    DevicesFactoryHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
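For illustration, a minimal usage sketch of this factory (hypothetical client code, not part of the patch; it assumes DeviceHalInterface mirrors the DeviceHalLocal methods shown earlier, and that <hardware/audio.h> and <utils/String8.h> are included):

    namespace android {
    status_t openPrimaryHal(sp<DeviceHalInterface> *dev) {
        sp<DevicesFactoryHalInterface> factory = DevicesFactoryHalInterface::create();
        // AUDIO_HARDWARE_MODULE_ID_PRIMARY names the "primary" audio HAL module
        status_t status = factory->openDevice(AUDIO_HARDWARE_MODULE_ID_PRIMARY, dev);
        if (status == OK) {
            (*dev)->setParameters(String8("restarting=true"));  // forwarded to the wrapped audio_hw_device_t
        }
        // Per the interface comment, the device is closed when the last sp<> reference is released.
        return status;
    }
    } // namespace android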
diff --git a/services/audioflinger/DevicesFactoryHalLocal.cpp b/services/audioflinger/DevicesFactoryHalLocal.cpp
new file mode 100644
index 0000000..437be25
--- /dev/null
+++ b/services/audioflinger/DevicesFactoryHalLocal.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioFlinger::DevicesFactoryHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <string.h>
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "DeviceHalLocal.h"
+#include "DevicesFactoryHalLocal.h"
+
+namespace android {
+
+// static
+sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
+    return new DevicesFactoryHalLocal();
+}
+
+static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
+{
+    const hw_module_t *mod;
+    int rc;
+
+    rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
+    if (rc) {
+        ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
+                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+        goto out;
+    }
+    rc = audio_hw_device_open(mod, dev);
+    if (rc) {
+        ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
+                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
+        goto out;
+    }
+    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
+        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
+        rc = BAD_VALUE;
+        audio_hw_device_close(*dev);
+        goto out;
+    }
+    return OK;
+
+out:
+    *dev = NULL;
+    return rc;
+}
+
+status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+    audio_hw_device_t *dev;
+    status_t rc = load_audio_interface(name, &dev);
+    if (rc == OK) {
+        *device = new DeviceHalLocal(dev);
+    }
+    return rc;
+}
+
+} // namespace android
diff --git a/services/audioflinger/DevicesFactoryHalLocal.h b/services/audioflinger/DevicesFactoryHalLocal.h
new file mode 100644
index 0000000..55d422c
--- /dev/null
+++ b/services/audioflinger/DevicesFactoryHalLocal.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
+#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "DeviceHalLocal.h"
+#include "DevicesFactoryHalInterface.h"
+
+namespace android {
+
+class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
+{
+  public:
+    virtual ~DevicesFactoryHalLocal() {}
+
+    // Opens a device with the specified name. To close the device, it is
+    // necessary to release references to the returned object.
+    virtual status_t openDevice(const char *name, sp<DeviceHalInterface> *device);
+
+  private:
+    friend class DevicesFactoryHalInterface;
+
+    // Can not be constructed directly by clients.
+    DevicesFactoryHalLocal() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
diff --git a/services/audioflinger/EffectHalInterface.h b/services/audioflinger/EffectHalInterface.h
new file mode 100644
index 0000000..e774520
--- /dev/null
+++ b/services/audioflinger/EffectHalInterface.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
+
+#include <hardware/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class EffectHalInterface : public RefBase
+{
+  public:
+    // The destructor automatically releases the effect.
+    virtual ~EffectHalInterface() {}
+
+    // Effect process function. Takes input samples as specified
+    // in the input buffer descriptor and outputs processed samples as specified
+    // in the output buffer descriptor.
+    virtual status_t process(audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) = 0;
+
+    // Process reverse stream function. This function is used to pass
+    // a reference stream to the effect engine.
+    virtual status_t processReverse(audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) = 0;
+
+    // Send a command and receive a response to/from effect engine.
+    virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+            uint32_t *replySize, void *pReplyData) = 0;
+
+    // Returns the effect descriptor.
+    virtual status_t getDescriptor(effect_descriptor_t *pDescriptor) = 0;
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    EffectHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_INTERFACE_H
diff --git a/services/audioflinger/EffectHalLocal.cpp b/services/audioflinger/EffectHalLocal.cpp
new file mode 100644
index 0000000..aae1921
--- /dev/null
+++ b/services/audioflinger/EffectHalLocal.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioFlinger::EffectHalLocal"
+//#define LOG_NDEBUG 0
+
+#include <media/EffectsFactoryApi.h>
+#include <utils/Log.h>
+
+#include "EffectHalLocal.h"
+
+namespace android {
+
+EffectHalLocal::EffectHalLocal(effect_handle_t handle)
+        : mHandle(handle) {
+}
+
+EffectHalLocal::~EffectHalLocal() {
+    int status = EffectRelease(mHandle);
+    ALOGW_IF(status, "Error releasing effect %p: %s", mHandle, strerror(-status));
+    mHandle = 0;
+}
+
+status_t EffectHalLocal::process(audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
+    return (*mHandle)->process(mHandle, inBuffer, outBuffer);
+}
+
+status_t EffectHalLocal::processReverse(audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
+    return (*mHandle)->process_reverse(mHandle, inBuffer, outBuffer);
+}
+
+status_t EffectHalLocal::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+        uint32_t *replySize, void *pReplyData) {
+    return (*mHandle)->command(mHandle, cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+}
+
+status_t EffectHalLocal::getDescriptor(effect_descriptor_t *pDescriptor) {
+    return (*mHandle)->get_descriptor(mHandle, pDescriptor);
+}
+
+} // namespace android
diff --git a/services/audioflinger/EffectHalLocal.h b/services/audioflinger/EffectHalLocal.h
new file mode 100644
index 0000000..92b2153
--- /dev/null
+++ b/services/audioflinger/EffectHalLocal.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
+#define ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
+
+#include "EffectHalInterface.h"
+
+namespace android {
+
+class EffectHalLocal : public EffectHalInterface
+{
+  public:
+    // The destructor automatically releases the effect.
+    virtual ~EffectHalLocal();
+
+    // Effect process function. Takes input samples as specified
+    // in the input buffer descriptor and outputs processed samples as specified
+    // in the output buffer descriptor.
+    virtual status_t process(audio_buffer_t *inBuffer, audio_buffer_t *outBuffer);
+
+    // Process reverse stream function. This function is used to pass
+    // a reference stream to the effect engine.
+    virtual status_t processReverse(audio_buffer_t *inBuffer, audio_buffer_t *outBuffer);
+
+    // Send a command and receive a response to/from effect engine.
+    virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
+            uint32_t *replySize, void *pReplyData);
+
+    // Returns the effect descriptor.
+    virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
+
+    // FIXME: Remove after converting the main audio HAL
+    effect_handle_t handle() const { return mHandle; }
+
+  private:
+    effect_handle_t mHandle;
+
+    friend class EffectsFactoryHalLocal;
+
+    // Can not be constructed directly by clients.
+    explicit EffectHalLocal(effect_handle_t handle);
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 5b7ae96..25c1dbb 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -27,6 +27,10 @@
 #include <media/EffectsFactoryApi.h>
 
 #include "AudioFlinger.h"
+#include "EffectHalInterface.h"
+// FIXME: Remove after converting the main audio HAL
+#include "EffectHalLocal.h"
+#include "EffectsFactoryHalInterface.h"
 #include "ServiceUtilities.h"
 
 // ----------------------------------------------------------------------------
@@ -64,7 +68,6 @@
       mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
       mDescriptor(*desc),
       // mConfig is set by configure() and not used before then
-      mEffectInterface(NULL),
       mStatus(NO_INIT), mState(IDLE),
       // mMaxDisableWaitCnt is set by configure() and not used before then
       // mDisableWaitCnt is set by process() and updateState() and not used before then
@@ -75,7 +78,15 @@
     int lStatus;
 
     // create effect engine from effect factory
-    mStatus = EffectCreate(&desc->uuid, sessionId, thread->id(), &mEffectInterface);
+    mStatus = -ENODEV;
+    sp<AudioFlinger> audioFlinger = mAudioFlinger.promote();
+    if (audioFlinger.get() != NULL) {
+        sp<EffectsFactoryHalInterface> effectsFactory = audioFlinger->getEffectsFactory();
+        if (effectsFactory.get() != NULL) {
+            mStatus = effectsFactory->createEffect(
+                    &desc->uuid, sessionId, thread->id(), &mEffectInterface);
+        }
+    }
 
     if (mStatus != NO_ERROR) {
         return;
@@ -86,21 +97,20 @@
         goto Error;
     }
 
-    ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface);
+    ALOGV("Constructor success name %s, Interface %p", mDescriptor.name, mEffectInterface.get());
     return;
 Error:
-    EffectRelease(mEffectInterface);
-    mEffectInterface = NULL;
+    mEffectInterface.clear();
     ALOGV("Constructor Error %d", mStatus);
 }
 
 AudioFlinger::EffectModule::~EffectModule()
 {
     ALOGV("Destructor %p", this);
-    if (mEffectInterface != NULL) {
+    if (mEffectInterface.get() != NULL) {
         remove_effect_from_hal_l();
         // release effect engine
-        EffectRelease(mEffectInterface);
+        mEffectInterface.clear();
     }
 }
 
@@ -266,7 +276,7 @@
 {
     Mutex::Autolock _l(mLock);
 
-    if (mState == DESTROYED || mEffectInterface == NULL ||
+    if (mState == DESTROYED || mEffectInterface.get() == NULL ||
             mConfig.inputCfg.buffer.raw == NULL ||
             mConfig.outputCfg.buffer.raw == NULL) {
         return;
@@ -282,9 +292,7 @@
         int ret;
         if (isProcessImplemented()) {
             // do the actual processing in the effect engine
-            ret = (*mEffectInterface)->process(mEffectInterface,
-                                                   &mConfig.inputCfg.buffer,
-                                                   &mConfig.outputCfg.buffer);
+            ret = mEffectInterface->process(&mConfig.inputCfg.buffer, &mConfig.outputCfg.buffer);
         } else {
             if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
                 size_t frameCnt = mConfig.inputCfg.buffer.frameCount * FCC_2;  //always stereo here
@@ -330,10 +338,10 @@
 
 void AudioFlinger::EffectModule::reset_l()
 {
-    if (mStatus != NO_ERROR || mEffectInterface == NULL) {
+    if (mStatus != NO_ERROR || mEffectInterface.get() == NULL) {
         return;
     }
-    (*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL);
+    mEffectInterface->command(EFFECT_CMD_RESET, 0, NULL, 0, NULL);
 }
 
 status_t AudioFlinger::EffectModule::configure()
@@ -343,7 +351,7 @@
     uint32_t size;
     audio_channel_mask_t channelMask;
 
-    if (mEffectInterface == NULL) {
+    if (mEffectInterface.get() == NULL) {
         status = NO_INIT;
         goto exit;
     }
@@ -406,12 +414,11 @@
 
     status_t cmdStatus;
     size = sizeof(int);
-    status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_SET_CONFIG,
-                                                   sizeof(effect_config_t),
-                                                   &mConfig,
-                                                   &size,
-                                                   &cmdStatus);
+    status = mEffectInterface->command(EFFECT_CMD_SET_CONFIG,
+                                       sizeof(effect_config_t),
+                                       &mConfig,
+                                       &size,
+                                       &cmdStatus);
     if (status == 0) {
         status = cmdStatus;
     }
@@ -433,12 +440,11 @@
         }
 
         *((int32_t *)p->data + 1)= latency;
-        (*mEffectInterface)->command(mEffectInterface,
-                                     EFFECT_CMD_SET_PARAM,
-                                     sizeof(effect_param_t) + 8,
-                                     &buf32,
-                                     &size,
-                                     &cmdStatus);
+        mEffectInterface->command(EFFECT_CMD_SET_PARAM,
+                                  sizeof(effect_param_t) + 8,
+                                  &buf32,
+                                  &size,
+                                  &cmdStatus);
     }
 
     mMaxDisableWaitCnt = (MAX_DISABLE_TIME_MS * mConfig.outputCfg.samplingRate) /
@@ -452,17 +458,16 @@
 status_t AudioFlinger::EffectModule::init()
 {
     Mutex::Autolock _l(mLock);
-    if (mEffectInterface == NULL) {
+    if (mEffectInterface.get() == NULL) {
         return NO_INIT;
     }
     status_t cmdStatus;
     uint32_t size = sizeof(status_t);
-    status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_INIT,
-                                                   0,
-                                                   NULL,
-                                                   &size,
-                                                   &cmdStatus);
+    status_t status = mEffectInterface->command(EFFECT_CMD_INIT,
+                                                0,
+                                                NULL,
+                                                &size,
+                                                &cmdStatus);
     if (status == 0) {
         status = cmdStatus;
     }
@@ -477,7 +482,8 @@
         if (thread != 0) {
             audio_stream_t *stream = thread->stream();
             if (stream != NULL) {
-                stream->add_audio_effect(stream, mEffectInterface);
+                stream->add_audio_effect(stream,
+                        static_cast<EffectHalLocal*>(mEffectInterface.get())->handle());
             }
         }
     }
@@ -503,7 +509,7 @@
 
 status_t AudioFlinger::EffectModule::start_l()
 {
-    if (mEffectInterface == NULL) {
+    if (mEffectInterface.get() == NULL) {
         return NO_INIT;
     }
     if (mStatus != NO_ERROR) {
@@ -511,12 +517,11 @@
     }
     status_t cmdStatus;
     uint32_t size = sizeof(status_t);
-    status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_ENABLE,
-                                                   0,
-                                                   NULL,
-                                                   &size,
-                                                   &cmdStatus);
+    status_t status = mEffectInterface->command(EFFECT_CMD_ENABLE,
+                                                0,
+                                                NULL,
+                                                &size,
+                                                &cmdStatus);
     if (status == 0) {
         status = cmdStatus;
     }
@@ -534,7 +539,7 @@
 
 status_t AudioFlinger::EffectModule::stop_l()
 {
-    if (mEffectInterface == NULL) {
+    if (mEffectInterface.get() == NULL) {
         return NO_INIT;
     }
     if (mStatus != NO_ERROR) {
@@ -542,12 +547,11 @@
     }
     status_t cmdStatus = NO_ERROR;
     uint32_t size = sizeof(status_t);
-    status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   EFFECT_CMD_DISABLE,
-                                                   0,
-                                                   NULL,
-                                                   &size,
-                                                   &cmdStatus);
+    status_t status = mEffectInterface->command(EFFECT_CMD_DISABLE,
+                                                0,
+                                                NULL,
+                                                &size,
+                                                &cmdStatus);
     if (status == NO_ERROR) {
         status = cmdStatus;
     }
@@ -565,7 +569,8 @@
         if (thread != 0) {
             audio_stream_t *stream = thread->stream();
             if (stream != NULL) {
-                stream->remove_audio_effect(stream, mEffectInterface);
+                stream->remove_audio_effect(stream,
+                        static_cast<EffectHalLocal*>(mEffectInterface.get())->handle());
             }
         }
     }
@@ -586,9 +591,9 @@
                                              void *pReplyData)
 {
     Mutex::Autolock _l(mLock);
-    ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface);
+    ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface.get());
 
-    if (mState == DESTROYED || mEffectInterface == NULL) {
+    if (mState == DESTROYED || mEffectInterface.get() == NULL) {
         return NO_INIT;
     }
     if (mStatus != NO_ERROR) {
@@ -616,12 +621,11 @@
         android_errorWriteLog(0x534e4554, "30204301");
         return -EINVAL;
     }
-    status_t status = (*mEffectInterface)->command(mEffectInterface,
-                                                   cmdCode,
-                                                   cmdSize,
-                                                   pCmdData,
-                                                   replySize,
-                                                   pReplyData);
+    status_t status = mEffectInterface->command(cmdCode,
+                                                cmdSize,
+                                                pCmdData,
+                                                replySize,
+                                                pReplyData);
     if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) {
         uint32_t size = (replySize == NULL) ? 0 : *replySize;
         for (size_t i = 1; i < mHandles.size(); i++) {
@@ -743,12 +747,11 @@
         if (controller) {
             pVolume = volume;
         }
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              EFFECT_CMD_SET_VOLUME,
-                                              size,
-                                              volume,
-                                              &size,
-                                              pVolume);
+        status = mEffectInterface->command(EFFECT_CMD_SET_VOLUME,
+                                           size,
+                                           volume,
+                                           &size,
+                                           pVolume);
         if (controller && status == NO_ERROR && size == sizeof(volume)) {
             *left = volume[0];
             *right = volume[1];
@@ -773,12 +776,11 @@
         uint32_t size = sizeof(status_t);
         uint32_t cmd = audio_is_output_devices(device) ? EFFECT_CMD_SET_DEVICE :
                             EFFECT_CMD_SET_INPUT_DEVICE;
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              cmd,
-                                              sizeof(uint32_t),
-                                              &device,
-                                              &size,
-                                              &cmdStatus);
+        status = mEffectInterface->command(cmd,
+                                           sizeof(uint32_t),
+                                           &device,
+                                           &size,
+                                           &cmdStatus);
     }
     return status;
 }
@@ -793,12 +795,11 @@
     if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_MODE_MASK) == EFFECT_FLAG_AUDIO_MODE_IND) {
         status_t cmdStatus;
         uint32_t size = sizeof(status_t);
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              EFFECT_CMD_SET_AUDIO_MODE,
-                                              sizeof(audio_mode_t),
-                                              &mode,
-                                              &size,
-                                              &cmdStatus);
+        status = mEffectInterface->command(EFFECT_CMD_SET_AUDIO_MODE,
+                                           sizeof(audio_mode_t),
+                                           &mode,
+                                           &size,
+                                           &cmdStatus);
         if (status == NO_ERROR) {
             status = cmdStatus;
         }
@@ -815,12 +816,11 @@
     status_t status = NO_ERROR;
     if ((mDescriptor.flags & EFFECT_FLAG_AUDIO_SOURCE_MASK) == EFFECT_FLAG_AUDIO_SOURCE_IND) {
         uint32_t size = 0;
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              EFFECT_CMD_SET_AUDIO_SOURCE,
-                                              sizeof(audio_source_t),
-                                              &source,
-                                              &size,
-                                              NULL);
+        status = mEffectInterface->command(EFFECT_CMD_SET_AUDIO_SOURCE,
+                                           sizeof(audio_source_t),
+                                           &source,
+                                           &size,
+                                           NULL);
     }
     return status;
 }
@@ -867,12 +867,11 @@
 
         cmd.isOffload = offloaded;
         cmd.ioHandle = io;
-        status = (*mEffectInterface)->command(mEffectInterface,
-                                              EFFECT_CMD_OFFLOAD,
-                                              sizeof(effect_offload_param_t),
-                                              &cmd,
-                                              &size,
-                                              &cmdStatus);
+        status = mEffectInterface->command(EFFECT_CMD_OFFLOAD,
+                                           sizeof(effect_offload_param_t),
+                                           &cmd,
+                                           &size,
+                                           &cmdStatus);
         if (status == NO_ERROR) {
             status = cmdStatus;
         }
@@ -1015,7 +1014,7 @@
 
     result.append("\t\tSession Status State Engine:\n");
     snprintf(buffer, SIZE, "\t\t%05d   %03d    %03d   %p\n",
-            mSessionId, mStatus, mState, mEffectInterface);
+            mSessionId, mStatus, mState, mEffectInterface.get());
     result.append(buffer);
 
     result.append("\t\tDescriptor:\n");
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 5980433..53dbe4b 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -148,7 +148,7 @@
     const audio_session_t mSessionId; // audio session ID
     const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
     effect_config_t     mConfig;    // input and output audio configuration
-    effect_handle_t  mEffectInterface; // Effect module C API
+    sp<EffectHalInterface> mEffectInterface; // Effect module HAL
     status_t            mStatus;    // initialization status
     effect_state        mState;     // current activation state
     Vector<EffectHandle *> mHandles;    // list of client handles
diff --git a/services/audioflinger/EffectsFactoryHalInterface.h b/services/audioflinger/EffectsFactoryHalInterface.h
new file mode 100644
index 0000000..a865dd1
--- /dev/null
+++ b/services/audioflinger/EffectsFactoryHalInterface.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
+
+#include <hardware/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include "EffectHalInterface.h"
+
+namespace android {
+
+class EffectsFactoryHalInterface : public RefBase
+{
+  public:
+    virtual ~EffectsFactoryHalInterface() {}
+
+    // Returns the number of different effects in all loaded libraries.
+    virtual status_t queryNumberEffects(uint32_t *pNumEffects) = 0;
+
+    // Returns a descriptor of the next available effect.
+    virtual status_t getDescriptor(uint32_t index,
+            effect_descriptor_t *pDescriptor) = 0;
+
+    virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+            effect_descriptor_t *pDescriptor) = 0;
+
+    // Creates an effect engine of the specified type.
+    // To release the effect engine, it is necessary to release references
+    // to the returned effect object.
+    virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+            int32_t sessionId, int32_t ioId,
+            sp<EffectHalInterface> *effect) = 0;
+
+    virtual status_t dumpEffects(int fd) = 0;
+
+    static sp<EffectsFactoryHalInterface> create();
+
+    // Helper function to compare effect uuid to EFFECT_UUID_NULL.
+    static bool isNullUuid(const effect_uuid_t *pEffectUuid);
+
+  protected:
+    // Subclasses can not be constructed directly by clients.
+    EffectsFactoryHalInterface() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
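For illustration, the intended call sequence through this factory, written as a hypothetical helper that mirrors EffectModule::init() and EffectModule::process() in Effects.cpp below (not part of the patch itself):

    status_t runEffectOnce(const sp<EffectsFactoryHalInterface>& factory,
            const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId,
            audio_buffer_t *in, audio_buffer_t *out) {
        sp<EffectHalInterface> effect;
        status_t status = factory->createEffect(uuid, sessionId, ioId, &effect);
        if (status != OK) return status;
        status_t cmdStatus = OK;
        uint32_t replySize = sizeof(cmdStatus);
        effect->command(EFFECT_CMD_INIT, 0, NULL, &replySize, &cmdStatus);  // as in EffectModule::init()
        status = effect->process(in, out);                                  // as in EffectModule::process()
        // 'effect' going out of scope releases the engine (EffectHalLocal calls EffectRelease()).
        return status;
    }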
diff --git a/services/audioflinger/EffectsFactoryHalLocal.cpp b/services/audioflinger/EffectsFactoryHalLocal.cpp
new file mode 100644
index 0000000..bbdef5d
--- /dev/null
+++ b/services/audioflinger/EffectsFactoryHalLocal.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/EffectsFactoryApi.h>
+
+#include "EffectHalLocal.h"
+#include "EffectsFactoryHalLocal.h"
+
+namespace android {
+
+// static
+sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
+    return new EffectsFactoryHalLocal();
+}
+
+// static
+bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
+    return EffectIsNullUuid(pEffectUuid);
+}
+
+status_t EffectsFactoryHalLocal::queryNumberEffects(uint32_t *pNumEffects) {
+    return EffectQueryNumberEffects(pNumEffects);
+}
+
+status_t EffectsFactoryHalLocal::getDescriptor(
+        uint32_t index, effect_descriptor_t *pDescriptor) {
+    return EffectQueryEffect(index, pDescriptor);
+}
+
+status_t EffectsFactoryHalLocal::getDescriptor(
+        const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
+    return EffectGetDescriptor(pEffectUuid, pDescriptor);
+}
+
+status_t EffectsFactoryHalLocal::createEffect(
+        const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
+        sp<EffectHalInterface> *effect) {
+    effect_handle_t handle;
+    int result = EffectCreate(pEffectUuid, sessionId, ioId, &handle);
+    if (result == 0) {
+        *effect = new EffectHalLocal(handle);
+    }
+    return result;
+}
+
+status_t EffectsFactoryHalLocal::dumpEffects(int fd) {
+    return EffectDumpEffects(fd);
+}
+
+} // namespace android
diff --git a/services/audioflinger/EffectsFactoryHalLocal.h b/services/audioflinger/EffectsFactoryHalLocal.h
new file mode 100644
index 0000000..244176a
--- /dev/null
+++ b/services/audioflinger/EffectsFactoryHalLocal.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
+#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
+
+#include "EffectsFactoryHalInterface.h"
+
+namespace android {
+
+class EffectsFactoryHalLocal : public EffectsFactoryHalInterface
+{
+  public:
+    virtual ~EffectsFactoryHalLocal() {}
+
+    // Returns the number of different effects in all loaded libraries.
+    virtual status_t queryNumberEffects(uint32_t *pNumEffects);
+
+    // Returns a descriptor of the next available effect.
+    virtual status_t getDescriptor(uint32_t index,
+            effect_descriptor_t *pDescriptor);
+
+    virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
+            effect_descriptor_t *pDescriptor);
+
+    // Creates an effect engine of the specified type.
+    // To release the effect engine, it is necessary to release references
+    // to the returned effect object.
+    virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
+            int32_t sessionId, int32_t ioId,
+            sp<EffectHalInterface> *effect);
+
+    virtual status_t dumpEffects(int fd);
+
+  private:
+    friend class EffectsFactoryHalInterface;
+
+    // Can not be constructed directly by clients.
+    EffectsFactoryHalLocal() {}
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index bee17fd..c94ea45 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -202,9 +202,9 @@
                     if (hwModule != AUDIO_MODULE_HANDLE_NONE) {
                         ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(hwModule);
                         if (index >= 0) {
-                            audio_hw_device_t *hwDevice =
+                            sp<DeviceHalInterface> hwDevice =
                                     audioflinger->mAudioHwDevs.valueAt(index)->hwDevice();
-                            hwDevice->release_audio_patch(hwDevice, halHandle);
+                            hwDevice->releaseAudioPatch(halHandle);
                         }
                     }
                 }
@@ -344,13 +344,12 @@
                         goto exit;
                     }
 
-                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                    status = hwDevice->create_audio_patch(hwDevice,
-                                                           patch->num_sources,
-                                                           patch->sources,
-                                                           patch->num_sinks,
-                                                           patch->sinks,
-                                                           &halHandle);
+                    sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
+                    status = hwDevice->createAudioPatch(patch->num_sources,
+                                                        patch->sources,
+                                                        patch->num_sinks,
+                                                        patch->sinks,
+                                                        &halHandle);
                 }
             }
         } break;
@@ -623,8 +622,8 @@
                     status = INVALID_OPERATION;
                     break;
                 }
-                audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
+                sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
+                status = hwDevice->releaseAudioPatch(removedPatch->mHalHandle);
             }
         } break;
         case AUDIO_PORT_TYPE_MIX: {
@@ -688,8 +687,8 @@
 
     AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
     if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-        return hwDevice->set_audio_port_config(hwDevice, config);
+        sp<DeviceHalInterface> hwDevice = audioHwDevice->hwDevice();
+        return hwDevice->setAudioPortConfig(config);
     } else {
         return INVALID_OPERATION;
     }
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 5601bde..cfa3e1a 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -166,7 +166,7 @@
                                         // 'volatile' means accessed without lock or
                                         // barrier, but is read/written atomically
     bool                mIsInvalid; // non-resettable latch, set by invalidate()
-    AudioTrackServerProxy*  mAudioTrackServerProxy;
+    sp<AudioTrackServerProxy>  mAudioTrackServerProxy;
     bool                mResumeToStopping; // track was paused in stopping state.
     bool                mFlushHwPending; // track requests for thread flush
     audio_output_flags_t mFlags;
@@ -214,8 +214,8 @@
     Vector < Buffer* >          mBufferQueue;
     AudioBufferProvider::Buffer mOutBuffer;
     bool                        mActive;
-    DuplicatingThread* const mSourceThread; // for waitTimeMs() in write()
-    AudioTrackClientProxy*      mClientProxy;
+    DuplicatingThread* const    mSourceThread; // for waitTimeMs() in write()
+    sp<AudioTrackClientProxy>   mClientProxy;
 };  // end of OutputTrack
 
 // playback track, used by PatchPanel
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h
index c870250..801c1f0 100644
--- a/services/audioflinger/SpdifStreamOut.h
+++ b/services/audioflinger/SpdifStreamOut.h
@@ -25,7 +25,6 @@
 
 #include "AudioHwDevice.h"
 #include "AudioStreamOut.h"
-#include "SpdifStreamOut.h"
 
 #include <audio_utils/spdif/SPDIFEncoder.h>
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index bab8b78..923fcf6 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -143,6 +143,12 @@
 // Direct output thread minimum sleep time in idle or active(underrun) state
 static const nsecs_t kDirectMinSleepTimeUs = 10000;
 
+// The universal constant for the ubiquitous 20 ms value. 20 ms seems to provide a good
+// balance between power consumption and latency, and allows threads to be scheduled reliably
+// by the CFS scheduler.
+// FIXME Express other hardcoded references to 20 ms in terms of this constant and move
+// it to an appropriate location.
+#define FMS_20 20
 
 // Whether to use fast mixer
 static const enum {
@@ -3515,13 +3521,12 @@
     mPatch = *patch;
 
     if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
-        status = hwDevice->create_audio_patch(hwDevice,
-                                               patch->num_sources,
-                                               patch->sources,
-                                               patch->num_sinks,
-                                               patch->sinks,
-                                               handle);
+        sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
+        status = hwDevice->createAudioPatch(patch->num_sources,
+                                            patch->sources,
+                                            patch->num_sinks,
+                                            patch->sinks,
+                                            handle);
     } else {
         char *address;
         if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
@@ -3567,8 +3572,8 @@
     mOutDevice = AUDIO_DEVICE_NONE;
 
     if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
-        status = hwDevice->release_audio_patch(hwDevice, handle);
+        sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
+        status = hwDevice->releaseAudioPatch(handle);
     } else {
         AudioParameter param;
         param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
@@ -4308,7 +4313,7 @@
                 // read original volumes with volume control
                 float typeVolume = mStreamTypes[track->streamType()].volume;
                 float v = masterVolume * typeVolume;
-                AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+                sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
                 vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
                 vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
@@ -4798,7 +4803,7 @@
     } else {
         float typeVolume = mStreamTypes[track->streamType()].volume;
         float v = mMasterVolume * typeVolume;
-        AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+        sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
         gain_minifloat_packed_t vlr = proxy->getVolumeLR();
         left = float_from_gain(gain_minifloat_unpack_left(vlr));
         if (left > GAIN_FLOAT_UNITY) {
@@ -5887,7 +5892,7 @@
                                          ) :
     ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
     mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL),
-    // mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l()
+    // mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
     mRsmpInRear(0)
 #ifdef TEE_SINK
     , mTeeSink(teeSink)
@@ -5939,7 +5944,8 @@
     if (initFastCapture) {
         // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
         NBAIO_Format format = mInputSource->format();
-        size_t pipeFramesP2 = roundup(mSampleRate / 25);    // double-buffering of 20 ms each
+        // quadruple-buffering of 20 ms each; this ensures we can sleep for 20ms in RecordThread
+        size_t pipeFramesP2 = roundup(4 * FMS_20 * mSampleRate / 1000);
         size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
         void *pipeBuffer;
         const sp<MemoryDealer> roHeap(readOnlyHeap());
@@ -6269,11 +6275,31 @@
         // If an NBAIO source is present, use it to read the normal capture's data
         if (mPipeSource != 0) {
             size_t framesToRead = mBufferSize / mFrameSize;
+            framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
             framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
                     framesToRead);
-            if (framesRead == 0) {
-                // since pipe is non-blocking, simulate blocking input
-                sleepUs = (framesToRead * 1000000LL) / mSampleRate;
+            // Since the pipe is non-blocking, simulate blocking input by waiting for half of
+            // the buffer size, or for at least 20 ms.
+            size_t sleepFrames = max(
+                    min(mPipeFramesP2, mRsmpInFramesP2) / 2, FMS_20 * mSampleRate / 1000);
+            if (framesRead <= (ssize_t) sleepFrames) {
+                sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
+            }
+            if (framesRead < 0) {
+                status_t status = (status_t) framesRead;
+                switch (status) {
+                case OVERRUN:
+                    ALOGW("overrun on read from pipe");
+                    framesRead = 0;
+                    break;
+                case NEGOTIATE:
+                    ALOGE("re-negotiation is needed");
+                    framesRead = -1;  // Will cause an attempt to recover.
+                    break;
+                default:
+                    ALOGE("unknown error %d on read from pipe", status);
+                    break;
+                }
             }
         // otherwise use the HAL / AudioStreamIn directly
         } else {
@@ -6507,6 +6533,16 @@
         }
     }
     mInput->stream->common.standby(&mInput->stream->common);
+
+    // If going into standby, flush the pipe source.
+    if (mPipeSource.get() != nullptr) {
+        const ssize_t flushed = mPipeSource->flush();
+        if (flushed > 0) {
+            ALOGV("Input standby flushed PipeSource %zd frames", flushed);
+            mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += flushed;
+            mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
+        }
+    }
 }
 
 // RecordThread::createRecordTrack_l() must be called with AudioFlinger::mLock held
@@ -7451,9 +7487,9 @@
     // The current value is higher than necessary.  However it should not add to latency.
 
     // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
-    size_t bufferSize = (mRsmpInFramesP2 + mFrameCount - 1) * mFrameSize;
-    (void)posix_memalign(&mRsmpInBuffer, 32, bufferSize);
-    memset(mRsmpInBuffer, 0, bufferSize); // if posix_memalign fails, will segv here.
+    mRsmpInFramesOA = mRsmpInFramesP2 + mFrameCount - 1;
+    (void)posix_memalign(&mRsmpInBuffer, 32, mRsmpInFramesOA * mFrameSize);
+    memset(mRsmpInBuffer, 0, mRsmpInFramesOA * mFrameSize); // if posix_memalign fails, will segv here.
 
     // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
     // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
@@ -7589,13 +7625,12 @@
     }
 
     if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
-        status = hwDevice->create_audio_patch(hwDevice,
-                                               patch->num_sources,
-                                               patch->sources,
-                                               patch->num_sinks,
-                                               patch->sinks,
-                                               handle);
+        sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
+        status = hwDevice->createAudioPatch(patch->num_sources,
+                                            patch->sources,
+                                            patch->num_sinks,
+                                            patch->sinks,
+                                            handle);
     } else {
         char *address;
         if (strcmp(patch->sources[0].ext.device.address, "") != 0) {
@@ -7631,8 +7666,8 @@
     mInDevice = AUDIO_DEVICE_NONE;
 
     if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
-        status = hwDevice->release_audio_patch(hwDevice, handle);
+        sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
+        status = hwDevice->releaseAudioPatch(handle);
     } else {
         AudioParameter param;
         param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
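To make the new fast-capture pipe sizing concrete, a worked example at a 48 kHz capture rate (assuming roundup() rounds up to the next power of two, as in audio_utils):

    // old: pipeFramesP2 = roundup(48000 / 25)                = roundup(1920) = 2048 frames (~2 x 20 ms)
    // new: pipeFramesP2 = roundup(4 * FMS_20 * 48000 / 1000) = roundup(3840) = 4096 frames (~4 x 20 ms)
    //
    // and the simulated blocking read always sleeps for at least one 20 ms period:
    // sleepFrames >= FMS_20 * 48000 / 1000 = 960 frames  =>  sleepUs >= 960 * 1000000 / 48000 = 20000 us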
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index bcc0a2e..15536f1 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1353,9 +1353,10 @@
             Condition                           mStartStopCond;
 
             // resampler converts input at HAL Hz to output at AudioRecord client Hz
-            void                               *mRsmpInBuffer; //
+            void                               *mRsmpInBuffer;  // size = mRsmpInFramesOA
             size_t                              mRsmpInFrames;  // size of resampler input in frames
             size_t                              mRsmpInFramesP2;// size rounded up to a power-of-2
+            size_t                              mRsmpInFramesOA;// mRsmpInFramesP2 + over-allocation
 
             // rolling index that is never cleared
             int32_t                             mRsmpInRear;    // last filled frame + 1
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 6b97246..7c48375 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -156,7 +156,7 @@
     int                 mUid;
     Vector < sp<SyncEvent> >mSyncEvents;
     const bool          mIsOut;
-    ServerProxy*        mServerProxy;
+    sp<ServerProxy>     mServerProxy;
     const int           mId;
     sp<NBAIO_Sink>      mTeeSink;
     sp<NBAIO_Source>    mTeeSource;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 3cca054..02489c2 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -52,7 +52,7 @@
 
 // TODO move to a common header  (Also shared with AudioTrack.cpp)
 #define NANOS_PER_SECOND    1000000000
-#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * NANOS_PER_SECOND + time.tv_nsec)
+#define TIME_TO_NANOS(time) ((uint64_t)(time).tv_sec * NANOS_PER_SECOND + (time).tv_nsec)
 
 namespace android {
 
@@ -93,7 +93,6 @@
         mFrameCount(frameCount),
         mSessionId(sessionId),
         mIsOut(isOut),
-        mServerProxy(NULL),
         mId(android_atomic_inc(&nextTrackId)),
         mTerminated(false),
         mType(type),
@@ -218,7 +217,7 @@
     dumpTee(-1, mTeeSource, mId);
 #endif
     // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
-    delete mServerProxy;
+    mServerProxy.clear();
     if (mCblk != NULL) {
         if (mClient == 0) {
             delete mCblk;
@@ -364,7 +363,6 @@
     mFastIndex(-1),
     mCachedVolume(1.0),
     mIsInvalid(false),
-    mAudioTrackServerProxy(NULL),
     mResumeToStopping(false),
     mFlushHwPending(false),
     mFlags(flags)
@@ -1145,7 +1143,7 @@
               sampleRate, format, channelMask, frameCount,
               NULL, 0, AUDIO_SESSION_NONE, uid, AUDIO_OUTPUT_FLAG_NONE,
               TYPE_OUTPUT),
-    mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
+    mActive(false), mSourceThread(sourceThread)
 {
 
     if (mCblk != NULL) {
@@ -1170,7 +1168,6 @@
 AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
 {
     clearBufferQueue();
-    delete mClientProxy;
     // superclass destructor will now delete the server proxy and shared memory both refer to
 }
 
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index 3505e0f..bf94f71 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -34,8 +34,10 @@
 
 LOCAL_SRC_FILES:= \
 	test-mixer.cpp \
-	../AudioMixer.cpp.arm \
-	../BufferProviders.cpp
+	../AudioMixer.cpp.arm  \
+	../BufferProviders.cpp \
+	../EffectHalLocal.cpp  \
+	../EffectsFactoryHalLocal.cpp
 
 LOCAL_C_INCLUDES := \
 	$(call include-path-for, audio-effects) \
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index bb8a8fa..a1221fc 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -67,6 +67,16 @@
         API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
     } input_type_t;
 
+    enum {
+        API_INPUT_CONCURRENCY_NONE = 0,
+        API_INPUT_CONCURRENCY_CALL = (1 << 0),      // Concurrency with a call
+        API_INPUT_CONCURRENCY_CAPTURE = (1 << 1),   // Concurrency with another capture
+
+        API_INPUT_CONCURRENCY_ALL = (API_INPUT_CONCURRENCY_CALL | API_INPUT_CONCURRENCY_CAPTURE),
+    };
+
+    typedef uint32_t concurrency_type__mask_t;
+
 public:
     virtual ~AudioPolicyInterface() {}
     //
@@ -140,7 +150,8 @@
                                      input_type_t *inputType) = 0;
     // indicates to the audio policy manager that the input starts being used.
     virtual status_t startInput(audio_io_handle_t input,
-                                audio_session_t session) = 0;
+                                audio_session_t session,
+                                concurrency_type__mask_t *concurrency) = 0;
     // indicates to the audio policy manager that the input stops being used.
     virtual status_t stopInput(audio_io_handle_t input,
                                audio_session_t session) = 0;
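For illustration, a hypothetical caller of the extended startInput() and how it might interpret the returned concurrency mask (mAudioPolicyManager, input, and session are placeholders; the AudioPolicyService-side handling is not part of this excerpt):

    AudioPolicyInterface::concurrency_type__mask_t concurrency =
            AudioPolicyInterface::API_INPUT_CONCURRENCY_NONE;
    status_t status = mAudioPolicyManager->startInput(input, session, &concurrency);
    if (status == OK) {
        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
            // capture was started while a call is active
        }
        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
            // another capture session is already active on the same input
        }
    }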
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 55ee91f..71d70de 100755
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -33,9 +33,9 @@
 
 /**
  * A device mask for all audio input devices that are considered "virtual" when evaluating
- * active inputs in getActiveInput()
+ * active inputs in getActiveInputs()
  */
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX)
 
 
 /**
@@ -109,6 +109,44 @@
             ((device & APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL) != 0));
 }
 
+/**
+ * Returns the priority of a given audio source for capture. The priority is used when more than one
+ * capture session is active on a given input stream to determine which session drives routing and
+ * effect configuration.
+ *
+ * @param[in] inputSource to consider. Valid sources are:
+ * - AUDIO_SOURCE_VOICE_COMMUNICATION
+ * - AUDIO_SOURCE_CAMCORDER
+ * - AUDIO_SOURCE_MIC
+ * - AUDIO_SOURCE_FM_TUNER
+ * - AUDIO_SOURCE_VOICE_RECOGNITION
+ * - AUDIO_SOURCE_HOTWORD
+ *
+ * @return the corresponding input source priority, or 0 if priority is irrelevant for this source.
+ *      This happens when the specified source cannot share a given input stream (e.g. remote submix).
+ *      The higher the value, the higher the priority.
+ */
+static inline int32_t source_priority(audio_source_t inputSource)
+{
+    switch (inputSource) {
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:
+        return 6;
+    case AUDIO_SOURCE_CAMCORDER:
+        return 5;
+    case AUDIO_SOURCE_MIC:
+        return 4;
+    case AUDIO_SOURCE_FM_TUNER:
+        return 3;
+    case AUDIO_SOURCE_VOICE_RECOGNITION:
+        return 2;
+    case AUDIO_SOURCE_HOTWORD:
+        return 1;
+    default:
+        break;
+    }
+    return 0;
+}
+
 /* Indicates if audio formats are equivalent when considering a match between
  * audio HAL supported formats and client requested formats
  */
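For illustration, a sketch of how source_priority() can pick the capture source that should drive routing among concurrently active sessions (activeSources and count are placeholders for the policy manager's active session collection; not part of the patch):

    audio_source_t best = AUDIO_SOURCE_DEFAULT;   // priority 0, via the default case above
    for (size_t i = 0; i < count; i++) {
        if (source_priority(activeSources[i]) > source_priority(best)) {
            best = activeSources[i];
        }
    }
    // e.g. AUDIO_SOURCE_VOICE_COMMUNICATION (6) outranks AUDIO_SOURCE_MIC (4),
    // which outranks AUDIO_SOURCE_HOTWORD (1)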
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index ccea023..2b66c81 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -63,7 +63,9 @@
                              const sp<AudioSession>& audioSession);
     status_t removeAudioSession(audio_session_t session);
     sp<AudioSession> getAudioSession(audio_session_t session) const;
-    AudioSessionCollection getActiveAudioSessions() const;
+    AudioSessionCollection getAudioSessions(bool activeOnly) const;
+    size_t getAudioSessionCount(bool activeOnly) const;
+    audio_source_t getHighestPrioritySource(bool activeOnly) const;
 
     // implementation of AudioSessionInfoProvider
     virtual audio_config_base_t getConfig() const;
@@ -100,7 +102,7 @@
      * Only considers inputs from physical devices (e.g. main mic, headset mic) when
      * ignoreVirtualInputs is true.
      */
-    audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
+    Vector<sp <AudioInputDescriptor> > getActiveInputs(bool ignoreVirtualInputs = true);
 
     audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index d00d49f..99c0cd2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -166,6 +166,10 @@
     virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
                                    const struct audio_port_config *srcConfig = NULL) const = 0;
     virtual sp<AudioPort> getAudioPort() const = 0;
+    virtual bool hasSameHwModuleAs(const sp<AudioPortConfig>& other) const {
+        return (other != 0) &&
+                (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
+    }
     uint32_t mSamplingRate;
     audio_format_t mFormat;
     audio_channel_mask_t mChannelMask;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index 388c25d..18fba25 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -91,8 +91,10 @@
     uint32_t getOpenCount() const;
 
     AudioSessionCollection getActiveSessions() const;
+    size_t getActiveSessionCount() const;
     bool hasActiveSession() const;
     bool isSourceActive(audio_source_t source) const;
+    audio_source_t getHighestPrioritySource(bool activeOnly) const;
 
     // implementation of AudioSessionInfoUpdateListener
     virtual void onSessionInfoUpdate() const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index 1612714..069c9e7 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -67,19 +67,24 @@
 };
 struct StreamTraits
 {
-  typedef audio_stream_type_t Type;
-  typedef Vector<Type> Collection;
+    typedef audio_stream_type_t Type;
+    typedef Vector<Type> Collection;
 };
 struct DeviceCategoryTraits
 {
-  typedef device_category Type;
-  typedef Vector<Type> Collection;
+    typedef device_category Type;
+    typedef Vector<Type> Collection;
+};
+struct AudioModeTraits
+{
+    typedef audio_mode_t Type;
+    typedef Vector<Type> Collection;
 };
 template <typename T>
 struct DefaultTraits
 {
-  typedef T Type;
-  typedef Vector<Type> Collection;
+    typedef T Type;
+    typedef Vector<Type> Collection;
 };
 
 template <class Traits>
@@ -110,6 +115,8 @@
 
     static uint32_t maskFromString(const std::string &str, const char *del = "|");
 
+    static void maskToString(uint32_t mask, std::string &str, const char *del = "|");
+
 protected:
     struct Table {
         const char *literal;
@@ -117,7 +124,6 @@
     };
 
     static const Table mTable[];
-    static const size_t mSize;
 };
 
 typedef TypeConverter<DeviceTraits> DeviceConverter;
@@ -130,6 +136,7 @@
 typedef TypeConverter<GainModeTraits> GainModeConverter;
 typedef TypeConverter<StreamTraits> StreamTypeConverter;
 typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
+typedef TypeConverter<AudioModeTraits> AudioModeConverter;
 
 inline
 static SampleRateTraits::Collection samplingRatesFromString(const std::string &samplingRates,
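
The new maskToString() builds a delimiter-joined list of the literals for every flag set in a mask (the IOProfile dump further below uses it to print flags in readable form). A standalone sketch of the same idea over a placeholder table, not the real converter classes:

    #include <cstddef>
    #include <cstdint>
    #include <string>

    struct Table {
        const char *literal;
        uint32_t    value;
    };

    // Placeholder flag table; the real converters are built from MAKE_STRING_FROM_ENUM entries.
    static const Table kFlagTable[] = {
        { "FLAG_FAST",       1u << 0 },
        { "FLAG_HW_HOTWORD", 1u << 1 },
        { "FLAG_RAW",        1u << 2 },
    };

    // Append the literal of every flag present in 'mask', separated by 'del'.
    static void maskToString(uint32_t mask, std::string &str, const char *del = "|") {
        bool first = true;
        for (size_t i = 0; i < sizeof(kFlagTable) / sizeof(kFlagTable[0]); i++) {
            if ((mask & kFlagTable[i].value) == kFlagTable[i].value) {
                if (!first) str += del;
                first = false;
                str += kFlagTable[i].literal;
            }
        }
    }

    int main() {
        std::string s;
        maskToString((1u << 0) | (1u << 2), s);
        return s == "FLAG_FAST|FLAG_RAW" ? 0 : 1;
    }
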
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 6dacaa4..1164607 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -132,6 +132,12 @@
     return mSessions.isSourceActive(source);
 }
 
+audio_source_t AudioInputDescriptor::getHighestPrioritySource(bool activeOnly) const
+{
+
+    return mSessions.getHighestPrioritySource(activeOnly);
+}
+
 bool AudioInputDescriptor::isSoundTrigger() const {
     // sound trigger and non sound trigger sessions are not mixed
     // on a given input
@@ -143,9 +149,22 @@
     return mSessions.valueFor(session);
 }
 
-AudioSessionCollection AudioInputDescriptor::getActiveAudioSessions() const
+AudioSessionCollection AudioInputDescriptor::getAudioSessions(bool activeOnly) const
 {
-    return mSessions.getActiveSessions();
+    if (activeOnly) {
+        return mSessions.getActiveSessions();
+    } else {
+        return mSessions;
+    }
+}
+
+size_t AudioInputDescriptor::getAudioSessionCount(bool activeOnly) const
+{
+    if (activeOnly) {
+        return mSessions.getActiveSessionCount();
+    } else {
+        return mSessions.size();
+    }
 }
 
 status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
@@ -234,17 +253,19 @@
     return count;
 }
 
-audio_io_handle_t AudioInputCollection::getActiveInput(bool ignoreVirtualInputs)
+Vector<sp <AudioInputDescriptor> > AudioInputCollection::getActiveInputs(bool ignoreVirtualInputs)
 {
+    Vector<sp <AudioInputDescriptor> > activeInputs;
+
     for (size_t i = 0; i < size(); i++) {
         const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
         if ((inputDescriptor->isActive())
                 && (!ignoreVirtualInputs ||
                     !is_virtual_input_device(inputDescriptor->mDevice))) {
-            return keyAt(i);
+            activeInputs.add(inputDescriptor);
         }
     }
-    return 0;
+    return activeInputs;
 }
 
 audio_devices_t AudioInputCollection::getSupportedDevices(audio_io_handle_t handle) const
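
getActiveInputs() now returns every active input descriptor so that callers iterate over all of them instead of acting on a single, arbitrary active input. A standalone sketch of the same filtering, assuming a simplified InputDescriptor stand-in rather than the real class:

    #include <memory>
    #include <vector>

    // Simplified stand-in for AudioInputDescriptor (illustrative only).
    struct InputDescriptor {
        int  ioHandle;
        bool active;
        bool virtualDevice;  // e.g. remote submix
    };

    // Collect every active input, optionally skipping virtual devices,
    // mirroring the shift from "one active input" to "all active inputs".
    static std::vector<std::shared_ptr<InputDescriptor>> getActiveInputs(
            const std::vector<std::shared_ptr<InputDescriptor>>& inputs,
            bool ignoreVirtualInputs = true) {
        std::vector<std::shared_ptr<InputDescriptor>> activeInputs;
        for (const auto& desc : inputs) {
            if (desc->active && (!ignoreVirtualInputs || !desc->virtualDevice)) {
                activeInputs.push_back(desc);
            }
        }
        return activeInputs;
    }

    int main() {
        std::vector<std::shared_ptr<InputDescriptor>> inputs = {
            std::make_shared<InputDescriptor>(InputDescriptor{1, true, false}),
            std::make_shared<InputDescriptor>(InputDescriptor{2, true, true}),    // virtual, skipped
            std::make_shared<InputDescriptor>(InputDescriptor{3, false, false}),  // inactive, skipped
        };
        return getActiveInputs(inputs).size() == 1 ? 0 : 1;
    }
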
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 1dbc3d0..93b7f47 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -81,7 +81,7 @@
         return sharesHwModuleWith(outputDesc->subOutput1()) ||
                     sharesHwModuleWith(outputDesc->subOutput2());
     } else {
-        return (getModuleHandle() == outputDesc->getModuleHandle());
+        return hasSameHwModuleAs(outputDesc);
     }
 }
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index da983c5..3b63239 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -18,6 +18,7 @@
 //#define LOG_NDEBUG 0
 
 #include <AudioPolicyInterface.h>
+#include "policy.h"
 #include "AudioSession.h"
 #include "AudioGain.h"
 #include "TypeConverter.h"
@@ -214,9 +215,20 @@
     return activeSessions;
 }
 
+size_t AudioSessionCollection::getActiveSessionCount() const
+{
+    size_t activeCount = 0;
+    for (size_t i = 0; i < size(); i++) {
+        if (valueAt(i)->activeCount() != 0) {
+            activeCount++;
+        }
+    }
+    return activeCount;
+}
+
 bool AudioSessionCollection::hasActiveSession() const
 {
-    return getActiveSessions().size() != 0;
+    return getActiveSessionCount() != 0;
 }
 
 bool AudioSessionCollection::isSourceActive(audio_source_t source) const
@@ -236,6 +248,25 @@
     return false;
 }
 
+audio_source_t AudioSessionCollection::getHighestPrioritySource(bool activeOnly) const
+{
+    audio_source_t source = AUDIO_SOURCE_DEFAULT;
+    int32_t priority = -1;
+
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioSession>  audioSession = valueAt(i);
+        if (activeOnly && audioSession->activeCount() == 0) {
+            continue;
+        }
+        int32_t curPriority = source_priority(audioSession->inputSource());
+        if (curPriority > priority) {
+            priority = curPriority;
+            source = audioSession->inputSource();
+        }
+    }
+    return source;
+}
+
 void AudioSessionCollection::onSessionInfoUpdate() const
 {
     for (size_t i = 0; i < size(); i++) {
@@ -243,7 +274,6 @@
     }
 }
 
-
 status_t AudioSessionCollection::dump(int fd, int spaces) const
 {
     const size_t SIZE = 256;
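
hasActiveSession() now relies on getActiveSessionCount(), so checking for activity no longer materializes a filtered copy of the collection. A standalone sketch of the counting approach, with Session as a simplified stand-in for the real AudioSession:

    #include <cstddef>
    #include <vector>

    // Simplified stand-in for an audio session record.
    struct Session {
        int activeCount;  // number of concurrent start requests for this session
    };

    // Count sessions with a non-zero activity count; cheaper than building
    // and returning the filtered collection just to test for emptiness.
    static size_t getActiveSessionCount(const std::vector<Session>& sessions) {
        size_t active = 0;
        for (const Session& s : sessions) {
            if (s.activeCount != 0) {
                active++;
            }
        }
        return active;
    }

    static bool hasActiveSession(const std::vector<Session>& sessions) {
        return getActiveSessionCount(sessions) != 0;
    }

    int main() {
        std::vector<Session> sessions = { {0}, {2}, {0} };
        return hasActiveSession(sessions) ? 0 : 1;
    }
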
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 7a942cd..8cbbf44 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -42,8 +42,8 @@
     }
 }
 
-status_t HwModule::addOutputProfile(const String8 &name, const audio_config_t *config,
-                                    audio_devices_t device, const String8 &address)
+status_t HwModule::addOutputProfile(const String8& name, const audio_config_t *config,
+                                    audio_devices_t device, const String8& address)
 {
     sp<IOProfile> profile = new OutputProfile(name);
 
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 57f2534..74ef4ec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -108,8 +108,18 @@
 
     AudioPort::dump(fd, 4);
 
-    snprintf(buffer, SIZE, "    - flags: 0x%04x\n", getFlags());
+    snprintf(buffer, SIZE, "    - flags: 0x%04x", getFlags());
     result.append(buffer);
+    std::string flagsLiteral;
+    if (getRole() == AUDIO_PORT_ROLE_SINK) {
+        InputFlagConverter::maskToString(getFlags(), flagsLiteral);
+    } else if (getRole() == AUDIO_PORT_ROLE_SOURCE) {
+        OutputFlagConverter::maskToString(getFlags(), flagsLiteral);
+    }
+    if (!flagsLiteral.empty()) {
+        result.appendFormat(" (%s)", flagsLiteral.c_str());
+    }
+    result.append("\n");
     write(fd, result.string(), result.size());
     mSupportedDevices.dump(fd, String8("Supported"), 4, false);
 }
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 48bfd79..cfc0985 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -78,10 +78,6 @@
         MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_STUB),
 };
 
-template<>
-const size_t DeviceConverter::mSize = sizeof(DeviceConverter::mTable) /
-        sizeof(DeviceConverter::mTable[0]);
-
 
 template <>
 const OutputFlagConverter::Table OutputFlagConverter::mTable[] = {
@@ -96,9 +92,6 @@
     MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_RAW),
     MAKE_STRING_FROM_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
 };
-template<>
-const size_t OutputFlagConverter::mSize = sizeof(OutputFlagConverter::mTable) /
-        sizeof(OutputFlagConverter::mTable[0]);
 
 
 template <>
@@ -108,9 +101,6 @@
     MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_RAW),
     MAKE_STRING_FROM_ENUM(AUDIO_INPUT_FLAG_SYNC),
 };
-template<>
-const size_t InputFlagConverter::mSize = sizeof(InputFlagConverter::mTable) /
-        sizeof(InputFlagConverter::mTable[0]);
 
 
 template <>
@@ -144,9 +134,6 @@
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_IEC61937),
     MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_DOLBY_TRUEHD),
 };
-template<>
-const size_t FormatConverter::mSize = sizeof(FormatConverter::mTable) /
-        sizeof(FormatConverter::mTable[0]);
 
 
 template <>
@@ -157,9 +144,6 @@
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
 };
-template<>
-const size_t OutputChannelConverter::mSize = sizeof(OutputChannelConverter::mTable) /
-        sizeof(OutputChannelConverter::mTable[0]);
 
 
 template <>
@@ -168,9 +152,6 @@
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_STEREO),
     MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
 };
-template<>
-const size_t InputChannelConverter::mSize = sizeof(InputChannelConverter::mTable) /
-        sizeof(InputChannelConverter::mTable[0]);
 
 template <>
 const ChannelIndexConverter::Table ChannelIndexConverter::mTable[] = {
@@ -183,9 +164,6 @@
     {"AUDIO_CHANNEL_INDEX_MASK_7", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_7)},
     {"AUDIO_CHANNEL_INDEX_MASK_8", static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_INDEX_MASK_8)},
 };
-template<>
-const size_t ChannelIndexConverter::mSize = sizeof(ChannelIndexConverter::mTable) /
-        sizeof(ChannelIndexConverter::mTable[0]);
 
 
 template <>
@@ -195,9 +173,6 @@
     MAKE_STRING_FROM_ENUM(AUDIO_GAIN_MODE_RAMP),
 };
 
-template<>
-const size_t GainModeConverter::mSize = sizeof(GainModeConverter::mTable) /
-        sizeof(GainModeConverter::mTable[0]);
 
 template <>
 const DeviceCategoryConverter::Table DeviceCategoryConverter::mTable[] = {
@@ -207,9 +182,6 @@
     MAKE_STRING_FROM_ENUM(DEVICE_CATEGORY_EXT_MEDIA)
 };
 
-template<>
-const size_t DeviceCategoryConverter::mSize = sizeof(DeviceCategoryConverter::mTable) /
-        sizeof(DeviceCategoryConverter::mTable[0]);
 
 template <>
 const StreamTypeConverter::Table StreamTypeConverter::mTable[] = {
@@ -228,26 +200,37 @@
     MAKE_STRING_FROM_ENUM(AUDIO_STREAM_PATCH),
 };
 
+
 template<>
-const size_t StreamTypeConverter::mSize = sizeof(StreamTypeConverter::mTable) /
-        sizeof(StreamTypeConverter::mTable[0]);
+const AudioModeConverter::Table AudioModeConverter::mTable[] = {
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_INVALID),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_CURRENT),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_NORMAL),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_RINGTONE),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_CALL),
+    MAKE_STRING_FROM_ENUM(AUDIO_MODE_IN_COMMUNICATION),
+};
+
 
 template <class Traits>
 bool TypeConverter<Traits>::toString(const typename Traits::Type &value, std::string &str)
 {
-    for (size_t i = 0; i < mSize; i++) {
+    for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
         if (mTable[i].value == value) {
             str = mTable[i].literal;
             return true;
         }
     }
+    char result[64];
+    snprintf(result, sizeof(result), "Unknown enum value %d", value);
+    str = result;
     return false;
 }
 
 template <class Traits>
 bool TypeConverter<Traits>::fromString(const std::string &str, typename Traits::Type &result)
 {
-    for (size_t i = 0; i < mSize; i++) {
+    for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
         if (strcmp(mTable[i].literal, str.c_str()) == 0) {
             ALOGV("stringToEnum() found %s", mTable[i].literal);
             result = mTable[i].value;
@@ -288,6 +271,19 @@
     return value;
 }
 
+template <class Traits>
+void TypeConverter<Traits>::maskToString(uint32_t mask, std::string &str, const char *del)
+{
+    bool first_flag = true;
+    for (size_t i = 0; i < sizeof(mTable) / sizeof(mTable[0]); i++) {
+        if ((mask & mTable[i].value) == mTable[i].value) {
+            if (!first_flag) str += del;
+            first_flag = false;
+            str += mTable[i].literal;
+        }
+    }
+}
+
 template class TypeConverter<DeviceTraits>;
 template class TypeConverter<OutputFlagTraits>;
 template class TypeConverter<InputFlagTraits>;
@@ -298,6 +294,7 @@
 template class TypeConverter<GainModeTraits>;
 template class TypeConverter<StreamTraits>;
 template class TypeConverter<DeviceCategoryTraits>;
+template class TypeConverter<AudioModeTraits>;
 
 }; // namespace android
 
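
With the per-specialization mSize constants removed, every loop derives the table length from sizeof(mTable)/sizeof(mTable[0]) at the point of use, so the length can never drift from the table definition. A standalone sketch of that pattern over a placeholder table:

    #include <cstddef>
    #include <cstring>
    #include <string>

    struct Entry {
        const char *literal;
        int         value;
    };

    // Placeholder table; its length is computed where it is used, never stored separately.
    static const Entry kTable[] = {
        { "NORMAL",   0 },
        { "RINGTONE", 1 },
        { "IN_CALL",  2 },
    };

    static bool toString(int value, std::string &str) {
        for (size_t i = 0; i < sizeof(kTable) / sizeof(kTable[0]); i++) {
            if (kTable[i].value == value) {
                str = kTable[i].literal;
                return true;
            }
        }
        str = "Unknown enum value";  // fallback mirroring the change above
        return false;
    }

    static bool fromString(const std::string &str, int &value) {
        for (size_t i = 0; i < sizeof(kTable) / sizeof(kTable[0]); i++) {
            if (strcmp(kTable[i].literal, str.c_str()) == 0) {
                value = kTable[i].value;
                return true;
            }
        }
        return false;
    }

    int main() {
        std::string s;
        int v = -1;
        return (toString(2, s) && fromString("RINGTONE", v) && v == 1) ? 0 : 1;
    }
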
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index f1704bc..2dd5a57 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -448,15 +448,17 @@
         // FIXME: would be better to refine to only inputs whose profile connects to the
         // call TX device but this information is not in the audio patch and logic here must be
         // symmetric to the one in startInput()
-        audio_io_handle_t activeInput = mInputs.getActiveInput();
-        if (activeInput != 0) {
-            sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
-            if (activeDesc->getModuleHandle() == txSourceDeviceDesc->getModuleHandle()) {
-                //FIXME: consider all active sessions
-                AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
-                audio_session_t activeSession = activeSessions.keyAt(0);
-                stopInput(activeInput, activeSession);
-                releaseInput(activeInput, activeSession);
+        Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+        for (size_t i = 0; i < activeInputs.size(); i++) {
+            sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+            if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
+                AudioSessionCollection activeSessions =
+                        activeDesc->getAudioSessions(true /*activeOnly*/);
+                for (size_t j = 0; j < activeSessions.size(); j++) {
+                    audio_session_t activeSession = activeSessions.keyAt(j);
+                    stopInput(activeDesc->mIoHandle, activeSession);
+                    releaseInput(activeDesc->mIoHandle, activeSession);
+                }
             }
         }
 
@@ -627,15 +629,16 @@
         }
     }
 
-    audio_io_handle_t activeInput = mInputs.getActiveInput();
-    if (activeInput != 0) {
-        sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
-        audio_devices_t newDevice = getNewInputDevice(activeInput);
+    Vector<sp <AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+    for (size_t i = 0; i < activeInputs.size(); i++) {
+        sp<AudioInputDescriptor> activeDesc = activeInputs[i];
+        audio_devices_t newDevice = getNewInputDevice(activeDesc);
         // Force new input selection if the new device can not be reached via current input
-        if (activeDesc->mProfile->getSupportedDevices().types() & (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
-            setInputDevice(activeInput, newDevice);
+        if (activeDesc->mProfile->getSupportedDevices().types() &
+                (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
+            setInputDevice(activeDesc->mIoHandle, newDevice);
         } else {
-            closeInput(activeInput);
+            closeInput(activeDesc->mIoHandle);
         }
     }
 }
@@ -1416,6 +1419,7 @@
 
     *input = AUDIO_IO_HANDLE_NONE;
     *inputType = API_INPUT_INVALID;
+
     audio_devices_t device;
     // handle legacy remote submix case where the address was not always specified
     String8 address = String8("");
@@ -1559,14 +1563,22 @@
                                                               isSoundTrigger,
                                                               policyMix, mpClientInterface);
 
-// TODO enable input reuse
-#if 0
+
     // reuse an open input if possible
     for (size_t i = 0; i < mInputs.size(); i++) {
         sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
-        // reuse input if it shares the same profile and same sound trigger attribute
-        if (profile == desc->mProfile &&
-            isSoundTrigger == desc->isSoundTrigger()) {
+        // reuse input if:
+        // - it shares the same profile
+        //      AND
+        // - it is not a reroute submix input
+        //      AND
+        // - it is: not used for sound trigger
+        //                OR
+        //          used for sound trigger and all clients use the same session ID
+        //
+        if ((profile == desc->mProfile) &&
+            (isSoundTrigger == desc->isSoundTrigger()) &&
+            !is_virtual_input_device(device)) {
 
             sp<AudioSession> as = desc->getAudioSession(session);
             if (as != 0) {
@@ -1576,16 +1588,33 @@
                 } else {
                     ALOGW("getInputForDevice() record with different attributes"
                           " exists for session %d", session);
-                    return input;
+                    break;
                 }
+            } else if (isSoundTrigger) {
+                break;
+            }
+            // force close the input if the requested source is now the highest priority request
+            // on this input and the current input properties do not exactly match the request.
+            if ((desc->mSamplingRate != samplingRate ||
+                    desc->mChannelMask != channelMask ||
+                    !audio_formats_match(desc->mFormat, format)) &&
+                    (source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
+                     source_priority(inputSource))) {
+                ALOGV("%s: closing input %d for higher priority source", __FUNCTION__, desc->mIoHandle);
+                AudioSessionCollection sessions = desc->getAudioSessions(false /*activeOnly*/);
+                for (size_t j = 0; j < sessions.size(); j++) {
+                    audio_session_t currentSession = sessions.keyAt(j);
+                    stopInput(desc->mIoHandle, currentSession);
+                    releaseInput(desc->mIoHandle, currentSession);
+                }
+                break;
             } else {
                 desc->addAudioSession(session, audioSession);
+                ALOGV("%s: reusing input %d", __FUNCTION__, mInputs.keyAt(i));
+                return mInputs.keyAt(i);
             }
-            ALOGV("getInputForDevice() reusing input %d", mInputs.keyAt(i));
-            return mInputs.keyAt(i);
         }
     }
-#endif
 
     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
     config.sample_rate = profileSamplingRate;
@@ -1628,10 +1657,50 @@
     return input;
 }
 
+bool AudioPolicyManager::isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
+        const sp<AudioSession>& audioSession)
+{
+    // Do not allow capture if an active voice call is using a software patch and
+    // the call TX source device is on the same HW module.
+    // FIXME: would be better to refine to only inputs whose profile connects to the
+    // call TX device but this information is not in the audio patch
+    if (mCallTxPatch != 0 &&
+        inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
+        return false;
+    }
+
+    // starting concurrent capture is enabled if:
+    // 1) capturing for re-routing
+    // 2) capturing for HOTWORD source
+    // 3) capturing for FM TUNER source
+    // 4) all other active captures are for re-routing, HOTWORD or FM TUNER sources
+
+    if (is_virtual_input_device(inputDesc->mDevice) ||
+            audioSession->inputSource() == AUDIO_SOURCE_HOTWORD ||
+            audioSession->inputSource() == AUDIO_SOURCE_FM_TUNER) {
+        return true;
+    }
+
+    Vector< sp<AudioInputDescriptor> > activeInputs = mInputs.getActiveInputs();
+    for (size_t i = 0; i <  activeInputs.size(); i++) {
+        sp<AudioInputDescriptor> activeInput = activeInputs[i];
+        if ((activeInput->inputSource() != AUDIO_SOURCE_HOTWORD) &&
+                (activeInput->inputSource() != AUDIO_SOURCE_FM_TUNER) &&
+                !is_virtual_input_device(activeInput->mDevice)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+
 status_t AudioPolicyManager::startInput(audio_io_handle_t input,
-                                        audio_session_t session)
+                                        audio_session_t session,
+                                        concurrency_type__mask_t *concurrency)
 {
     ALOGV("startInput() input %d", input);
+    *concurrency = API_INPUT_CONCURRENCY_NONE;
     ssize_t index = mInputs.indexOfKey(input);
     if (index < 0) {
         ALOGW("startInput() unknown input %d", input);
@@ -1645,81 +1714,62 @@
         return BAD_VALUE;
     }
 
-    // virtual input devices are compatible with other input devices
-    if (!is_virtual_input_device(inputDesc->mDevice)) {
-
-        // for a non-virtual input device, check if there is another (non-virtual) active input
-        audio_io_handle_t activeInput = mInputs.getActiveInput();
-        if (activeInput != 0 && activeInput != input) {
-
-            // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
-            // otherwise the active input continues and the new input cannot be started.
-            sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
-            if ((activeDesc->inputSource() == AUDIO_SOURCE_HOTWORD) &&
-                    !activeDesc->hasPreemptedSession(session)) {
-                ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
-                //FIXME: consider all active sessions
-                AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
-                audio_session_t activeSession = activeSessions.keyAt(0);
-                SortedVector<audio_session_t> sessions =
-                                           activeDesc->getPreemptedSessions();
-                sessions.add(activeSession);
-                inputDesc->setPreemptedSessions(sessions);
-                stopInput(activeInput, activeSession);
-                releaseInput(activeInput, activeSession);
-            } else {
-                ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
-                return INVALID_OPERATION;
-            }
-        }
-
-        // Do not allow capture if an active voice call is using a software patch and
-        // the call TX source device is on the same HW module.
-        // FIXME: would be better to refine to only inputs whose profile connects to the
-        // call TX device but this information is not in the audio patch
-        if (mCallTxPatch != 0 &&
-            inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
-            return INVALID_OPERATION;
-        }
+    if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
+        ALOGW("startInput(%d) failed: other input already started", input);
+        return INVALID_OPERATION;
     }
 
+    if (isInCall()) {
+        *concurrency |= API_INPUT_CONCURRENCY_CALL;
+    }
+    if (mInputs.activeInputsCount() != 0) {
+        *concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
+    }
+
+    // increment activity count before calling getNewInputDevice() below as only active sessions
+    // are considered for device selection
+    audioSession->changeActiveCount(1);
+
     // Routing?
     mInputRoutes.incRouteActivity(session);
 
-    if (!inputDesc->isActive() || mInputRoutes.hasRouteChanged(session)) {
-        // if input maps to a dynamic policy with an activity listener, notify of state change
-        if ((inputDesc->mPolicyMix != NULL)
-                && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
-            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
-                    MIX_STATE_MIXING);
-        }
+    if (audioSession->activeCount() == 1 || mInputRoutes.hasRouteChanged(session)) {
 
-        if (mInputs.activeInputsCount() == 0) {
-            SoundTrigger::setCaptureState(true);
-        }
-        setInputDevice(input, getNewInputDevice(input), true /* force */);
+        setInputDevice(input, getNewInputDevice(inputDesc), true /* force */);
 
-        // automatically enable the remote submix output when input is started if not
-        // used by a policy mix of type MIX_TYPE_RECORDERS
-        // For remote submix (a virtual device), we open only one input per capture request.
-        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
-            String8 address = String8("");
-            if (inputDesc->mPolicyMix == NULL) {
-                address = String8("0");
-            } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
-                address = inputDesc->mPolicyMix->mDeviceAddress;
+        if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
+            // if input maps to a dynamic policy with an activity listener, notify of state change
+            if ((inputDesc->mPolicyMix != NULL)
+                    && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+                mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+                        MIX_STATE_MIXING);
             }
-            if (address != "") {
-                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                        AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address, "remote-submix");
+
+            if (mInputs.activeInputsCount() == 0) {
+                SoundTrigger::setCaptureState(true);
+            }
+
+            // automatically enable the remote submix output when input is started if not
+            // used by a policy mix of type MIX_TYPE_RECORDERS
+            // For remote submix (a virtual device), we open only one input per capture request.
+            if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+                String8 address = String8("");
+                if (inputDesc->mPolicyMix == NULL) {
+                    address = String8("0");
+                } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+                    address = inputDesc->mPolicyMix->mDeviceAddress;
+                }
+                if (address != "") {
+                    setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                            AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                            address, "remote-submix");
+                }
             }
         }
     }
 
     ALOGV("AudioPolicyManager::startInput() input source = %d", audioSession->inputSource());
 
-    audioSession->changeActiveCount(1);
     return NO_ERROR;
 }
 
@@ -1750,36 +1800,41 @@
     // Routing?
     mInputRoutes.decRouteActivity(session);
 
-    if (!inputDesc->isActive()) {
-        // if input maps to a dynamic policy with an activity listener, notify of state change
-        if ((inputDesc->mPolicyMix != NULL)
-                && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
-            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
-                    MIX_STATE_IDLE);
-        }
+    if (audioSession->activeCount() == 0) {
 
-        // automatically disable the remote submix output when input is stopped if not
-        // used by a policy mix of type MIX_TYPE_RECORDERS
-        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
-            String8 address = String8("");
-            if (inputDesc->mPolicyMix == NULL) {
-                address = String8("0");
-            } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
-                address = inputDesc->mPolicyMix->mDeviceAddress;
+        if (inputDesc->isActive()) {
+            setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
+        } else {
+            // if input maps to a dynamic policy with an activity listener, notify of state change
+            if ((inputDesc->mPolicyMix != NULL)
+                    && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+                mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+                        MIX_STATE_IDLE);
             }
-            if (address != "") {
-                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                         address, "remote-submix");
+
+            // automatically disable the remote submix output when input is stopped if not
+            // used by a policy mix of type MIX_TYPE_RECORDERS
+            if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+                String8 address = String8("");
+                if (inputDesc->mPolicyMix == NULL) {
+                    address = String8("0");
+                } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+                    address = inputDesc->mPolicyMix->mDeviceAddress;
+                }
+                if (address != "") {
+                    setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                                             AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                             address, "remote-submix");
+                }
             }
-        }
 
-        resetInputDevice(input);
+            resetInputDevice(input);
 
-        if (mInputs.activeInputsCount() == 0) {
-            SoundTrigger::setCaptureState(false);
+            if (mInputs.activeInputsCount() == 0) {
+                SoundTrigger::setCaptureState(false);
+            }
+            inputDesc->clearPreemptedSessions();
         }
-        inputDesc->clearPreemptedSessions();
     }
     return NO_ERROR;
 }
@@ -2259,7 +2314,9 @@
     snprintf(buffer, SIZE, " Primary Output: %d\n",
              hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
     result.append(buffer);
-    snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState());
+    std::string stateLiteral;
+    AudioModeConverter::toString(mEngine->getPhoneState(), stateLiteral);
+    snprintf(buffer, SIZE, " Phone state: %s\n", stateLiteral.c_str());
     result.append(buffer);
     snprintf(buffer, SIZE, " Force use for communications %d\n",
              mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
@@ -2648,7 +2705,7 @@
                 // create a software bridge in PatchPanel if:
                 // - source and sink devices are on different HW modules OR
                 // - audio HAL version is < 3.0
-                if ((srcDeviceDesc->getModuleHandle() != sinkDeviceDesc->getModuleHandle()) ||
+                if (!srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) ||
                         (srcDeviceDesc->mModule->getHalVersion() < AUDIO_DEVICE_API_VERSION_3_0)) {
                     // support only one sink device for now to simplify output selection logic
                     if (patch->num_sinks > 1) {
@@ -2748,7 +2805,7 @@
                 return BAD_VALUE;
             }
             setInputDevice(inputDesc->mIoHandle,
-                           getNewInputDevice(inputDesc->mIoHandle),
+                           getNewInputDevice(inputDesc),
                            true,
                            NULL);
         } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
@@ -4156,7 +4213,7 @@
 
     ALOGVV("getOutputsForDevice() device %04x", device);
     for (size_t i = 0; i < openOutputs.size(); i++) {
-        ALOGVV("output %d isDuplicated=%d device=%04x",
+        ALOGVV("output %zu isDuplicated=%d device=%04x",
                 i, openOutputs.valueAt(i)->isDuplicated(),
                 openOutputs.valueAt(i)->supportedDevices());
         if ((device & openOutputs.valueAt(i)->supportedDevices()) == device) {
@@ -4278,33 +4335,36 @@
             ((mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET &
                     ~AUDIO_DEVICE_BIT_IN) != 0) ||
             ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_ALL_SCO) != 0);
-    // suspend A2DP output if:
-    //      (NOT already suspended) &&
-    //      ((SCO device is connected &&
-    //       (forced usage for communication || for record is SCO))) ||
-    //      (phone state is ringing || in call)
+
+    // if suspended, restore A2DP output if:
+    //      ((SCO device is NOT connected) ||
+    //       ((forced usage communication is NOT SCO) && (forced usage for record is NOT SCO) &&
+    //        (phone state is NOT in call) && (phone state is NOT ringing)))
     //
-    // restore A2DP output if:
-    //      (Already suspended) &&
-    //      ((SCO device is NOT connected ||
-    //       (forced usage NOT for communication && NOT for record is SCO))) &&
-    //      (phone state is NOT ringing && NOT in call)
+    // if not suspended, suspend A2DP output if:
+    //      (SCO device is connected) &&
+    //       ((forced usage for communication is SCO) || (forced usage for record is SCO) ||
+    //       ((phone state is in call) || (phone state is ringing)))
     //
     if (mA2dpSuspended) {
-        if ((!isScoConnected ||
-             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) != AUDIO_POLICY_FORCE_BT_SCO) &&
-              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) != AUDIO_POLICY_FORCE_BT_SCO))) &&
-             ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) &&
+        if (!isScoConnected ||
+             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) !=
+                     AUDIO_POLICY_FORCE_BT_SCO) &&
+              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) !=
+                      AUDIO_POLICY_FORCE_BT_SCO) &&
+              (mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) &&
               (mEngine->getPhoneState() != AUDIO_MODE_RINGTONE))) {
 
             mpClientInterface->restoreOutput(a2dpOutput);
             mA2dpSuspended = false;
         }
     } else {
-        if ((isScoConnected &&
-             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) ||
-              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO))) ||
-             ((mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) ||
+        if (isScoConnected &&
+             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) ==
+                     AUDIO_POLICY_FORCE_BT_SCO) ||
+              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) ==
+                      AUDIO_POLICY_FORCE_BT_SCO) ||
+              (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) ||
               (mEngine->getPhoneState() == AUDIO_MODE_RINGTONE))) {
 
             mpClientInterface->suspendOutput(a2dpOutput);
@@ -4375,9 +4435,9 @@
     return device;
 }
 
-audio_devices_t AudioPolicyManager::getNewInputDevice(audio_io_handle_t input)
+audio_devices_t AudioPolicyManager::getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc)
 {
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+    audio_devices_t device = AUDIO_DEVICE_NONE;
 
     ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
@@ -4389,7 +4449,12 @@
         }
     }
 
-    audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->inputSource());
+    audio_source_t source = inputDesc->getHighestPrioritySource(true /*activeOnly*/);
+    if (isInCall()) {
+        device = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+    } else if (source != AUDIO_SOURCE_DEFAULT) {
+        device = getDeviceAndMixForInputSource(source);
+    }
 
     return device;
 }
@@ -4597,7 +4662,7 @@
                         == AUDIO_DEVICE_NONE) {
                     continue;
                 }
-                ALOGVV("checkDeviceMuteStrategies() %s strategy %d (curDevice %04x)",
+                ALOGVV("checkDeviceMuteStrategies() %s strategy %zu (curDevice %04x)",
                       mute ? "muting" : "unmuting", i, curDevice);
                 setStrategyMute((routing_strategy)i, mute, desc, mute ? 0 : delayMs);
                 if (isStrategyActive(desc, (routing_strategy)i)) {
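
The rule implemented by isConcurentCaptureAllowed() reduces to: a capture for re-routing, HOTWORD or FM TUNER may always start, and any other capture may start only if every already-active capture falls into one of those categories. The standalone sketch below restates that rule with simplified stand-in types; it deliberately omits the call TX software-patch check that the real method performs first.

    #include <vector>

    // Simplified description of a capture request / active input.
    struct Capture {
        bool isVirtualDevice;  // re-routing (e.g. remote submix)
        bool isHotword;
        bool isFmTuner;
    };

    static bool isLowImpact(const Capture& c) {
        return c.isVirtualDevice || c.isHotword || c.isFmTuner;
    }

    // Mirrors the documented rule: a "low impact" capture may always start;
    // any other capture may start only if all active captures are low impact.
    static bool isConcurrentCaptureAllowed(const Capture& newCapture,
                                           const std::vector<Capture>& activeCaptures) {
        if (isLowImpact(newCapture)) {
            return true;
        }
        for (const Capture& active : activeCaptures) {
            if (!isLowImpact(active)) {
                return false;
            }
        }
        return true;
    }

    int main() {
        std::vector<Capture> active = { {false, true, false} };  // one active HOTWORD capture
        Capture mic = {false, false, false};                     // plain microphone capture
        return isConcurrentCaptureAllowed(mic, active) ? 0 : 1;  // allowed: HOTWORD is low impact
    }
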
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 5c2b673..44e9baf 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -139,7 +139,8 @@
 
         // indicates to the audio policy manager that the input starts being used.
         virtual status_t startInput(audio_io_handle_t input,
-                                    audio_session_t session);
+                                    audio_session_t session,
+                                    concurrency_type__mask_t *concurrency);
 
         // indicates to the audio policy manager that the input stops being used.
         virtual status_t stopInput(audio_io_handle_t input,
@@ -405,7 +406,7 @@
         void updateDevicesAndOutputs();
 
         // selects the most appropriate device on input for current state
-        audio_devices_t getNewInputDevice(audio_io_handle_t input);
+        audio_devices_t getNewInputDevice(const sp<AudioInputDescriptor>& inputDesc);
 
         virtual uint32_t getMaxEffectsCpuLoad()
         {
@@ -506,6 +507,8 @@
 
         void clearAudioSources(uid_t uid);
 
+        bool isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
+                const sp<AudioSession>& audioSession);
 
         static bool streamsMatchForvolume(audio_stream_type_t stream1,
                                           audio_stream_type_t stream2);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index b732b20..9a28137 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -57,11 +57,11 @@
     }
     mInputSources.clear();
 
-    for (i = 0; i < mInputs.size(); i++) {
-        mInputs.valueAt(i)->mEffects.clear();
-        delete mInputs.valueAt(i);
+    for (i = 0; i < mInputSessions.size(); i++) {
+        mInputSessions.valueAt(i)->mEffects.clear();
+        delete mInputSessions.valueAt(i);
     }
-    mInputs.clear();
+    mInputSessions.clear();
 
     // release audio output processing resources
     for (i = 0; i < mOutputStreams.size(); i++) {
@@ -93,19 +93,19 @@
         ALOGV("addInputEffects(): no processing needs to be attached to this source");
         return status;
     }
-    ssize_t idx = mInputs.indexOfKey(input);
-    EffectVector *inputDesc;
+    ssize_t idx = mInputSessions.indexOfKey(audioSession);
+    EffectVector *sessionDesc;
     if (idx < 0) {
-        inputDesc = new EffectVector(audioSession);
-        mInputs.add(input, inputDesc);
+        sessionDesc = new EffectVector(audioSession);
+        mInputSessions.add(audioSession, sessionDesc);
     } else {
         // EffectVector is existing and we just need to increase ref count
-        inputDesc = mInputs.valueAt(idx);
+        sessionDesc = mInputSessions.valueAt(idx);
     }
-    inputDesc->mRefCount++;
+    sessionDesc->mRefCount++;
 
-    ALOGV("addInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
-    if (inputDesc->mRefCount == 1) {
+    ALOGV("addInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
+    if (sessionDesc->mRefCount == 1) {
         Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
         for (size_t i = 0; i < effects.size(); i++) {
             EffectDesc *effect = effects[i];
@@ -123,30 +123,31 @@
             }
             ALOGV("addInputEffects(): added Fx %s on source: %d",
                   effect->mName, (int32_t)aliasSource);
-            inputDesc->mEffects.add(fx);
+            sessionDesc->mEffects.add(fx);
         }
-        inputDesc->setProcessorEnabled(true);
+        sessionDesc->setProcessorEnabled(true);
     }
     return status;
 }
 
 
-status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input)
+status_t AudioPolicyEffects::releaseInputEffects(audio_io_handle_t input,
+                                                 audio_session_t audioSession)
 {
     status_t status = NO_ERROR;
 
     Mutex::Autolock _l(mLock);
-    ssize_t index = mInputs.indexOfKey(input);
+    ssize_t index = mInputSessions.indexOfKey(audioSession);
     if (index < 0) {
         return status;
     }
-    EffectVector *inputDesc = mInputs.valueAt(index);
-    inputDesc->mRefCount--;
-    ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, inputDesc->mRefCount);
-    if (inputDesc->mRefCount == 0) {
-        inputDesc->setProcessorEnabled(false);
-        delete inputDesc;
-        mInputs.removeItemsAt(index);
+    EffectVector *sessionDesc = mInputSessions.valueAt(index);
+    sessionDesc->mRefCount--;
+    ALOGV("releaseInputEffects(): input: %d, refCount: %d", input, sessionDesc->mRefCount);
+    if (sessionDesc->mRefCount == 0) {
+        sessionDesc->setProcessorEnabled(false);
+        delete sessionDesc;
+        mInputSessions.removeItemsAt(index);
         ALOGV("releaseInputEffects(): all effects released");
     }
     return status;
@@ -160,16 +161,16 @@
 
     Mutex::Autolock _l(mLock);
     size_t index;
-    for (index = 0; index < mInputs.size(); index++) {
-        if (mInputs.valueAt(index)->mSessionId == audioSession) {
+    for (index = 0; index < mInputSessions.size(); index++) {
+        if (mInputSessions.valueAt(index)->mSessionId == audioSession) {
             break;
         }
     }
-    if (index == mInputs.size()) {
+    if (index == mInputSessions.size()) {
         *count = 0;
         return BAD_VALUE;
     }
-    Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
+    Vector< sp<AudioEffect> > effects = mInputSessions.valueAt(index)->mEffects;
 
     for (size_t i = 0; i < effects.size(); i++) {
         effect_descriptor_t desc = effects[i]->descriptor();
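
Input effect bookkeeping is now keyed by audio session rather than by input handle, with a per-session reference count so effects are created for the first client and destroyed when the last one releases. A standalone sketch of that registry using std::map, with EffectVector as a simplified stand-in for the real structure:

    #include <map>
    #include <string>
    #include <vector>

    // Simplified stand-in for the per-session effect bookkeeping.
    struct EffectVector {
        int sessionId = 0;
        int refCount = 0;
        std::vector<std::string> effects;  // effect names only, for illustration
    };

    static std::map<int, EffectVector> gInputSessions;  // keyed by session id

    static void addInputEffects(int sessionId) {
        EffectVector &desc = gInputSessions[sessionId];  // created on first use
        desc.sessionId = sessionId;
        if (++desc.refCount == 1) {
            // First client on this session: instantiate the configured effects.
            desc.effects.push_back("automatic_gain_control");  // placeholder effect name
        }
    }

    static void releaseInputEffects(int sessionId) {
        auto it = gInputSessions.find(sessionId);
        if (it == gInputSessions.end()) {
            return;
        }
        if (--it->second.refCount == 0) {
            gInputSessions.erase(it);  // last client gone: effects are torn down
        }
    }

    int main() {
        addInputEffects(42);
        addInputEffects(42);       // second client on the same session: no new effects
        releaseInputEffects(42);
        releaseInputEffects(42);   // last client gone: entry removed
        return gInputSessions.empty() ? 0 : 1;
    }
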
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index afdaf98..9428409 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -62,7 +62,8 @@
                              audio_session_t audioSession);
 
     // Release all input effects associated to this input
-    status_t releaseInputEffects(audio_io_handle_t input);
+    status_t releaseInputEffects(audio_io_handle_t input,
+                                 audio_session_t audioSession);
 
 
     // Return a list of effect descriptors for default output effects
@@ -178,12 +179,12 @@
                          size_t *curSize,
                          size_t *totSize);
 
-    // protects access to mInputSources, mInputs, mOutputStreams, mOutputSessions
+    // protects access to mInputSources, mInputSessions, mOutputStreams, mOutputSessions
     Mutex mLock;
     // Automatic input effects are configured per audio_source_t
     KeyedVector< audio_source_t, EffectDescVector* > mInputSources;
     // Automatic input effects are unique for audio_io_handle_t
-    KeyedVector< audio_io_handle_t, EffectVector* > mInputs;
+    KeyedVector< audio_session_t, EffectVector* > mInputSessions;
 
     // Automatic output effects are organized per audio_stream_type_t
     KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 451ce84..f6da920 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -361,8 +361,23 @@
         return NO_INIT;
     }
     Mutex::Autolock _l(mLock);
+    AudioPolicyInterface::concurrency_type__mask_t concurrency;
+    status_t status = mAudioPolicyManager->startInput(input, session, &concurrency);
 
-    return mAudioPolicyManager->startInput(input, session);
+    if (status == NO_ERROR) {
+        LOG_ALWAYS_FATAL_IF(concurrency & ~AudioPolicyInterface::API_INPUT_CONCURRENCY_ALL,
+                            "startInput(): invalid concurrency type %d", (int)concurrency);
+
+        // enforce permission (if any) required for each type of concurrency
+        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CALL) {
+            //TODO: check incall capture permission
+        }
+        if (concurrency & AudioPolicyInterface::API_INPUT_CONCURRENCY_CAPTURE) {
+            //TODO: check concurrent capture permission
+        }
+    }
+
+    return status;
 }
 
 status_t AudioPolicyService::stopInput(audio_io_handle_t input,
@@ -390,7 +405,7 @@
     }
     if (audioPolicyEffects != 0) {
         // release audio processors from the input
-        status_t status = audioPolicyEffects->releaseInputEffects(input);
+        status_t status = audioPolicyEffects->releaseInputEffects(input, session);
         if(status != NO_ERROR) {
             ALOGW("Failed to release effects on input %d", input);
         }
@@ -566,7 +581,8 @@
         *count = 0;
         return NO_INIT;
     }
-    return audioPolicyEffects->queryDefaultInputEffects(audioSession, descriptors, count);
+    return audioPolicyEffects->queryDefaultInputEffects(
+            (audio_session_t)audioSession, descriptors, count);
 }
 
 bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
index 946c380..517fba1 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
@@ -307,7 +307,7 @@
 }
 
 void AudioPolicyService::releaseInput(audio_io_handle_t input,
-                                      audio_session_t session __unused)
+                                      audio_session_t session)
 {
     if (mpAudioPolicy == NULL) {
         return;
@@ -321,7 +321,7 @@
     }
     if (audioPolicyEffects != 0) {
         // release audio processors from the input
-        status_t status = audioPolicyEffects->releaseInputEffects(input);
+        status_t status = audioPolicyEffects->releaseInputEffects(input, session);
         if(status != NO_ERROR) {
             ALOGW("Failed to release effects on input %d", input);
         }
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index a9a2d3c..4e337a0 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -5,19 +5,20 @@
 LOCAL_SRC_FILES := MediaExtractorService.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
 LOCAL_MODULE:= libmediaextractorservice
-LOCAL_32_BIT_ONLY := true
 include $(BUILD_SHARED_LIBRARY)
 
 
 # service executable
 include $(CLEAR_VARS)
+# seccomp filters are defined for the following architectures:
 LOCAL_REQUIRED_MODULES_arm := mediaextractor-seccomp.policy
+LOCAL_REQUIRED_MODULES_arm64 := mediaextractor-seccomp.policy
 LOCAL_REQUIRED_MODULES_x86 := mediaextractor-seccomp.policy
+# TODO add seccomp filter for x86_64.
 LOCAL_SRC_FILES := main_extractorservice.cpp minijail/minijail.cpp
 LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils liblog libicuuc libminijail
 LOCAL_STATIC_LIBRARIES := libicuandroid_utils
 LOCAL_MODULE:= mediaextractor
-LOCAL_32_BIT_ONLY := true
 LOCAL_INIT_RC := mediaextractor.rc
 LOCAL_C_INCLUDES := frameworks/av/media/libmedia
 include $(BUILD_EXECUTABLE)
diff --git a/services/mediaextractor/minijail/Android.mk b/services/mediaextractor/minijail/Android.mk
index 79c5505..3a93340 100644
--- a/services/mediaextractor/minijail/Android.mk
+++ b/services/mediaextractor/minijail/Android.mk
@@ -1,18 +1,12 @@
 LOCAL_PATH := $(call my-dir)
 
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86 x86_64))
+# TODO add filter for x86_64
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64 x86))
 include $(CLEAR_VARS)
 LOCAL_MODULE := mediaextractor-seccomp.policy
 LOCAL_MODULE_CLASS := ETC
 LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
-
-# mediaextractor runs in 32-bit combatibility mode. For 64 bit architectures,
-# use the 32 bit policy
-ifdef TARGET_2ND_ARCH
-    LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_2ND_ARCH).policy
-else
-    LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
-endif
+LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediaextractor-seccomp-$(TARGET_ARCH).policy
 
 # allow device specific additions to the syscall whitelist
 ifneq (,$(wildcard $(BOARD_SECCOMP_POLICY)/mediaextractor-seccomp.policy))
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy
new file mode 100644
index 0000000..ae6ac05
--- /dev/null
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm64.policy
@@ -0,0 +1,36 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+ioctl: 1
+futex: 1
+prctl: 1
+write: 1
+getpriority: 1
+close: 1
+dup: 1
+mmap: 1
+munmap: 1
+openat: 1
+mprotect: 1
+madvise: 1
+getuid: 1
+fstat: 1
+read: 1
+setpriority: 1
+sigaltstack: 1
+clone: 1
+lseek: 1
+newfstatat: 1
+faccessat: 1
+restart_syscall: 1
+exit: 1
+exit_group: 1
+rt_sigreturn: 1
+getrlimit: 1
+
+# for attaching to debuggerd on process crash
+rt_sigaction: 1
+# socket: arg0 == AF_LOCAL
+socket: arg0 == 1
+connect: 1
+rt_tgsigqueueinfo: 1
+writev: 1