Merge "AudioFlinger: fix capture stop sequence" into nyc-mr1-dev
diff --git a/camera/cameraserver/cameraserver.rc b/camera/cameraserver/cameraserver.rc
index 624baff..c1dad2c 100644
--- a/camera/cameraserver/cameraserver.rc
+++ b/camera/cameraserver/cameraserver.rc
@@ -1,7 +1,7 @@
 service cameraserver /system/bin/cameraserver
     class main
     user cameraserver
-    group audio camera input drmrpc inet media mediadrm net_bt net_bt_admin net_bw_acct
+    group audio camera input drmrpc
     ioprio rt 4
     writepid /dev/cpuset/camera-daemon/tasks
-    writepid /dev/stune/foreground/tasks
+    writepid /dev/stune/top-app/tasks
diff --git a/include/media/IDataSource.h b/include/media/IDataSource.h
index 838e29f..655f337 100644
--- a/include/media/IDataSource.h
+++ b/include/media/IDataSource.h
@@ -25,6 +25,7 @@
 namespace android {
 
 class IMemory;
+class DecryptHandle;
 
 // A binder interface for implementing a stagefright DataSource remotely.
 class IDataSource : public IInterface {
@@ -47,6 +48,8 @@
     virtual uint32_t getFlags() = 0;
     // get a description of the source, e.g. the url or filename it is based on
     virtual String8 toString() = 0;
+    // Initialize DRM and return a DecryptHandle.
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime) = 0;
 
 private:
     DISALLOW_EVIL_CONSTRUCTORS(IDataSource);
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 4f2517c..94d2896 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -371,6 +371,10 @@
             int32_t width, int32_t height,
             OMX_VIDEO_CODINGTYPE compressionFormat, float frameRate = -1.0);
 
+    // Sets the buffer count of the |portIndex| port to |bufferNum|. NOTE: The component may reject
+    // this setting if |bufferNum| is less than the port's minimum buffer count.
+    status_t setPortBufferNum(OMX_U32 portIndex, int bufferNum);
+
     // gets index or sets it to 0 on error. Returns error from codec.
     status_t initDescribeColorAspectsIndex();
 
@@ -480,6 +484,12 @@
     status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
     status_t setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure);
 
+    // Configures temporal layering based on |msg|. |inConfigure| shall be true iff this is called
+    // during the configure() call. On success, the configured layering is set in |outputFormat|. If
+    // |outputFormat| is mOutputFormat, it is copied to trigger an output format changed event.
+    status_t configureTemporalLayers(
+            const sp<AMessage> &msg, bool inConfigure, sp<AMessage> &outputFormat);
+
     status_t setMinBufferSize(OMX_U32 portIndex, size_t size);
 
     status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index a6901a8..8f0eaa7 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -65,6 +65,7 @@
 
     status_t setGeoData(int latitudex10000, int longitudex10000);
     status_t setCaptureRate(float captureFps);
+    status_t setTemporalLayerCount(uint32_t layerCount);
     virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
     virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
 
@@ -187,6 +188,7 @@
     // Acquire lock before calling these methods
     off64_t addSample_l(MediaBuffer *buffer);
     off64_t addLengthPrefixedSample_l(MediaBuffer *buffer);
+    off64_t addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer);
 
     bool exceedsFileSizeLimit();
     bool use32BitFileOffset() const;
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index be7e5c1..6ba7b32 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -204,6 +204,8 @@
                                    // transfer Function, value defined by ColorAspects.Transfer.
     kKeyColorMatrix      = 'cMtx', // int32_t,
                                    // color Matrix, value defined by ColorAspects.MatrixCoeffs.
+    kKeyTemporalLayerId  = 'iLyr', // int32_t, temporal layer-id. 0-based (0 => base layer)
+    kKeyTemporalLayerCount = 'cLyr', // int32_t, number of temporal layers encoded
 };
 
 enum {
diff --git a/media/common_time/Android.mk b/media/common_time/Android.mk
index 632acbc..aaa0db2 100644
--- a/media/common_time/Android.mk
+++ b/media/common_time/Android.mk
@@ -19,4 +19,6 @@
                           libutils \
                           liblog
 
+LOCAL_CFLAGS := -Wall -Werror
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/common_time/cc_helper.cpp b/media/common_time/cc_helper.cpp
index 8d8556c..222b7ce 100644
--- a/media/common_time/cc_helper.cpp
+++ b/media/common_time/cc_helper.cpp
@@ -80,7 +80,7 @@
     }
 }
 
-void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID) {
+void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID __unused) {
     // do nothing; listener is only really used as a token so the server can
     // find out when clients die.
 }
diff --git a/media/libmedia/IDataSource.cpp b/media/libmedia/IDataSource.cpp
index 7aeba5a..51c9938 100644
--- a/media/libmedia/IDataSource.cpp
+++ b/media/libmedia/IDataSource.cpp
@@ -23,6 +23,7 @@
 
 #include <binder/IMemory.h>
 #include <binder/Parcel.h>
+#include <drm/drm_framework_common.h>
 #include <media/stagefright/foundation/ADebug.h>
 
 namespace android {
@@ -34,6 +35,7 @@
     CLOSE,
     GET_FLAGS,
     TO_STRING,
+    DRM_INITIALIZATION,
 };
 
 struct BpDataSource : public BpInterface<IDataSource> {
@@ -84,6 +86,47 @@
         remote()->transact(TO_STRING, data, &reply);
         return reply.readString8();
     }
+
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
+        if (mime == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.writeCString(mime);
+        }
+        remote()->transact(DRM_INITIALIZATION, data, &reply);
+        sp<DecryptHandle> handle;
+        if (reply.dataAvail() != 0) {
+            handle = new DecryptHandle();
+            handle->decryptId = reply.readInt32();
+            handle->mimeType = reply.readString8();
+            handle->decryptApiType = reply.readInt32();
+            handle->status = reply.readInt32();
+
+            const int bufferLength = reply.readInt32();
+            if (bufferLength != -1) {
+                handle->decryptInfo = new DecryptInfo();
+                handle->decryptInfo->decryptBufferLength = bufferLength;
+            }
+
+            size_t size = reply.readInt32();
+            for (size_t i = 0; i < size; ++i) {
+                DrmCopyControl key = (DrmCopyControl)reply.readInt32();
+                int value = reply.readInt32();
+                handle->copyControlVector.add(key, value);
+            }
+
+            size = reply.readInt32();
+            for (size_t i = 0; i < size; ++i) {
+                String8 key = reply.readString8();
+                String8 value = reply.readString8();
+                handle->extendedData.add(key, value);
+            }
+        }
+        return handle;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(DataSource, "android.media.IDataSource");
@@ -126,6 +169,42 @@
             reply->writeString8(toString());
             return NO_ERROR;
         } break;
+        case DRM_INITIALIZATION: {
+            CHECK_INTERFACE(IDataSource, data, reply);
+            const char *mime = NULL;
+            const int32_t flag = data.readInt32();
+            if (flag != 0) {
+                mime = data.readCString();
+            }
+            sp<DecryptHandle> handle = DrmInitialization(mime);
+            if (handle != NULL) {
+                reply->writeInt32(handle->decryptId);
+                reply->writeString8(handle->mimeType);
+                reply->writeInt32(handle->decryptApiType);
+                reply->writeInt32(handle->status);
+
+                if (handle->decryptInfo != NULL) {
+                    reply->writeInt32(handle->decryptInfo->decryptBufferLength);
+                } else {
+                    reply->writeInt32(-1);
+                }
+
+                size_t size = handle->copyControlVector.size();
+                reply->writeInt32(size);
+                for (size_t i = 0; i < size; ++i) {
+                    reply->writeInt32(handle->copyControlVector.keyAt(i));
+                    reply->writeInt32(handle->copyControlVector.valueAt(i));
+                }
+
+                size = handle->extendedData.size();
+                reply->writeInt32(size);
+                for (size_t i = 0; i < size; ++i) {
+                    reply->writeString8(handle->extendedData.keyAt(i));
+                    reply->writeString8(handle->extendedData.valueAt(i));
+                }
+            }
+            return NO_ERROR;
+        } break;
 
         default:
             return BBinder::onTransact(code, data, reply, flags);
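
A minimal sketch of the marshalling contract shared by the two halves above: whatever BnDataSource writes into the reply parcel for the DecryptHandle, BpDataSource must read back from that same reply parcel, in the same order (reading from the outgoing request parcel on the proxy side would only return request fields). The helper names below are illustrative assumptions, not part of the patch, and they build only inside the Android tree.

#include <binder/Parcel.h>
#include <utils/String8.h>
#include <cstdint>

using android::Parcel;
using android::String8;

// Stub side: serialize two DecryptHandle fields into the reply parcel.
static void writeHandleFields(Parcel *reply, int32_t decryptId, const String8 &mimeType) {
    reply->writeInt32(decryptId);
    reply->writeString8(mimeType);
}

// Proxy side: read the same fields back, in exactly the order they were written.
static void readHandleFields(const Parcel &reply, int32_t *decryptId, String8 *mimeType) {
    *decryptId = reply.readInt32();
    *mimeType = reply.readString8();
}
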
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 34445e0..f352d5b 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -877,6 +877,7 @@
         ALOGV("Delete Track: %p", mpAudioTrack.get());
         mpAudioTrack.clear();
     }
+    clearWaveGens();
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 32c4b8a..b03ae3f 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -22,6 +22,8 @@
 #include "WebmWriter.h"
 #include "StagefrightRecorder.h"
 
+#include <algorithm>
+
 #include <android/hardware/ICamera.h>
 
 #include <binder/IPCThreadState.h>
@@ -1565,9 +1567,23 @@
         format->setInt32("level", mVideoEncoderLevel);
     }
 
+    uint32_t tsLayers = 0;
     format->setInt32("priority", 0 /* realtime */);
     if (mCaptureFpsEnable) {
         format->setFloat("operating-rate", mCaptureFps);
+
+        // enable layering for all time lapse and high frame rate recordings
+        if (mFrameRate / mCaptureFps >= 1.9 || mCaptureFps / mFrameRate >= 1.9) {
+            tsLayers = 2; // use at least two layers
+            for (float fps = mCaptureFps / 1.9; fps > mFrameRate; fps /= 2) {
+                ++tsLayers;
+            }
+
+            uint32_t bLayers = std::min(2u, tsLayers - 1); // use up to 2 B-layers
+            uint32_t pLayers = tsLayers - bLayers;
+            format->setString(
+                    "ts-schema", AStringPrintf("android.generic.%u+%u", pLayers, bLayers));
+        }
     }
 
     if (mMetaDataStoredInVideoBuffers != kMetadataBufferTypeInvalid) {
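
A minimal standalone sketch of the layer-count arithmetic in the hunk above, using assumed sample values: a 120 fps high-frame-rate capture encoded for 30 fps playback ends up with four temporal layers, advertised as "android.generic.2+2".

#include <algorithm>
#include <cstdio>

int main() {
    const float captureFps = 120.f;  // assumed capture rate
    const float frameRate = 30.f;    // assumed video track frame rate

    unsigned tsLayers = 0;
    if (frameRate / captureFps >= 1.9f || captureFps / frameRate >= 1.9f) {
        tsLayers = 2;                                   // use at least two layers
        for (float fps = captureFps / 1.9f; fps > frameRate; fps /= 2) {
            ++tsLayers;                                 // 63.2 > 30, 31.6 > 30 -> 4 layers
        }
        unsigned bLayers = std::min(2u, tsLayers - 1);  // at most 2 B-layers -> 2
        unsigned pLayers = tsLayers - bLayers;          // -> 2
        printf("ts-schema = android.generic.%u+%u\n", pLayers, bLayers);
    }
    return 0;
}
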
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 3d836c8..1a0539d 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1298,6 +1298,13 @@
     }
 #endif
 
+    if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+        int32_t layerId;
+        if (mb->meta_data()->findInt32(kKeyTemporalLayerId, &layerId)) {
+            meta->setInt32("temporal-layer-id", layerId);
+        }
+    }
+
     if (trackType == MEDIA_TRACK_TYPE_TIMEDTEXT) {
         const char *mime;
         CHECK(mTimedTextTrack.mSource != NULL
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 4504b58..134da14 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -784,12 +784,9 @@
             }
 
             if (mVideoDecoder != NULL) {
-                float rate = getFrameRate();
-                if (rate > 0) {
-                    sp<AMessage> params = new AMessage();
-                    params->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
-                    mVideoDecoder->setParameters(params);
-                }
+                sp<AMessage> params = new AMessage();
+                params->setFloat("playback-speed", mPlaybackSettings.mSpeed);
+                mVideoDecoder->setParameters(params);
             }
 
             sp<AMessage> response = new AMessage;
@@ -1680,6 +1677,27 @@
             return err;
         }
     }
+
+    if (!audio) {
+        sp<AMessage> params = new AMessage();
+        float rate = getFrameRate();
+        if (rate > 0) {
+            params->setFloat("frame-rate-total", rate);
+        }
+
+        sp<MetaData> fileMeta = getFileMeta();
+        if (fileMeta != NULL) {
+            int32_t videoTemporalLayerCount;
+            if (fileMeta->findInt32(kKeyTemporalLayerCount, &videoTemporalLayerCount)
+                    && videoTemporalLayerCount > 0) {
+                params->setInt32("temporal-layer-count", videoTemporalLayerCount);
+            }
+        }
+
+        if (params->countEntries() > 0) {
+            (*decoder)->setParameters(params);
+        }
+    }
     return OK;
 }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 4678956..fa19410 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -41,6 +41,12 @@
 
 namespace android {
 
+static float kDisplayRefreshingRate = 60.f;
+
+// The default total video frame rate of a stream when that info is not available from
+// the source.
+static float kDefaultVideoFrameRateTotal = 30.f;
+
 static inline bool getAudioDeepBufferSetting() {
     return property_get_bool("media.stagefright.audio.deep", false /* default_value */);
 }
@@ -69,11 +75,17 @@
       mIsSecure(false),
       mFormatChangePending(false),
       mTimeChangePending(false),
+      mFrameRateTotal(kDefaultVideoFrameRateTotal),
+      mPlaybackSpeed(1.0f),
+      mNumVideoTemporalLayerTotal(1),
+      mNumVideoTemporalLayerAllowed(1),
+      mCurrentMaxVideoTemporalLayerId(0),
       mResumePending(false),
       mComponentName("decoder") {
     mCodecLooper = new ALooper;
     mCodecLooper->setName("NPDecoder-CL");
     mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+    mVideoTemporalLayerAggregateFps[0] = mFrameRateTotal;
 }
 
 NuPlayer::Decoder::~Decoder() {
@@ -329,11 +341,73 @@
 }
 
 void NuPlayer::Decoder::onSetParameters(const sp<AMessage> &params) {
-    if (mCodec == NULL) {
-        ALOGW("onSetParameters called before codec is created.");
-        return;
+    bool needAdjustLayers = false;
+    float frameRateTotal;
+    if (params->findFloat("frame-rate-total", &frameRateTotal)
+            && mFrameRateTotal != frameRateTotal) {
+        needAdjustLayers = true;
+        mFrameRateTotal = frameRateTotal;
     }
-    mCodec->setParameters(params);
+
+    int32_t numVideoTemporalLayerTotal;
+    if (params->findInt32("temporal-layer-count", &numVideoTemporalLayerTotal)
+            && numVideoTemporalLayerTotal > 0
+            && numVideoTemporalLayerTotal <= kMaxNumVideoTemporalLayers
+            && mNumVideoTemporalLayerTotal != numVideoTemporalLayerTotal) {
+        needAdjustLayers = true;
+        mNumVideoTemporalLayerTotal = numVideoTemporalLayerTotal;
+    }
+
+    if (needAdjustLayers) {
+        // TODO: For now, layer fps is calculated for some specific architectures.
+        // But it really should be extracted from the stream.
+        mVideoTemporalLayerAggregateFps[0] =
+            mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - 1));
+        for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
+            mVideoTemporalLayerAggregateFps[i] =
+                mFrameRateTotal / (float)(1ll << (mNumVideoTemporalLayerTotal - i))
+                + mVideoTemporalLayerAggregateFps[i - 1];
+        }
+    }
+
+    float playbackSpeed;
+    if (params->findFloat("playback-speed", &playbackSpeed)
+            && mPlaybackSpeed != playbackSpeed) {
+        needAdjustLayers = true;
+        mPlaybackSpeed = playbackSpeed;
+    }
+
+    if (needAdjustLayers) {
+        int32_t layerId;
+        for (layerId = 0; layerId < mNumVideoTemporalLayerTotal; ++layerId) {
+            if (mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed
+                    > kDisplayRefreshingRate) {
+                --layerId;
+                break;
+            }
+        }
+        if (layerId < 0) {
+            layerId = 0;
+        } else if (layerId >= mNumVideoTemporalLayerTotal) {
+            layerId = mNumVideoTemporalLayerTotal - 1;
+        }
+        mNumVideoTemporalLayerAllowed = layerId + 1;
+        if (mCurrentMaxVideoTemporalLayerId > layerId) {
+            mCurrentMaxVideoTemporalLayerId = layerId;
+        }
+        ALOGV("onSetParameters: allowed layers=%d, current max layerId=%d",
+                mNumVideoTemporalLayerAllowed, mCurrentMaxVideoTemporalLayerId);
+
+        if (mCodec == NULL) {
+            ALOGW("onSetParameters called before codec is created.");
+            return;
+        }
+
+        sp<AMessage> codecParams = new AMessage();
+        codecParams->setFloat("operating-rate",
+                mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed);
+        mCodec->setParameters(codecParams);
+    }
 }
 
 void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
@@ -742,13 +816,27 @@
         }
 
         dropAccessUnit = false;
-        if (!mIsAudio
-                && !mIsSecure
-                && mRenderer->getVideoLateByUs() > 100000ll
-                && mIsVideoAVC
-                && !IsAVCReferenceFrame(accessUnit)) {
-            dropAccessUnit = true;
-            ++mNumInputFramesDropped;
+        if (!mIsAudio && !mIsSecure) {
+            int32_t layerId = 0;
+            if (mRenderer->getVideoLateByUs() > 100000ll
+                    && mIsVideoAVC
+                    && !IsAVCReferenceFrame(accessUnit)) {
+                dropAccessUnit = true;
+            } else if (accessUnit->meta()->findInt32("temporal-layer-id", &layerId)) {
+                // Add only one layer each time.
+                if (layerId > mCurrentMaxVideoTemporalLayerId + 1
+                        || layerId >= mNumVideoTemporalLayerAllowed) {
+                    dropAccessUnit = true;
+                    ALOGV("dropping layer(%d), speed=%g, allowed layer count=%d, max layerId=%d",
+                            layerId, mPlaybackSpeed, mNumVideoTemporalLayerAllowed,
+                            mCurrentMaxVideoTemporalLayerId);
+                } else if (layerId > mCurrentMaxVideoTemporalLayerId) {
+                    mCurrentMaxVideoTemporalLayerId = layerId;
+                }
+            }
+            if (dropAccessUnit) {
+                ++mNumInputFramesDropped;
+            }
         }
     } while (dropAccessUnit);
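
A standalone sketch of the layer gating that onSetParameters() performs above, with assumed inputs (a 120 fps stream carrying 3 temporal layers, 60 Hz display): the aggregate frame rates per layer work out to {30, 60, 120}, so at 1x speed two layers are decoded and at 2x speed only the base layer survives.

#include <cstdio>

int main() {
    const float kDisplayRefreshingRate = 60.f;
    const float frameRateTotal = 120.f;  // assumed "frame-rate-total"
    const int numLayers = 3;             // assumed "temporal-layer-count"

    float aggregateFps[numLayers];
    aggregateFps[0] = frameRateTotal / (float)(1 << (numLayers - 1));
    for (int i = 1; i < numLayers; ++i) {
        aggregateFps[i] = frameRateTotal / (float)(1 << (numLayers - i)) + aggregateFps[i - 1];
    }

    const float speeds[] = {1.0f, 2.0f};
    for (float speed : speeds) {
        int layerId;
        for (layerId = 0; layerId < numLayers; ++layerId) {
            if (aggregateFps[layerId] * speed > kDisplayRefreshingRate) {
                --layerId;  // last layer whose aggregate rate still fits the display
                break;
            }
        }
        if (layerId < 0) {
            layerId = 0;
        } else if (layerId >= numLayers) {
            layerId = numLayers - 1;
        }
        printf("speed %.1f -> layers allowed: %d\n", speed, layerId + 1);
    }
    return 0;
}
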
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index ae08b4b..0c619ed 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -57,6 +57,10 @@
         kWhatSetVideoSurface     = 'sSur'
     };
 
+    enum {
+        kMaxNumVideoTemporalLayers = 32,
+    };
+
     sp<Surface> mSurface;
 
     sp<Source> mSource;
@@ -90,6 +94,12 @@
     bool mIsSecure;
     bool mFormatChangePending;
     bool mTimeChangePending;
+    float mFrameRateTotal;
+    float mPlaybackSpeed;
+    int32_t mNumVideoTemporalLayerTotal;
+    int32_t mNumVideoTemporalLayerAllowed;
+    int32_t mCurrentMaxVideoTemporalLayerId;
+    float mVideoTemporalLayerAggregateFps[kMaxNumVideoTemporalLayers];
 
     bool mResumePending;
     AString mComponentName;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d97d5b1..0d368e6 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2462,6 +2462,109 @@
     return OK;
 }
 
+status_t ACodec::configureTemporalLayers(
+        const sp<AMessage> &msg, bool inConfigure, sp<AMessage> &outputFormat) {
+    if (!mIsVideo || !mIsEncoder) {
+        return INVALID_OPERATION;
+    }
+
+    AString tsSchema;
+    if (!msg->findString("ts-schema", &tsSchema)) {
+        return OK;
+    }
+
+    unsigned int numLayers = 0;
+    unsigned int numBLayers = 0;
+    int tags;
+    char dummy;
+    OMX_VIDEO_ANDROID_TEMPORALLAYERINGPATTERNTYPE pattern =
+        OMX_VIDEO_AndroidTemporalLayeringPatternNone;
+    if (sscanf(tsSchema.c_str(), "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1
+            && numLayers > 0) {
+        pattern = OMX_VIDEO_AndroidTemporalLayeringPatternWebRTC;
+    } else if ((tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+                    &numLayers, &dummy, &numBLayers, &dummy))
+            && (tags == 1 || (tags == 3 && dummy == '+'))
+            && numLayers > 0 && numLayers < UINT32_MAX - numBLayers) {
+        numLayers += numBLayers;
+        pattern = OMX_VIDEO_AndroidTemporalLayeringPatternAndroid;
+    } else {
+        ALOGI("Ignoring unsupported ts-schema [%s]", tsSchema.c_str());
+        return BAD_VALUE;
+    }
+
+    OMX_VIDEO_PARAM_ANDROID_TEMPORALLAYERINGTYPE layerParams;
+    InitOMXParams(&layerParams);
+    layerParams.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+        mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+        &layerParams, sizeof(layerParams));
+
+    if (err != OK) {
+        return err;
+    } else if (!(layerParams.eSupportedPatterns & pattern)) {
+        return BAD_VALUE;
+    }
+
+    numLayers = min(numLayers, layerParams.nLayerCountMax);
+    numBLayers = min(numBLayers, layerParams.nBLayerCountMax);
+
+    if (!inConfigure) {
+        OMX_VIDEO_CONFIG_ANDROID_TEMPORALLAYERINGTYPE layerConfig;
+        InitOMXParams(&layerConfig);
+        layerConfig.nPortIndex = kPortIndexOutput;
+        layerConfig.ePattern = pattern;
+        layerConfig.nPLayerCountActual = numLayers - numBLayers;
+        layerConfig.nBLayerCountActual = numBLayers;
+        layerConfig.bBitrateRatiosSpecified = OMX_FALSE;
+
+        err = mOMX->setConfig(
+                mNode, (OMX_INDEXTYPE)OMX_IndexConfigAndroidVideoTemporalLayering,
+                &layerConfig, sizeof(layerConfig));
+    } else {
+        layerParams.ePattern = pattern;
+        layerParams.nPLayerCountActual = numLayers - numBLayers;
+        layerParams.nBLayerCountActual = numBLayers;
+        layerParams.bBitrateRatiosSpecified = OMX_FALSE;
+
+        err = mOMX->setParameter(
+                mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+                &layerParams, sizeof(layerParams));
+    }
+
+    AString configSchema;
+    if (pattern == OMX_VIDEO_AndroidTemporalLayeringPatternAndroid) {
+        configSchema = AStringPrintf("android.generic.%u+%u", numLayers - numBLayers, numBLayers);
+    } else if (pattern == OMX_VIDEO_AndroidTemporalLayeringPatternWebRTC) {
+        configSchema = AStringPrintf("webrtc.vp8.%u", numLayers);
+    }
+
+    if (err != OK) {
+        ALOGW("Failed to set temporal layers to %s (requested %s)",
+                configSchema.c_str(), tsSchema.c_str());
+        return err;
+    }
+
+    err = mOMX->getParameter(
+            mNode, (OMX_INDEXTYPE)OMX_IndexParamAndroidVideoTemporalLayering,
+            &layerParams, sizeof(layerParams));
+
+    if (err == OK) {
+        ALOGD("Temporal layers requested:%s configured:%s got:%s(%u: P=%u, B=%u)",
+                tsSchema.c_str(), configSchema.c_str(),
+                asString(layerParams.ePattern), layerParams.ePattern,
+                layerParams.nPLayerCountActual, layerParams.nBLayerCountActual);
+
+        if (outputFormat.get() == mOutputFormat.get()) {
+            mOutputFormat = mOutputFormat->dup(); // trigger an output format change event
+        }
+        // assume we got what we configured
+        outputFormat->setString("ts-schema", configSchema);
+    }
+    return err;
+}
+
 status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
@@ -3146,6 +3249,29 @@
     return ERROR_UNSUPPORTED;
 }
 
+status_t ACodec::setPortBufferNum(OMX_U32 portIndex, int bufferNum) {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = portIndex;
+    status_t err;
+    ALOGD("Setting [%s] %s port buffer number: %d", mComponentName.c_str(),
+            portIndex == kPortIndexInput ? "input" : "output", bufferNum);
+    err = mOMX->getParameter(
+        mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != OK) {
+        return err;
+    }
+    def.nBufferCountActual = bufferNum;
+    err = mOMX->setParameter(
+        mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != OK) {
+        // Component could reject this request.
+        ALOGW("Failed to set [%s] %s port buffer number: %d", mComponentName.c_str(),
+            portIndex == kPortIndexInput ? "input" : "output", bufferNum);
+    }
+    return OK;
+}
+
 status_t ACodec::setupVideoDecoder(
         const char *mime, const sp<AMessage> &msg, bool haveNativeWindow,
         bool usingSwRenderer, sp<AMessage> &outputFormat) {
@@ -3202,6 +3328,24 @@
         return err;
     }
 
+    // Request the component to use |tmp| input buffers. If this succeeds,
+    // the component will use |tmp| buffers on its input port; if it fails,
+    // the component keeps its previous input buffer count.
+    if (msg->findInt32("android._num-input-buffers", &tmp)) {
+        err = setPortBufferNum(kPortIndexInput, tmp);
+        if (err != OK)
+            return err;
+    }
+
+    // Request the component to use |tmp| output buffers. If this succeeds,
+    // the component will use |tmp| buffers on its output port; if it fails,
+    // the component keeps its previous output buffer count.
+    if (msg->findInt32("android._num-output-buffers", &tmp)) {
+        err = setPortBufferNum(kPortIndexOutput, tmp);
+        if (err != OK)
+            return err;
+    }
+
     int32_t frameRateInt;
     float frameRateFloat;
     if (!msg->findFloat("frame-rate", &frameRateFloat)) {
@@ -3735,6 +3879,10 @@
             break;
     }
 
+    if (err != OK) {
+        return err;
+    }
+
     // Set up color aspects on input, but propagate them to the output format, as they will
     // not be read back from encoder.
     err = setColorAspectsForVideoEncoder(msg, outputFormat, inputFormat);
@@ -3753,6 +3901,29 @@
         err = OK;
     }
 
+    if (err != OK) {
+        return err;
+    }
+
+    switch (compressionFormat) {
+        case OMX_VIDEO_CodingAVC:
+        case OMX_VIDEO_CodingHEVC:
+            err = configureTemporalLayers(msg, true /* inConfigure */, outputFormat);
+            if (err != OK) {
+                err = OK; // ignore failure
+            }
+            break;
+
+        case OMX_VIDEO_CodingVP8:
+        case OMX_VIDEO_CodingVP9:
+            // TODO: do we need to support android.generic layering? webrtc layering is
+            // already set up in setupVPXEncoderParameters.
+            break;
+
+        default:
+            break;
+    }
+
     if (err == OK) {
         ALOGI("setupVideoEncoder succeeded");
     }
@@ -4236,18 +4407,25 @@
 
     AString tsSchema;
     if (msg->findString("ts-schema", &tsSchema)) {
-        if (tsSchema == "webrtc.vp8.1-layer") {
+        unsigned int numLayers = 0;
+        unsigned int numBLayers = 0;
+        int tags;
+        char dummy;
+        if (sscanf(tsSchema.c_str(), "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1
+                && numLayers > 0) {
             pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
-            tsLayers = 1;
-        } else if (tsSchema == "webrtc.vp8.2-layer") {
+            tsLayers = numLayers;
+        } else if ((tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+                        &numLayers, &dummy, &numBLayers, &dummy))
+                && (tags == 1 || (tags == 3 && dummy == '+'))
+                && numLayers > 0 && numLayers < UINT32_MAX - numBLayers) {
             pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
-            tsLayers = 2;
-        } else if (tsSchema == "webrtc.vp8.3-layer") {
-            pattern = OMX_VIDEO_VPXTemporalLayerPatternWebRTC;
-            tsLayers = 3;
+            // VPX does not have a concept of B-frames, so just count all layers
+            tsLayers = numLayers + numBLayers;
         } else {
-            ALOGW("Unsupported ts-schema [%s]", tsSchema.c_str());
+            ALOGW("Ignoring unsupported ts-schema [%s]", tsSchema.c_str());
         }
+        tsLayers = min(tsLayers, (size_t)OMX_VIDEO_ANDROID_MAXVP8TEMPORALLAYERS);
     }
 
     OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE vp8type;
@@ -7301,7 +7479,12 @@
         }
     }
 
-    return OK;
+    status_t err = configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
+    if (err != OK) {
+        err = OK; // ignore failure
+    }
+
+    return err;
 }
 
 void ACodec::onSignalEndOfInputStream() {
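
A hedged sketch of the two ts-schema spellings that configureTemporalLayers() above accepts; the sscanf patterns are copied from the patch, while the sample strings are assumptions for illustration.

#include <cstdint>
#include <cstdio>

static void parseTsSchema(const char *schema) {
    unsigned numLayers = 0, numBLayers = 0;
    char dummy;
    if (sscanf(schema, "webrtc.vp8.%u-layer%c", &numLayers, &dummy) == 1 && numLayers > 0) {
        printf("%s -> WebRTC pattern, %u layers\n", schema, numLayers);
        return;
    }
    int tags = sscanf(schema, "android.generic.%u%c%u%c",
            &numLayers, &dummy, &numBLayers, &dummy);
    if ((tags == 1 || (tags == 3 && dummy == '+'))
            && numLayers > 0 && numLayers < UINT32_MAX - numBLayers) {
        printf("%s -> Android pattern, %u P-layers + %u B-layers\n",
                schema, numLayers, numBLayers);
        return;
    }
    printf("%s -> unsupported\n", schema);
}

int main() {
    parseTsSchema("webrtc.vp8.2-layer");   // WebRTC pattern, 2 layers
    parseTsSchema("android.generic.2+1");  // Android pattern, 2 P-layers + 1 B-layer
    parseTsSchema("android.generic.3");    // Android pattern, 3 P-layers, no B-layers
    return 0;
}
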
diff --git a/media/libstagefright/AMRExtractor.cpp b/media/libstagefright/AMRExtractor.cpp
index 1458802..0e98db8 100644
--- a/media/libstagefright/AMRExtractor.cpp
+++ b/media/libstagefright/AMRExtractor.cpp
@@ -100,7 +100,10 @@
 static status_t getFrameSizeByOffset(const sp<DataSource> &source,
         off64_t offset, bool isWide, size_t *frameSize) {
     uint8_t header;
-    if (source->readAt(offset, &header, 1) < 1) {
+    ssize_t count = source->readAt(offset, &header, 1);
+    if (count == 0) {
+        return ERROR_END_OF_STREAM;
+    } else if (count < 0) {
         return ERROR_IO;
     }
 
@@ -140,7 +143,10 @@
 
     if (mDataSource->getSize(&streamSize) == OK) {
          while (offset < streamSize) {
-            if (getFrameSizeByOffset(source, offset, mIsWide, &frameSize) != OK) {
+             status_t status = getFrameSizeByOffset(source, offset, mIsWide, &frameSize);
+             if (status == ERROR_END_OF_STREAM) {
+                 break;
+             } else if (status != OK) {
                 return;
             }
 
diff --git a/media/libstagefright/CallbackDataSource.cpp b/media/libstagefright/CallbackDataSource.cpp
index f14d34d..0df7da4 100644
--- a/media/libstagefright/CallbackDataSource.cpp
+++ b/media/libstagefright/CallbackDataSource.cpp
@@ -109,6 +109,10 @@
     }
 }
 
+sp<DecryptHandle> CallbackDataSource::DrmInitialization(const char *mime) {
+    return mIDataSource->DrmInitialization(mime);
+}
+
 TinyCacheSource::TinyCacheSource(const sp<DataSource>& source)
     : mSource(source), mCachedOffset(0), mCachedSize(0) {
     mName = String8::format("TinyCacheSource(%s)", mSource->toString().string());
@@ -146,12 +150,19 @@
         }
     }
 
+
     // Fill the cache and copy to the caller.
     const ssize_t numRead = mSource->readAt(offset, mCache, kCacheSize);
     if (numRead <= 0) {
+        // Flush cache on error
+        mCachedSize = 0;
+        mCachedOffset = 0;
         return numRead;
     }
     if ((size_t)numRead > kCacheSize) {
+        // Flush cache on error
+        mCachedSize = 0;
+        mCachedOffset = 0;
         return ERROR_OUT_OF_RANGE;
     }
 
@@ -172,4 +183,11 @@
     return mSource->flags();
 }
 
+sp<DecryptHandle> TinyCacheSource::DrmInitialization(const char *mime) {
+    // flush cache when DrmInitialization occurs since decrypted
+    // data may differ from what is in cache.
+    mCachedOffset = 0;
+    mCachedSize = 0;
+    return mSource->DrmInitialization(mime);
+}
 } // namespace android
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 6a67fcf..58448010 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -44,6 +44,7 @@
 
 #include <byteswap.h>
 #include "include/ID3.h"
+#include "include/avc_utils.h"
 
 #ifndef UINT32_MAX
 #define UINT32_MAX       (4294967295U)
@@ -2471,6 +2472,15 @@
         if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.capture.fps")) {
             mFileMetaData->setFloat(kKeyCaptureFramerate, *(float *)&val);
         }
+    } else if (dataType == 67 && dataSize >= 4) {
+        // BE signed int32
+        uint32_t val;
+        if (!mDataSource->getUInt32(offset, &val)) {
+            return ERROR_MALFORMED;
+        }
+        if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.video.temporal_layers_count")) {
+            mFileMetaData->setInt32(kKeyTemporalLayerCount, val);
+        }
     } else {
         // add more keys if needed
         ALOGV("ignoring key: type %d, size %d", dataType, dataSize);
@@ -4464,6 +4474,12 @@
                     kKeyTargetTime, targetSampleTimeUs);
         }
 
+        if (mIsAVC) {
+            uint32_t layerId = FindAVCLayerId(
+                    (const uint8_t *)mBuffer->data(), mBuffer->range_length());
+            mBuffer->meta_data()->setInt32(kKeyTemporalLayerId, layerId);
+        }
+
         if (isSyncSample) {
             mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
         }
@@ -4627,6 +4643,12 @@
                         kKeyTargetTime, targetSampleTimeUs);
             }
 
+            if (mIsAVC) {
+                uint32_t layerId = FindAVCLayerId(
+                        (const uint8_t *)mBuffer->data(), mBuffer->range_length());
+                mBuffer->meta_data()->setInt32(kKeyTemporalLayerId, layerId);
+            }
+
             if (isSyncSample) {
                 mBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1);
             }
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 24fb987..427891d 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -43,6 +43,7 @@
 
 #include "include/ESDS.h"
 #include "include/HevcUtils.h"
+#include "include/avc_utils.h"
 
 #ifndef __predict_false
 #define __predict_false(exp) __builtin_expect((exp) != 0, 0)
@@ -70,6 +71,7 @@
 static const char kMetaKey_Build[]      = "com.android.build";
 #endif
 static const char kMetaKey_CaptureFps[] = "com.android.capture.fps";
+static const char kMetaKey_TemporalLayerCount[] = "com.android.video.temporal_layers_count";
 
 static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
     kHevcNalUnitTypeVps,
@@ -1162,6 +1164,37 @@
     }
 }
 
+off64_t MPEG4Writer::addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer) {
+    off64_t old_offset = mOffset;
+
+    const size_t kExtensionNALSearchRange = 64; // bytes to look for non-VCL NALUs
+
+    const uint8_t *dataStart = (const uint8_t *)buffer->data() + buffer->range_offset();
+    const uint8_t *currentNalStart = dataStart;
+    const uint8_t *nextNalStart;
+    const uint8_t *data = dataStart;
+    size_t nextNalSize;
+    size_t searchSize = buffer->range_length() > kExtensionNALSearchRange ?
+                   kExtensionNALSearchRange : buffer->range_length();
+
+    while (getNextNALUnit(&data, &searchSize, &nextNalStart,
+            &nextNalSize, true) == OK) {
+        size_t currentNalSize = nextNalStart - currentNalStart - 4 /* strip start-code */;
+        MediaBuffer *nalBuf = new MediaBuffer((void *)currentNalStart, currentNalSize);
+        addLengthPrefixedSample_l(nalBuf);
+        nalBuf->release();
+
+        currentNalStart = nextNalStart;
+    }
+
+    size_t currentNalOffset = currentNalStart - dataStart;
+    buffer->set_range(buffer->range_offset() + currentNalOffset,
+            buffer->range_length() - currentNalOffset);
+    addLengthPrefixedSample_l(buffer);
+
+    return old_offset;
+}
+
 off64_t MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) {
     off64_t old_offset = mOffset;
 
@@ -1381,6 +1414,19 @@
     return OK;
 }
 
+status_t MPEG4Writer::setTemporalLayerCount(uint32_t layerCount) {
+    if (layerCount > 9) {
+        return BAD_VALUE;
+    }
+
+    if (layerCount > 0) {
+        mMetaKeys->setInt32(kMetaKey_TemporalLayerCount, layerCount);
+        mMoovExtraSize += sizeof(kMetaKey_TemporalLayerCount) + 4 + 32;
+    }
+
+    return OK;
+}
+
 void MPEG4Writer::write(const void *data, size_t size) {
     write(data, 1, size);
 }
@@ -1496,6 +1542,14 @@
     mIsMPEG4 = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
                !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC);
 
+    // store temporal layer count
+    if (!mIsAudio) {
+        int32_t count;
+        if (mMeta->findInt32(kKeyTemporalLayerCount, &count) && count > 1) {
+            mOwner->setTemporalLayerCount(count);
+        }
+    }
+
     setTimeScale();
 }
 
@@ -1684,7 +1738,7 @@
         List<MediaBuffer *>::iterator it = chunk->mSamples.begin();
 
         off64_t offset = (chunk->mTrack->isAvc() || chunk->mTrack->isHevc())
-                                ? addLengthPrefixedSample_l(*it)
+                                ? addMultipleLengthPrefixedSamples_l(*it)
                                 : addSample_l(*it);
 
         if (isFirstSample) {
@@ -2593,7 +2647,7 @@
             trackProgressStatus(timestampUs);
         }
         if (!hasMultipleTracks) {
-            off64_t offset = (mIsAvc || mIsHevc) ? mOwner->addLengthPrefixedSample_l(copy)
+            off64_t offset = (mIsAvc || mIsHevc) ? mOwner->addMultipleLengthPrefixedSamples_l(copy)
                                  : mOwner->addSample_l(copy);
 
             uint32_t count = (mOwner->use32BitFileOffset()
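
The new addMultipleLengthPrefixedSamples_l() above splits a start-code-delimited encoder buffer into individual NAL units and writes each one as its own length-prefixed sample. A minimal sketch of that output layout under the common 4-byte NAL length configuration (assumed here), using assumed NAL payload bytes:

#include <cstdint>
#include <cstdio>
#include <vector>

// Emit each NAL unit as a 4-byte big-endian length followed by its payload.
static std::vector<uint8_t> lengthPrefix(const std::vector<std::vector<uint8_t>> &nalUnits) {
    std::vector<uint8_t> out;
    for (const std::vector<uint8_t> &nal : nalUnits) {
        uint32_t len = (uint32_t)nal.size();
        out.push_back((len >> 24) & 0xff);
        out.push_back((len >> 16) & 0xff);
        out.push_back((len >> 8) & 0xff);
        out.push_back(len & 0xff);
        out.insert(out.end(), nal.begin(), nal.end());
    }
    return out;
}

int main() {
    // Assumed payloads: a short non-VCL (e.g. SVC prefix) NAL and a coded slice.
    std::vector<std::vector<uint8_t>> nalUnits = {{0x6e, 0x80, 0x80}, {0x41, 0x9a, 0x00, 0x11}};
    std::vector<uint8_t> sample = lengthPrefix(nalUnits);
    printf("sample size = %zu bytes\n", sample.size());  // 4 + 3 + 4 + 4 = 15
    return 0;
}
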
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index 1c6c882..92ce88c 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -88,6 +88,7 @@
     virtual void close();
     virtual uint32_t getFlags();
     virtual String8 toString();
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime);
 
 private:
     sp<IMemory> mMemory;
@@ -134,6 +135,10 @@
     return mName;
 }
 
+sp<DecryptHandle> RemoteDataSource::DrmInitialization(const char *mime) {
+    return mSource->DrmInitialization(mime);
+}
+
 // static
 sp<IMediaExtractor> MediaExtractor::Create(
         const sp<DataSource> &source, const char *mime) {
@@ -146,10 +151,6 @@
         ALOGW("creating media extractor in calling process");
         return CreateFromService(source, mime);
     } else {
-        // remote extractor
-        ALOGV("get service manager");
-        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
-
         // Check if it's WVM, since WVMExtractor needs to be created in the media server process,
         // not the extractor process.
         String8 mime8;
@@ -160,6 +161,21 @@
             return new WVMExtractor(source);
         }
 
+        // Check if it's es-based DRM, since DRMExtractor needs to be created in the media server
+        // process, not the extractor process.
+        if (SniffDRM(source, &mime8, &confidence, &meta)) {
+            const char *drmMime = mime8.string();
+            ALOGV("Detected media content as '%s' with confidence %.2f", drmMime, confidence);
+            if (!strncmp(drmMime, "drm+es_based+", 13)) {
+                // DRMExtractor sets container metadata kKeyIsDRM to 1
+                return new DRMExtractor(source, drmMime + 14);
+            }
+        }
+
+        // remote extractor
+        ALOGV("get service manager");
+        sp<IBinder> binder = defaultServiceManager()->getService(String16("media.extractor"));
+
         if (binder != 0) {
             sp<IMediaExtractorService> mediaExService(interface_cast<IMediaExtractorService>(binder));
             sp<IMediaExtractor> ex = mediaExService->makeExtractor(RemoteDataSource::wrap(source), mime);
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index 542a06d..c9c49f5 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -123,7 +123,7 @@
       mNumSampleSizes(0),
       mHasTimeToSample(false),
       mTimeToSampleCount(0),
-      mTimeToSample(),
+      mTimeToSample(NULL),
       mSampleTimeEntries(NULL),
       mCompositionTimeDeltaEntries(NULL),
       mNumCompositionTimeDeltaEntries(0),
@@ -132,7 +132,8 @@
       mNumSyncSamples(0),
       mSyncSamples(NULL),
       mLastSyncSampleIndex(0),
-      mSampleToChunkEntries(NULL) {
+      mSampleToChunkEntries(NULL),
+      mTotalSize(0) {
     mSampleIterator = new SampleIterator(this);
 }
 
@@ -143,6 +144,9 @@
     delete[] mSyncSamples;
     mSyncSamples = NULL;
 
+    delete[] mTimeToSample;
+    mTimeToSample = NULL;
+
     delete mCompositionDeltaLookup;
     mCompositionDeltaLookup = NULL;
 
@@ -233,13 +237,43 @@
         return ERROR_MALFORMED;
     }
 
-    if (SIZE_MAX / sizeof(SampleToChunkEntry) <= (size_t)mNumSampleToChunkOffsets)
+    if ((uint64_t)SIZE_MAX / sizeof(SampleToChunkEntry) <=
+            (uint64_t)mNumSampleToChunkOffsets) {
+        ALOGE("Sample-to-chunk table size too large.");
         return ERROR_OUT_OF_RANGE;
+    }
+
+    mTotalSize += (uint64_t)mNumSampleToChunkOffsets *
+            sizeof(SampleToChunkEntry);
+    if (mTotalSize > kMaxTotalSize) {
+        ALOGE("Sample-to-chunk table size would make sample table too large.\n"
+              "    Requested sample-to-chunk table size = %llu\n"
+              "    Eventual sample table size >= %llu\n"
+              "    Allowed sample table size = %llu\n",
+              (unsigned long long)mNumSampleToChunkOffsets *
+                      sizeof(SampleToChunkEntry),
+              (unsigned long long)mTotalSize,
+              (unsigned long long)kMaxTotalSize);
+        return ERROR_OUT_OF_RANGE;
+    }
 
     mSampleToChunkEntries =
         new (std::nothrow) SampleToChunkEntry[mNumSampleToChunkOffsets];
-    if (!mSampleToChunkEntries)
+    if (!mSampleToChunkEntries) {
+        ALOGE("Cannot allocate sample-to-chunk table with %llu entries.",
+                (unsigned long long)mNumSampleToChunkOffsets);
         return ERROR_OUT_OF_RANGE;
+    }
+
+    if (mNumSampleToChunkOffsets == 0) {
+        return OK;
+    }
+
+    if ((off64_t)(SIZE_MAX - 8 -
+            ((mNumSampleToChunkOffsets - 1) * sizeof(SampleToChunkEntry)))
+            < mSampleToChunkOffset) {
+        return ERROR_MALFORMED;
+    }
 
     for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
         uint8_t buffer[12];
@@ -358,20 +392,41 @@
         // 2) mTimeToSampleCount is the number of entries of the time-to-sample
         //    table.
         // 3) We hope that the table size does not exceed UINT32_MAX.
-        ALOGE("  Error: Time-to-sample table size too large.");
+        ALOGE("Time-to-sample table size too large.");
         return ERROR_OUT_OF_RANGE;
     }
 
     // Note: At this point, we know that mTimeToSampleCount * 2 will not
     // overflow because of the above condition.
-    if (!mDataSource->getVector(data_offset + 8, &mTimeToSample,
-                                mTimeToSampleCount * 2)) {
-        ALOGE("  Error: Incomplete data read for time-to-sample table.");
+
+    uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t);
+    mTotalSize += allocSize;
+    if (mTotalSize > kMaxTotalSize) {
+        ALOGE("Time-to-sample table size would make sample table too large.\n"
+              "    Requested time-to-sample table size = %llu\n"
+              "    Eventual sample table size >= %llu\n"
+              "    Allowed sample table size = %llu\n",
+              (unsigned long long)allocSize,
+              (unsigned long long)mTotalSize,
+              (unsigned long long)kMaxTotalSize);
+        return ERROR_OUT_OF_RANGE;
+    }
+
+    mTimeToSample = new (std::nothrow) uint32_t[mTimeToSampleCount * 2];
+    if (!mTimeToSample) {
+        ALOGE("Cannot allocate time-to-sample table with %llu entries.",
+                (unsigned long long)mTimeToSampleCount);
+        return ERROR_OUT_OF_RANGE;
+    }
+
+    if (mDataSource->readAt(data_offset + 8, mTimeToSample,
+            (size_t)allocSize) < (ssize_t)allocSize) {
+        ALOGE("Incomplete data read for time-to-sample table.");
         return ERROR_IO;
     }
 
-    for (size_t i = 0; i < mTimeToSample.size(); ++i) {
-        mTimeToSample.editItemAt(i) = ntohl(mTimeToSample[i]);
+    for (size_t i = 0; i < mTimeToSampleCount * 2; ++i) {
+        mTimeToSample[i] = ntohl(mTimeToSample[i]);
     }
 
     mHasTimeToSample = true;
@@ -413,18 +468,33 @@
     }
 
     mNumCompositionTimeDeltaEntries = numEntries;
-    uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(uint32_t);
-    if (allocSize > UINT32_MAX) {
+    uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(int32_t);
+    if (allocSize > SIZE_MAX) {
+        ALOGE("Composition-time-to-sample table size too large.");
+        return ERROR_OUT_OF_RANGE;
+    }
+
+    mTotalSize += allocSize;
+    if (mTotalSize > kMaxTotalSize) {
+        ALOGE("Composition-time-to-sample table would make sample table too large.\n"
+              "    Requested composition-time-to-sample table size = %llu\n"
+              "    Eventual sample table size >= %llu\n"
+              "    Allowed sample table size = %llu\n",
+              (unsigned long long)allocSize,
+              (unsigned long long)mTotalSize,
+              (unsigned long long)kMaxTotalSize);
         return ERROR_OUT_OF_RANGE;
     }
 
     mCompositionTimeDeltaEntries = new (std::nothrow) int32_t[2 * numEntries];
-    if (!mCompositionTimeDeltaEntries)
+    if (!mCompositionTimeDeltaEntries) {
+        ALOGE("Cannot allocate composition-time-to-sample table with %llu "
+                "entries.", (unsigned long long)numEntries);
         return ERROR_OUT_OF_RANGE;
+    }
 
-    if (mDataSource->readAt(
-                data_offset + 8, mCompositionTimeDeltaEntries, numEntries * 8)
-            < (ssize_t)numEntries * 8) {
+    if (mDataSource->readAt(data_offset + 8, mCompositionTimeDeltaEntries,
+            (size_t)allocSize) < (ssize_t)allocSize) {
         delete[] mCompositionTimeDeltaEntries;
         mCompositionTimeDeltaEntries = NULL;
 
@@ -465,18 +535,33 @@
         ALOGV("Table of sync samples is empty or has only a single entry!");
     }
 
-    uint64_t allocSize = mNumSyncSamples * (uint64_t)sizeof(uint32_t);
+    uint64_t allocSize = (uint64_t)mNumSyncSamples * sizeof(uint32_t);
     if (allocSize > SIZE_MAX) {
+        ALOGE("Sync sample table size too large.");
+        return ERROR_OUT_OF_RANGE;
+    }
+
+    mTotalSize += allocSize;
+    if (mTotalSize > kMaxTotalSize) {
+        ALOGE("Sync sample table size would make sample table too large.\n"
+              "    Requested sync sample table size = %llu\n"
+              "    Eventual sample table size >= %llu\n"
+              "    Allowed sample table size = %llu\n",
+              (unsigned long long)allocSize,
+              (unsigned long long)mTotalSize,
+              (unsigned long long)kMaxTotalSize);
         return ERROR_OUT_OF_RANGE;
     }
 
     mSyncSamples = new (std::nothrow) uint32_t[mNumSyncSamples];
-    if (!mSyncSamples)
+    if (!mSyncSamples) {
+        ALOGE("Cannot allocate sync sample table with %llu entries.",
+                (unsigned long long)mNumSyncSamples);
         return ERROR_OUT_OF_RANGE;
+    }
 
-    size_t size = mNumSyncSamples * sizeof(uint32_t);
-    if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples, size)
-            != (ssize_t)size) {
+    if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples,
+            (size_t)allocSize) != (ssize_t)allocSize) {
         return ERROR_IO;
     }
 
@@ -544,9 +629,24 @@
         return;
     }
 
-    mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
-    if (!mSampleTimeEntries)
+    mTotalSize += (uint64_t)mNumSampleSizes * sizeof(SampleTimeEntry);
+    if (mTotalSize > kMaxTotalSize) {
+        ALOGE("Sample entry table size would make sample table too large.\n"
+              "    Requested sample entry table size = %llu\n"
+              "    Eventual sample table size >= %llu\n"
+              "    Allowed sample table size = %llu\n",
+              (unsigned long long)mNumSampleSizes * sizeof(SampleTimeEntry),
+              (unsigned long long)mTotalSize,
+              (unsigned long long)kMaxTotalSize);
         return;
+    }
+
+    mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
+    if (!mSampleTimeEntries) {
+        ALOGE("Cannot allocate sample entry table with %llu entries.",
+                (unsigned long long)mNumSampleSizes);
+        return;
+    }
 
     uint32_t sampleIndex = 0;
     uint32_t sampleTime = 0;
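
Each table parser above now charges its allocation against mTotalSize and bails out once the running total crosses kMaxTotalSize (declared in SampleTable.h; the cap's value is not shown in this hunk). A hedged sketch of that accounting, written as an overflow-safe variant with an assumed cap:

#include <cstdint>
#include <cstdio>

// Charge |request| bytes against a running |total|, refusing the allocation
// if it would push the total past |cap|.
static bool chargeAllocation(uint64_t *total, uint64_t request, uint64_t cap) {
    if (request > cap || *total > cap - request) {
        return false;  // this table would make the sample table too large
    }
    *total += request;
    return true;
}

int main() {
    uint64_t total = 0;
    const uint64_t cap = 100 * 1024 * 1024;  // assumed cap, for illustration only
    printf("stts accepted: %d\n", chargeAllocation(&total, 8ull * 1000000, cap));    // 1
    printf("stsc accepted: %d\n", chargeAllocation(&total, 12ull * 20000000, cap));  // 0
    return 0;
}
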
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 377f5fd..d2ba02e 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -158,6 +158,12 @@
     // TODO: Use Flexible color instead
     videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
 
+    // For the thumbnail extraction case, try to allocate a single buffer
+    // on both the input and output ports. NOTE: This request may fail if
+    // the component requires more buffers than that for decoding.
+    videoFormat->setInt32("android._num-input-buffers", 1);
+    videoFormat->setInt32("android._num-output-buffers", 1);
+
     status_t err;
     sp<ALooper> looper = new ALooper;
     looper->start();
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4303d09..1c76ad7 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -22,6 +22,7 @@
 #include <sys/stat.h>
 
 #include <utility>
+#include <vector>
 
 #include "include/ESDS.h"
 #include "include/HevcUtils.h"
@@ -1316,6 +1317,20 @@
         }
 
         convertMessageToMetaDataColorAspects(msg, meta);
+
+        AString tsSchema;
+        if (msg->findString("ts-schema", &tsSchema)) {
+            unsigned int numLayers = 0;
+            unsigned int numBLayers = 0;
+            char dummy;
+            int tags = sscanf(tsSchema.c_str(), "android.generic.%u%c%u%c",
+                    &numLayers, &dummy, &numBLayers, &dummy);
+            if ((tags == 1 || (tags == 3 && dummy == '+'))
+                    && numLayers > 0 && numLayers < UINT32_MAX - numBLayers
+                    && numLayers + numBLayers <= INT32_MAX) {
+                meta->setInt32(kKeyTemporalLayerCount, numLayers + numBLayers);
+            }
+        }
     } else if (mime.startsWith("audio/")) {
         int32_t numChannels;
         if (msg->findInt32("channel-count", &numChannels)) {
@@ -1377,24 +1392,24 @@
     // reassemble the csd data into its original form
     sp<ABuffer> csd0, csd1, csd2;
     if (msg->findBuffer("csd-0", &csd0)) {
+        int csd0size = csd0->size();
         if (mime == MEDIA_MIMETYPE_VIDEO_AVC) {
             sp<ABuffer> csd1;
             if (msg->findBuffer("csd-1", &csd1)) {
-                char avcc[1024]; // that oughta be enough, right?
-                size_t outsize = reassembleAVCC(csd0, csd1, avcc);
-                meta->setData(kKeyAVCC, kKeyAVCC, avcc, outsize);
+                std::vector<char> avcc(csd0size + csd1->size() + 1024);
+                size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
+                meta->setData(kKeyAVCC, kKeyAVCC, avcc.data(), outsize);
             }
         } else if (mime == MEDIA_MIMETYPE_AUDIO_AAC || mime == MEDIA_MIMETYPE_VIDEO_MPEG4) {
-            int csd0size = csd0->size();
-            char esds[csd0size + 31];
+            std::vector<char> esds(csd0size + 31);
             // The written ESDS is actually for an audio stream, but it's enough
             // for transporting the CSD to muxers.
-            reassembleESDS(csd0, esds);
-            meta->setData(kKeyESDS, kKeyESDS, esds, sizeof(esds));
+            reassembleESDS(csd0, esds.data());
+            meta->setData(kKeyESDS, kKeyESDS, esds.data(), esds.size());
         } else if (mime == MEDIA_MIMETYPE_VIDEO_HEVC) {
-            uint8_t hvcc[1024]; // that oughta be enough, right?
-            size_t outsize = reassembleHVCC(csd0, hvcc, 1024, 4);
-            meta->setData(kKeyHVCC, kKeyHVCC, hvcc, outsize);
+            std::vector<uint8_t> hvcc(csd0size + 1024);
+            size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
+            meta->setData(kKeyHVCC, kKeyHVCC, hvcc.data(), outsize);
         } else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
             meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
         } else if (mime == MEDIA_MIMETYPE_AUDIO_OPUS) {
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index ccf3440..0396dc6 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -491,6 +491,28 @@
     return true;
 }
 
+uint32_t FindAVCLayerId(const uint8_t *data, size_t size) {
+    CHECK(data != NULL);
+
+    const unsigned kSvcNalType = 0xE;
+    const unsigned kSvcNalSearchRange = 32;
+    // SVC NAL
+    // |---0 1110|1--- ----|---- ----|iii- ----|
+    //       ^                        ^
+    //   NAL-type = 0xE               layer-Id
+    //
+    // layer_id 0 is for base layer, while 1, 2, ... are enhancement layers.
+    // Layer n uses reference frames from layer 0, 1, ..., n-1.
+
+    uint32_t layerId = 0;
+    sp<ABuffer> svcNAL = FindNAL(
+            data, size > kSvcNalSearchRange ? kSvcNalSearchRange : size, kSvcNalType);
+    if (svcNAL != NULL && svcNAL->size() >= 4) {
+        layerId = (*(svcNAL->data() + 3) >> 5) & 0x7;
+    }
+    return layerId;
+}
+
 sp<MetaData> MakeAACCodecSpecificData(
         unsigned profile, unsigned sampling_freq_index,
         unsigned channel_configuration) {
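
FindAVCLayerId() above keys off the fourth byte of the SVC prefix/extension NAL it locates: the temporal layer id sits in that byte's top three bits. A tiny sketch of just that bit extraction, with an assumed byte value:

#include <cstdint>
#include <cstdio>

int main() {
    const uint8_t svcNalByte3 = 0x40;             // assumed 4th byte: 0100 0000
    uint32_t layerId = (svcNalByte3 >> 5) & 0x7;  // keep the top 3 bits -> 2
    printf("layerId = %u\n", layerId);
    return 0;
}
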
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index 61b9bfd..cecc52b 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -122,7 +122,8 @@
       mSignalledError(false),
       mStride(mWidth){
     initPorts(
-            kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE);
+            1 /* numMinInputBuffers */, kNumBuffers, INPUT_BUF_SIZE,
+            1 /* numMinOutputBuffers */, kNumBuffers, CODEC_MIME_TYPE);
 
     GETTIME(&mTimeStart, NULL);
 
@@ -381,6 +382,48 @@
     resetPlugin();
 }
 
+bool SoftAVC::getVUIParams() {
+    IV_API_CALL_STATUS_T status;
+    ih264d_ctl_get_vui_params_ip_t s_ctl_get_vui_params_ip;
+    ih264d_ctl_get_vui_params_op_t s_ctl_get_vui_params_op;
+
+    s_ctl_get_vui_params_ip.e_cmd = IVD_CMD_VIDEO_CTL;
+    s_ctl_get_vui_params_ip.e_sub_cmd =
+        (IVD_CONTROL_API_COMMAND_TYPE_T)IH264D_CMD_CTL_GET_VUI_PARAMS;
+
+    s_ctl_get_vui_params_ip.u4_size =
+        sizeof(ih264d_ctl_get_vui_params_ip_t);
+
+    s_ctl_get_vui_params_op.u4_size = sizeof(ih264d_ctl_get_vui_params_op_t);
+
+    status = ivdec_api_function(
+            (iv_obj_t *)mCodecCtx, (void *)&s_ctl_get_vui_params_ip,
+            (void *)&s_ctl_get_vui_params_op);
+
+    if (status != IV_SUCCESS) {
+        ALOGW("Error in getting VUI params: 0x%x",
+                s_ctl_get_vui_params_op.u4_error_code);
+        return false;
+    }
+
+    int32_t primaries = s_ctl_get_vui_params_op.u1_colour_primaries;
+    int32_t transfer = s_ctl_get_vui_params_op.u1_tfr_chars;
+    int32_t coeffs = s_ctl_get_vui_params_op.u1_matrix_coeffs;
+    bool fullRange = s_ctl_get_vui_params_op.u1_video_full_range_flag;
+
+    ColorAspects colorAspects;
+    ColorUtils::convertIsoColorAspectsToCodecAspects(
+            primaries, transfer, coeffs, fullRange, colorAspects);
+
+    // Update color aspects if necessary.
+    if (colorAspectsDiffer(colorAspects, mBitstreamColorAspects)) {
+        mBitstreamColorAspects = colorAspects;
+        status_t err = handleColorAspectsChange();
+        CHECK(err == OK);
+    }
+    return true;
+}
+
 bool SoftAVC::setDecodeArgs(
         ivd_video_decode_ip_t *ps_dec_ip,
         ivd_video_decode_op_t *ps_dec_op,
@@ -606,6 +649,8 @@
 
             bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
 
+            getVUIParams();
+
             GETTIME(&mTimeEnd, NULL);
             /* Compute time taken for decode() */
             TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
@@ -641,16 +686,22 @@
                 continue;
             }
 
+            // Combine the resolution change and the color aspects change into a single
+            // port-settings-changed event if necessary.
             if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
                 uint32_t width = s_dec_op.u4_pic_wd;
                 uint32_t height = s_dec_op.u4_pic_ht;
                 bool portWillReset = false;
                 handlePortSettingsChange(&portWillReset, width, height);
-
                 if (portWillReset) {
                     resetDecoder();
                     return;
                 }
+            } else if (mUpdateColorAspects) {
+                notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+                    kDescribeColorAspectsIndex, NULL);
+                mUpdateColorAspects = false;
+                return;
             }
 
             if (s_dec_op.u4_output_present) {
@@ -705,6 +756,10 @@
     }
 }
 
+int SoftAVC::getColorAspectPreference() {
+    return kPreferBitstream;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
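Editor's note: getVUIParams() above funnels the bitstream's VUI colour description into ColorUtils::convertIsoColorAspectsToCodecAspects(), whose signature is taken verbatim from the hunk. A minimal sketch of that conversion in isolation, assuming a build against libstagefright_foundation; the literal input values (code 1, the common BT.709 case, for all three fields) are illustrative.

    #include <stdint.h>
    #include <media/stagefright/foundation/ColorUtils.h>

    using android::ColorAspects;
    using android::ColorUtils;

    // Convert the three ISO colour_description codes plus the full-range flag into
    // framework ColorAspects, exactly as the decoder does after a successful
    // IH264D_CMD_CTL_GET_VUI_PARAMS control call.
    static ColorAspects aspectsFromVuiExample() {
        int32_t primaries = 1, transfer = 1, coeffs = 1;   // hypothetical VUI values
        bool fullRange = false;
        ColorAspects aspects;
        ColorUtils::convertIsoColorAspectsToCodecAspects(
                primaries, transfer, coeffs, fullRange, aspects);
        return aspects;
    }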
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.h b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
index c710c76..154ca38 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.h
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
@@ -59,6 +59,7 @@
     virtual void onQueueFilled(OMX_U32 portIndex);
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
     virtual void onReset();
+    virtual int getColorAspectPreference();
 private:
     // Number of input and output buffers
     enum {
@@ -116,6 +117,8 @@
             OMX_BUFFERHEADERTYPE *outHeader,
             size_t timeStampIx);
 
+    bool getVUIParams();
+
     DISALLOW_EVIL_CONSTRUCTORS(SoftAVC);
 };
 
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 8022467..3490008 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -156,7 +156,7 @@
         outHeader->nFlags = 0;
         outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
         outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
-        if (outHeader->nAllocLen >= outHeader->nFilledLen) {
+        if (outputBufferSafe(outHeader)) {
             uint8_t *dst = outHeader->pBuffer;
             const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
             const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
@@ -166,8 +166,6 @@
             size_t srcVStride = mImg->stride[VPX_PLANE_V];
             copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
         } else {
-            ALOGE("b/27597103, buffer too small");
-            android_errorWriteLog(0x534e4554, "27597103");
             outHeader->nFilledLen = 0;
         }
 
@@ -197,6 +195,25 @@
     return true;
 }
 
+bool SoftVPX::outputBufferSafe(OMX_BUFFERHEADERTYPE *outHeader) {
+    uint32_t width = outputBufferWidth();
+    uint32_t height = outputBufferHeight();
+    uint64_t nFilledLen = width;
+    nFilledLen *= height;
+    if (nFilledLen > UINT32_MAX / 3) {
+        ALOGE("b/29421675, nFilledLen overflow %llu w %u h %u",
+                (unsigned long long)nFilledLen, width, height);
+        android_errorWriteLog(0x534e4554, "29421675");
+        return false;
+    } else if (outHeader->nAllocLen < outHeader->nFilledLen) {
+        ALOGE("b/27597103, buffer too small");
+        android_errorWriteLog(0x534e4554, "27597103");
+        return false;
+    }
+
+    return true;
+}
+
 void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) {
     if (mOutputPortSettingsChange != NONE || mEOSStatus == OUTPUT_FRAMES_FLUSHED) {
         return;
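Editor's note: the guard added in outputBufferSafe() can be exercised in isolation. A minimal sketch using plain C++ and hypothetical names; it mirrors the same two checks, doing the size math in 64 bits so width * height * 3 / 2 cannot wrap 32-bit arithmetic (the overflow tracked as b/29421675) and then comparing against the allocation size (b/27597103).

    #include <cstdint>

    // Returns true if a YV12 frame of width x height fits in allocLen bytes.
    static bool yv12FrameFits(uint32_t width, uint32_t height, uint32_t allocLen) {
        uint64_t luma = static_cast<uint64_t>(width) * height;
        if (luma > UINT32_MAX / 3) {
            return false;                    // would overflow 32-bit math
        }
        uint64_t needed = luma * 3 / 2;      // Y plane + quarter-size U and V planes
        return needed <= allocLen;
    }

    // Example: yv12FrameFits(65536, 65536, UINT32_MAX) returns false; in 32-bit
    // arithmetic the product would silently wrap to 0 and pass the size check.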
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index 8ccbae2..84cf79c 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -66,6 +66,7 @@
     status_t initDecoder();
     status_t destroyDecoder();
     bool outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset);
+    bool outputBufferSafe(OMX_BUFFERHEADERTYPE *outHeader);
 
     DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
 };
diff --git a/media/libstagefright/include/CallbackDataSource.h b/media/libstagefright/include/CallbackDataSource.h
index 43e9b8d..d2187d5 100644
--- a/media/libstagefright/include/CallbackDataSource.h
+++ b/media/libstagefright/include/CallbackDataSource.h
@@ -41,6 +41,7 @@
     virtual String8 toString() {
         return mName;
     }
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL);
 
 private:
     sp<IDataSource> mIDataSource;
@@ -68,6 +69,7 @@
     virtual String8 toString() {
         return mName;
     }
+    virtual sp<DecryptHandle> DrmInitialization(const char *mime = NULL);
 
 private:
     // 2kb comes from experimenting with the time-to-first-frame from a MediaPlayer
diff --git a/media/libstagefright/include/SampleTable.h b/media/libstagefright/include/SampleTable.h
index 54da497..2100ca6 100644
--- a/media/libstagefright/include/SampleTable.h
+++ b/media/libstagefright/include/SampleTable.h
@@ -24,7 +24,6 @@
 #include <media/stagefright/MediaErrors.h>
 #include <utils/RefBase.h>
 #include <utils/threads.h>
-#include <utils/Vector.h>
 
 namespace android {
 
@@ -96,6 +95,9 @@
     static const uint32_t kSampleSizeType32;
     static const uint32_t kSampleSizeTypeCompact;
 
+    // Limit the total size of all internal tables to 200MiB.
+    static const size_t kMaxTotalSize = 200 * (1 << 20);
+
     sp<DataSource> mDataSource;
     Mutex mLock;
 
@@ -113,7 +115,7 @@
 
     bool mHasTimeToSample;
     uint32_t mTimeToSampleCount;
-    Vector<uint32_t> mTimeToSample;
+    uint32_t* mTimeToSample;
 
     struct SampleTimeEntry {
         uint32_t mSampleIndex;
@@ -139,6 +141,9 @@
     };
     SampleToChunkEntry *mSampleToChunkEntries;
 
+    // Approximate size of all tables combined.
+    uint64_t mTotalSize;
+
     friend struct SampleIterator;
 
     // normally we don't round
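Editor's note: the new kMaxTotalSize constant and mTotalSize counter suggest a budgeted-allocation pattern: each table's size is added to a running total and the allocation is refused once the 200 MiB cap would be exceeded. A stand-alone sketch of that pattern under those assumptions, with hypothetical names rather than the real SampleTable code.

    #include <cstdint>
    #include <cstdlib>

    struct TableBudget {
        static const uint64_t kMaxTotalSize = 200ull * (1 << 20);  // 200 MiB cap
        uint64_t totalSize = 0;

        // Allocate 'count' entries of 'entrySize' bytes, or return nullptr if the
        // request alone is oversized or the combined budget would be exceeded.
        void *allocTable(uint64_t count, uint64_t entrySize) {
            if (entrySize != 0 && count > kMaxTotalSize / entrySize) {
                return nullptr;
            }
            uint64_t bytes = count * entrySize;
            if (totalSize + bytes > kMaxTotalSize) {
                return nullptr;
            }
            void *table = malloc(static_cast<size_t>(bytes));
            if (table != nullptr) {
                totalSize += bytes;
            }
            return table;
        }
    };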
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
index 4529007..c9fd745 100644
--- a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -21,6 +21,7 @@
 #include "SimpleSoftOMXComponent.h"
 
 #include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/stagefright/foundation/ColorUtils.h>
 #include <media/IOMX.h>
 
 #include <utils/RefBase.h>
@@ -43,6 +44,16 @@
             OMX_COMPONENTTYPE **component);
 
 protected:
+    enum {
+        kDescribeColorAspectsIndex = kPrepareForAdaptivePlaybackIndex + 1,
+    };
+
+    enum {
+        kNotSupported,
+        kPreferBitstream,
+        kPreferContainer,
+    };
+
     virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
     virtual void onReset();
 
@@ -55,15 +66,37 @@
     virtual OMX_ERRORTYPE getConfig(
             OMX_INDEXTYPE index, OMX_PTR params);
 
+    virtual OMX_ERRORTYPE setConfig(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
     virtual OMX_ERRORTYPE getExtensionIndex(
             const char *name, OMX_INDEXTYPE *index);
 
+    virtual bool supportsDescribeColorAspects();
+
+    virtual int getColorAspectPreference();
+
+    // Sets both the minimum and the actual buffer count of the input port to
+    // |numInputBuffers|, and both the minimum and the actual buffer count of the
+    // output port to |numOutputBuffers|.
     void initPorts(OMX_U32 numInputBuffers,
             OMX_U32 inputBufferSize,
             OMX_U32 numOutputBuffers,
             const char *mimeType,
             OMX_U32 minCompressionRatio = 1u);
 
+    // Sets the input port's minimum buffer count to |numMinInputBuffers| and its
+    // actual buffer count to |numInputBuffers|; sets the output port's minimum
+    // buffer count to |numMinOutputBuffers| and its actual buffer count to
+    // |numOutputBuffers|.
+    void initPorts(OMX_U32 numMinInputBuffers,
+            OMX_U32 numInputBuffers,
+            OMX_U32 inputBufferSize,
+            OMX_U32 numMinOutputBuffers,
+            OMX_U32 numOutputBuffers,
+            const char *mimeType,
+            OMX_U32 minCompressionRatio = 1u);
+
     virtual void updatePortDefinitions(bool updateCrop = true, bool updateInputSize = false);
 
     uint32_t outputBufferWidth();
@@ -74,6 +107,10 @@
         kCropSet,
         kCropChanged,
     };
+
+    // Handles several port change events, including size, crop, stride and
+    // color aspects changes. Triggers an OMX_EventPortSettingsChanged event
+    // if necessary.
     void handlePortSettingsChange(
             bool *portWillReset, uint32_t width, uint32_t height,
             CropSettingsMode cropSettingsMode = kCropUnSet, bool fakeStride = false);
@@ -99,6 +136,29 @@
         AWAITING_ENABLED
     } mOutputPortSettingsChange;
 
+    bool mUpdateColorAspects;
+
+    Mutex mColorAspectsLock;
+    // color aspects passed from the framework.
+    ColorAspects mDefaultColorAspects;
+    // color aspects parsed from the bitstream.
+    ColorAspects mBitstreamColorAspects;
+    // final color aspects after combining the above two aspects.
+    ColorAspects mFinalColorAspects;
+
+    bool colorAspectsDiffer(const ColorAspects &a, const ColorAspects &b);
+
+    // Takes two sets of color aspects and updates mFinalColorAspects, preferring
+    // the non-Unspecified fields of |preferredAspects|.
+    void updateFinalColorAspects(
+            const ColorAspects &otherAspects, const ColorAspects &preferredAspects);
+
+    // Updates mFinalColorAspects based on the codec's color aspect preference.
+    status_t handleColorAspectsChange();
+
+    // Helper function to dump the ColorAspects.
+    void dumpColorAspects(const ColorAspects &colorAspects);
+
 private:
     uint32_t mMinInputBufferSize;
     uint32_t mMinCompressionRatio;
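Editor's note: a minimal sketch of the field-by-field merge that updateFinalColorAspects() implements later in this patch: each field of the preferred aspects wins unless it is Unspecified, in which case the other source fills it in. The struct below is a simplified stand-in for ColorAspects (whose enums also use Unspecified == 0), not the real type.

    #include <cstdint>

    struct Aspects {
        uint32_t range = 0, primaries = 0, transfer = 0, matrixCoeffs = 0;  // 0 == Unspecified
    };

    static uint32_t pick(uint32_t preferred, uint32_t other) {
        return preferred != 0 ? preferred : other;
    }

    // Preferred fields win; Unspecified fields fall back to the other source.
    static Aspects mergeAspects(const Aspects &preferred, const Aspects &other) {
        Aspects out;
        out.range        = pick(preferred.range, other.range);
        out.primaries    = pick(preferred.primaries, other.primaries);
        out.transfer     = pick(preferred.transfer, other.transfer);
        out.matrixCoeffs = pick(preferred.matrixCoeffs, other.matrixCoeffs);
        return out;
    }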
diff --git a/media/libstagefright/include/avc_utils.h b/media/libstagefright/include/avc_utils.h
index 7465b35..235ee63 100644
--- a/media/libstagefright/include/avc_utils.h
+++ b/media/libstagefright/include/avc_utils.h
@@ -85,6 +85,7 @@
 
 bool IsIDR(const sp<ABuffer> &accessUnit);
 bool IsAVCReferenceFrame(const sp<ABuffer> &accessUnit);
+uint32_t FindAVCLayerId(const uint8_t *data, size_t size);
 
 const char *AVCProfileToString(uint8_t profile);
 
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index d3553bd..409cef7 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -68,6 +68,11 @@
         mCodingType(codingType),
         mProfileLevels(profileLevels),
         mNumProfileLevels(numProfileLevels) {
+
+    // Initialize all color aspects to Unspecified.
+    memset(&mDefaultColorAspects, 0, sizeof(ColorAspects));
+    memset(&mBitstreamColorAspects, 0, sizeof(ColorAspects));
+    memset(&mFinalColorAspects, 0, sizeof(ColorAspects));
 }
 
 void SoftVideoDecoderOMXComponent::initPorts(
@@ -76,6 +81,18 @@
         OMX_U32 numOutputBuffers,
         const char *mimeType,
         OMX_U32 minCompressionRatio) {
+    initPorts(numInputBuffers, numInputBuffers, inputBufferSize,
+            numOutputBuffers, numOutputBuffers, mimeType, minCompressionRatio);
+}
+
+void SoftVideoDecoderOMXComponent::initPorts(
+        OMX_U32 numMinInputBuffers,
+        OMX_U32 numInputBuffers,
+        OMX_U32 inputBufferSize,
+        OMX_U32 numMinOutputBuffers,
+        OMX_U32 numOutputBuffers,
+        const char *mimeType,
+        OMX_U32 minCompressionRatio) {
     mMinInputBufferSize = inputBufferSize;
     mMinCompressionRatio = minCompressionRatio;
 
@@ -84,8 +101,8 @@
 
     def.nPortIndex = kInputPortIndex;
     def.eDir = OMX_DirInput;
-    def.nBufferCountMin = numInputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferCountMin = numMinInputBuffers;
+    def.nBufferCountActual = numInputBuffers;
     def.nBufferSize = inputBufferSize;
     def.bEnabled = OMX_TRUE;
     def.bPopulated = OMX_FALSE;
@@ -107,8 +124,8 @@
 
     def.nPortIndex = kOutputPortIndex;
     def.eDir = OMX_DirOutput;
-    def.nBufferCountMin = numOutputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferCountMin = numMinOutputBuffers;
+    def.nBufferCountActual = numOutputBuffers;
     def.bEnabled = OMX_TRUE;
     def.bPopulated = OMX_FALSE;
     def.eDomain = OMX_PortDomainVideo;
@@ -224,9 +241,66 @@
             notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
                    OMX_IndexConfigCommonOutputCrop, NULL);
         }
+    } else if (mUpdateColorAspects) {
+        notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+                kDescribeColorAspectsIndex, NULL);
+        mUpdateColorAspects = false;
     }
 }
 
+void SoftVideoDecoderOMXComponent::dumpColorAspects(const ColorAspects &colorAspects) {
+    ALOGD("dumpColorAspects: (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) ",
+            colorAspects.mRange, asString(colorAspects.mRange),
+            colorAspects.mPrimaries, asString(colorAspects.mPrimaries),
+            colorAspects.mMatrixCoeffs, asString(colorAspects.mMatrixCoeffs),
+            colorAspects.mTransfer, asString(colorAspects.mTransfer));
+}
+
+bool SoftVideoDecoderOMXComponent::colorAspectsDiffer(
+        const ColorAspects &a, const ColorAspects &b) {
+    if (a.mRange != b.mRange
+        || a.mPrimaries != b.mPrimaries
+        || a.mTransfer != b.mTransfer
+        || a.mMatrixCoeffs != b.mMatrixCoeffs) {
+        return true;
+    }
+    return false;
+}
+
+void SoftVideoDecoderOMXComponent::updateFinalColorAspects(
+        const ColorAspects &otherAspects, const ColorAspects &preferredAspects) {
+    Mutex::Autolock autoLock(mColorAspectsLock);
+    ColorAspects newAspects;
+    newAspects.mRange = preferredAspects.mRange != ColorAspects::RangeUnspecified ?
+        preferredAspects.mRange : otherAspects.mRange;
+    newAspects.mPrimaries = preferredAspects.mPrimaries != ColorAspects::PrimariesUnspecified ?
+        preferredAspects.mPrimaries : otherAspects.mPrimaries;
+    newAspects.mTransfer = preferredAspects.mTransfer != ColorAspects::TransferUnspecified ?
+        preferredAspects.mTransfer : otherAspects.mTransfer;
+    newAspects.mMatrixCoeffs = preferredAspects.mMatrixCoeffs != ColorAspects::MatrixUnspecified ?
+        preferredAspects.mMatrixCoeffs : otherAspects.mMatrixCoeffs;
+
+    // Check whether mFinalColorAspects needs to be updated.
+    if (colorAspectsDiffer(mFinalColorAspects, newAspects)) {
+        mFinalColorAspects = newAspects;
+        mUpdateColorAspects = true;
+    }
+}
+
+status_t SoftVideoDecoderOMXComponent::handleColorAspectsChange() {
+    int preference = getColorAspectPreference();
+    ALOGD("Color Aspects preference: %d ", preference);
+
+    if (preference == kPreferBitstream) {
+        updateFinalColorAspects(mDefaultColorAspects, mBitstreamColorAspects);
+    } else if (preference == kPreferContainer) {
+        updateFinalColorAspects(mBitstreamColorAspects, mDefaultColorAspects);
+    } else {
+        return OMX_ErrorUnsupportedSetting;
+    }
+    return OK;
+}
+
 void SoftVideoDecoderOMXComponent::copyYV12FrameToOutputBuffer(
         uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
         size_t srcYStride, size_t srcUStride, size_t srcVStride) {
@@ -450,7 +524,7 @@
 
 OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getConfig(
         OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
+    switch ((int)index) {
         case OMX_IndexConfigCommonOutputCrop:
         {
             OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
@@ -470,22 +544,88 @@
 
             return OMX_ErrorNone;
         }
+        case kDescribeColorAspectsIndex:
+        {
+            if (!supportsDescribeColorAspects()) {
+                return OMX_ErrorUnsupportedIndex;
+            }
+
+            DescribeColorAspectsParams* colorAspectsParams =
+                    (DescribeColorAspectsParams *)params;
+
+            if (colorAspectsParams->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorBadParameter;
+            }
+
+            colorAspectsParams->sAspects = mFinalColorAspects;
+            if (colorAspectsParams->bRequestingDataSpace || colorAspectsParams->bDataSpaceChanged) {
+                return OMX_ErrorUnsupportedSetting;
+            }
+
+            return OMX_ErrorNone;
+        }
 
         default:
             return OMX_ErrorUnsupportedIndex;
     }
 }
 
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::setConfig(
+        OMX_INDEXTYPE index, const OMX_PTR params){
+    switch ((int)index) {
+        case kDescribeColorAspectsIndex:
+        {
+            if (!supportsDescribeColorAspects()) {
+                return OMX_ErrorUnsupportedIndex;
+            }
+            const DescribeColorAspectsParams* colorAspectsParams =
+                    (const DescribeColorAspectsParams *)params;
+
+            if (!isValidOMXParam(colorAspectsParams)) {
+                return OMX_ErrorBadParameter;
+            }
+
+            if (colorAspectsParams->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorBadParameter;
+            }
+
+            // Update color aspects if necessary.
+            if (colorAspectsDiffer(colorAspectsParams->sAspects, mDefaultColorAspects)) {
+                mDefaultColorAspects = colorAspectsParams->sAspects;
+                status_t err = handleColorAspectsChange();
+                CHECK(err == OK);
+            }
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return OMX_ErrorUnsupportedIndex;
+    }
+}
+
+
 OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getExtensionIndex(
         const char *name, OMX_INDEXTYPE *index) {
     if (!strcmp(name, "OMX.google.android.index.prepareForAdaptivePlayback")) {
         *(int32_t*)index = kPrepareForAdaptivePlaybackIndex;
         return OMX_ErrorNone;
+    } else if (!strcmp(name, "OMX.google.android.index.describeColorAspects")
+                && supportsDescribeColorAspects()) {
+        *(int32_t*)index = kDescribeColorAspectsIndex;
+        return OMX_ErrorNone;
     }
 
     return SimpleSoftOMXComponent::getExtensionIndex(name, index);
 }
 
+bool SoftVideoDecoderOMXComponent::supportsDescribeColorAspects() {
+    return getColorAspectPreference() != kNotSupported;
+}
+
+int SoftVideoDecoderOMXComponent::getColorAspectPreference() {
+    return kNotSupported;
+}
+
 void SoftVideoDecoderOMXComponent::onReset() {
     mOutputPortSettingsChange = NONE;
 }
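Editor's note: a rough sketch of how a client reaches the new getConfig/setConfig paths, using the standard OMX IL entry points. DescribeColorAspectsParams comes from the Android HardwareAPI header (include path assumed); only the fields that this patch itself reads are touched, and version handling is elided.

    #include <string.h>
    #include <OMX_Core.h>
    #include <media/hardware/HardwareAPI.h>   // DescribeColorAspectsParams (assumed path)

    // Resolve the extension index and read back the component's merged color
    // aspects, roughly what the framework does after an OMX_EventPortSettingsChanged
    // whose data2 is kDescribeColorAspectsIndex.
    static void queryColorAspects(OMX_HANDLETYPE comp) {
        OMX_INDEXTYPE index;
        if (OMX_GetExtensionIndex(
                comp,
                const_cast<OMX_STRING>("OMX.google.android.index.describeColorAspects"),
                &index) != OMX_ErrorNone) {
            return;  // extension not supported by this component
        }

        android::DescribeColorAspectsParams params;
        memset(&params, 0, sizeof(params));
        params.nSize = sizeof(params);
        params.nPortIndex = 1;                 // the decoders' output port (kOutputPortIndex)
        params.bRequestingDataSpace = OMX_FALSE;
        params.bDataSpaceChanged = OMX_FALSE;

        if (OMX_GetConfig(comp, index, &params) == OMX_ErrorNone) {
            // params.sAspects now holds the component's final color aspects.
        }
    }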
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 98498e9..47573c3 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ASessionDescription"
 #include <utils/Log.h>
+#include <cutils/log.h>
 
 #include "ASessionDescription.h"
 
@@ -211,12 +212,12 @@
 
     *PT = x;
 
-    char key[20];
-    sprintf(key, "a=rtpmap:%lu", x);
+    char key[32];
+    snprintf(key, sizeof(key), "a=rtpmap:%lu", x);
 
     CHECK(findAttribute(index, key, desc));
 
-    sprintf(key, "a=fmtp:%lu", x);
+    snprintf(key, sizeof(key), "a=fmtp:%lu", x);
     if (!findAttribute(index, key, params)) {
         params->clear();
     }
@@ -228,8 +229,11 @@
     *width = 0;
     *height = 0;
 
-    char key[20];
-    sprintf(key, "a=framesize:%lu", PT);
+    char key[33];
+    snprintf(key, sizeof(key), "a=framesize:%lu", PT);
+    if (PT > 9999999) {
+        android_errorWriteLog(0x534e4554, "25747670");
+    }
     AString value;
     if (!findAttribute(index, key, &value)) {
         return false;
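Editor's note: the buffer growth above is driven by simple arithmetic: "a=framesize:" is 12 characters and %lu can print up to 10 digits for a 32-bit unsigned long (20 for 64-bit), so with the NUL terminator the worst case needs at least 23 bytes, more than the old key[20]. Switching to snprintf additionally guarantees truncation instead of a stack overflow. A stand-alone illustration:

    #include <cstdio>

    int main() {
        char key[33];
        unsigned long pt = 4294967295ul;   // worst case for a 32-bit unsigned long
        int n = snprintf(key, sizeof(key), "a=framesize:%lu", pt);
        // n == 22: 12 prefix characters plus 10 digits; with the NUL terminator the
        // old 20-byte buffer could not hold this, which is why sprintf could overflow.
        printf("needs %d bytes plus the NUL terminator\n", n);
        return 0;
    }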
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index f9a9ab9..abe2582 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1042,49 +1042,13 @@
                     return;
                 }
 
-                sp<ABuffer> accessUnit;
-                CHECK(msg->findBuffer("access-unit", &accessUnit));
-
-                uint32_t seqNum = (uint32_t)accessUnit->int32Data();
-
                 if (mSeekPending) {
                     ALOGV("we're seeking, dropping stale packet.");
                     break;
                 }
 
-                if (track->mNewSegment) {
-                    // The sequence number from RTP packet has only 16 bits and is extended
-                    // by ARTPSource. Only the low 16 bits of seq in RTP-Info of reply of
-                    // RTSP "PLAY" command should be used to detect the first RTP packet
-                    // after seeking.
-                    if (track->mAllowedStaleAccessUnits > 0) {
-                        if ((((seqNum ^ track->mFirstSeqNumInSegment) & 0xffff) != 0)) {
-                            // Not the first rtp packet of the stream after seeking, discarding.
-                            track->mAllowedStaleAccessUnits--;
-                            ALOGV("discarding stale access unit (0x%x : 0x%x)",
-                                 seqNum, track->mFirstSeqNumInSegment);
-                            break;
-                        }
-                    } else { // track->mAllowedStaleAccessUnits <= 0
-                        mNumAccessUnitsReceived = 0;
-                        ALOGW_IF(track->mAllowedStaleAccessUnits == 0,
-                             "Still no first rtp packet after %d stale ones",
-                             kMaxAllowedStaleAccessUnits);
-                        track->mAllowedStaleAccessUnits = -1;
-                        break;
-                    }
-
-                    // Now found the first rtp packet of the stream after seeking.
-                    track->mFirstSeqNumInSegment = seqNum;
-                    track->mNewSegment = false;
-                }
-
-                if (seqNum < track->mFirstSeqNumInSegment) {
-                    ALOGV("dropping stale access-unit (%d < %d)",
-                         seqNum, track->mFirstSeqNumInSegment);
-                    break;
-                }
-
+                sp<ABuffer> accessUnit;
+                CHECK(msg->findBuffer("access-unit", &accessUnit));
                 onAccessUnitComplete(trackIndex, accessUnit);
                 break;
             }
@@ -1852,17 +1816,16 @@
             int32_t trackIndex, const sp<ABuffer> &accessUnit) {
         ALOGV("onAccessUnitComplete track %d", trackIndex);
 
+        TrackInfo *track = &mTracks.editItemAt(trackIndex);
         if(!mPlayResponseParsed){
-            ALOGI("play response is not parsed, storing accessunit");
-            TrackInfo *track = &mTracks.editItemAt(trackIndex);
+            uint32_t seqNum = (uint32_t)accessUnit->int32Data();
+            ALOGI("play response is not parsed, storing accessunit %u", seqNum);
             track->mPackets.push_back(accessUnit);
             return;
         }
 
         handleFirstAccessUnit();
 
-        TrackInfo *track = &mTracks.editItemAt(trackIndex);
-
         if (!mAllTracksHaveTime) {
             ALOGV("storing accessUnit, no time established yet");
             track->mPackets.push_back(accessUnit);
@@ -1873,6 +1836,41 @@
             sp<ABuffer> accessUnit = *track->mPackets.begin();
             track->mPackets.erase(track->mPackets.begin());
 
+            uint32_t seqNum = (uint32_t)accessUnit->int32Data();
+            if (track->mNewSegment) {
+                // The sequence number from RTP packet has only 16 bits and is extended
+                // by ARTPSource. Only the low 16 bits of seq in RTP-Info of reply of
+                // RTSP "PLAY" command should be used to detect the first RTP packet
+                // after seeking.
+                if (track->mAllowedStaleAccessUnits > 0) {
+                    if ((((seqNum ^ track->mFirstSeqNumInSegment) & 0xffff) != 0)) {
+                        // Not the first rtp packet of the stream after seeking, discarding.
+                        track->mAllowedStaleAccessUnits--;
+                        ALOGV("discarding stale access unit (0x%x : 0x%x)",
+                             seqNum, track->mFirstSeqNumInSegment);
+                        continue;
+                    }
+                } else { // track->mAllowedStaleAccessUnits <= 0
+                    mNumAccessUnitsReceived = 0;
+                    ALOGW_IF(track->mAllowedStaleAccessUnits == 0,
+                         "Still no first rtp packet after %d stale ones",
+                         kMaxAllowedStaleAccessUnits);
+                    track->mAllowedStaleAccessUnits = -1;
+                    return;
+                }
+
+                // Now found the first rtp packet of the stream after seeking.
+                track->mFirstSeqNumInSegment = seqNum;
+                track->mNewSegment = false;
+            }
+
+            if (seqNum < track->mFirstSeqNumInSegment) {
+                ALOGV("dropping stale access-unit (%d < %d)",
+                     seqNum, track->mFirstSeqNumInSegment);
+                continue;
+            }
+
+
             if (addMediaTimestamp(trackIndex, track, accessUnit)) {
                 postQueueAccessUnit(trackIndex, accessUnit);
             }
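Editor's note: the relocated check compares only the low 16 bits because ARTPSource extends the 16-bit RTP sequence number to 32 bits, while RTP-Info in the PLAY response carries only the 16-bit value. A minimal sketch of that comparison, with illustrative names:

    #include <cstdint>

    // True if an extended 32-bit sequence number refers to the same RTP packet as
    // the (at most 16-bit) first sequence number reported in RTP-Info.
    static bool isFirstPacketAfterSeek(uint32_t extendedSeqNum, uint32_t firstSeqInSegment) {
        return ((extendedSeqNum ^ firstSeqInSegment) & 0xffff) == 0;
    }

    // e.g. isFirstPacketAfterSeek(0x1fffe, 0xfffe) == true   (same low 16 bits)
    //      isFirstPacketAfterSeek(0x1ffff, 0xfffe) == false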
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index 7f6b66b..74729e4 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -40,7 +40,7 @@
 
 LOCAL_CFLAGS += -fvisibility=hidden -D EXPORT='__attribute__ ((visibility ("default")))'
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
 
 LOCAL_SHARED_LIBRARIES := \
     libbinder \
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 4f826e5..8b831f0 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -14,6 +14,8 @@
     liblog \
     libbinder
 
+LOCAL_CFLAGS := -Wall -Werror
+
 include $(BUILD_SHARED_LIBRARY)
 
 include $(CLEAR_VARS)
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 0e564ea..2547746 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1341,12 +1341,10 @@
         }
     } break;
     case OFFLOAD:
-        // only offloadable effects on offload thread
-        if ((desc->flags & EFFECT_FLAG_OFFLOAD_MASK) != EFFECT_FLAG_OFFLOAD_SUPPORTED) {
-            ALOGW("checkEffectCompatibility_l(): non offloadable effect %s created on"
-                    " OFFLOAD thread %s", desc->name, mThreadName);
-            return BAD_VALUE;
-        }
+        // Nothing actionable on offload threads; if the effect:
+        //   - is offloadable: it can be created;
+        //   - is NOT offloadable: it should still be created, but EffectHandle::enable()
+        //     will take care of invalidating the thread's tracks.
         break;
     case DIRECT:
         // Reject any effect on Direct output threads for now, since the format of
@@ -2301,6 +2299,12 @@
     mCallbackThread->resetDraining();
 }
 
+void AudioFlinger::PlaybackThread::errorCallback()
+{
+    ALOG_ASSERT(mCallbackThread != 0);
+    mCallbackThread->setAsyncError();
+}
+
 void AudioFlinger::PlaybackThread::resetWriteBlocked(uint32_t sequence)
 {
     Mutex::Autolock _l(mLock);
@@ -2335,6 +2339,9 @@
     case STREAM_CBK_EVENT_DRAIN_READY:
         me->drainCallback();
         break;
+    case STREAM_CBK_EVENT_ERROR:
+        me->errorCallback();
+        break;
     default:
         ALOGW("asyncCallback() unknown event %d", event);
         break;
@@ -3906,6 +3913,13 @@
     broadcast_l();
 }
 
+void AudioFlinger::PlaybackThread::onAsyncError()
+{
+    for (int i = AUDIO_STREAM_SYSTEM; i < (int)AUDIO_STREAM_CNT; i++) {
+        invalidateTracks((audio_stream_type_t)i);
+    }
+}
+
 void AudioFlinger::MixerThread::threadLoop_mix()
 {
     // mix buffers...
@@ -5222,7 +5236,8 @@
     :   Thread(false /*canCallJava*/),
         mPlaybackThread(playbackThread),
         mWriteAckSequence(0),
-        mDrainSequence(0)
+        mDrainSequence(0),
+        mAsyncError(false)
 {
 }
 
@@ -5240,11 +5255,13 @@
     while (!exitPending()) {
         uint32_t writeAckSequence;
         uint32_t drainSequence;
+        bool asyncError;
 
         {
             Mutex::Autolock _l(mLock);
             while (!((mWriteAckSequence & 1) ||
                      (mDrainSequence & 1) ||
+                     mAsyncError ||
                      exitPending())) {
                 mWaitWorkCV.wait(mLock);
             }
@@ -5258,6 +5275,8 @@
             mWriteAckSequence &= ~1;
             drainSequence = mDrainSequence;
             mDrainSequence &= ~1;
+            asyncError = mAsyncError;
+            mAsyncError = false;
         }
         {
             sp<AudioFlinger::PlaybackThread> playbackThread = mPlaybackThread.promote();
@@ -5268,6 +5287,9 @@
                 if (drainSequence & 1) {
                     playbackThread->resetDraining(drainSequence >> 1);
                 }
+                if (asyncError) {
+                    playbackThread->onAsyncError();
+                }
             }
         }
     }
@@ -5316,6 +5338,13 @@
     }
 }
 
+void AudioFlinger::AsyncCallbackThread::setAsyncError()
+{
+    Mutex::Autolock _l(mLock);
+    mAsyncError = true;
+    mWaitWorkCV.signal();
+}
+
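Editor's note: the mAsyncError plumbing follows the same handoff pattern as the existing write/drain sequences: the stream callback sets a flag under the callback thread's lock and signals its condition variable, and the callback thread latches and clears the flag under the lock, then acts on it with the lock released. A stand-alone sketch of that pattern using standard C++ primitives rather than the AudioFlinger classes:

    #include <condition_variable>
    #include <mutex>

    struct AsyncErrorRelay {
        std::mutex lock;
        std::condition_variable cv;
        bool asyncError = false;
        bool exiting = false;

        // Called from the stream callback context (cf. setAsyncError()).
        void setAsyncError() {
            std::lock_guard<std::mutex> l(lock);
            asyncError = true;
            cv.notify_one();
        }

        // Body of the callback thread's loop (cf. AsyncCallbackThread::threadLoop()).
        void threadLoop(void (*onAsyncError)()) {
            while (true) {
                bool error;
                {
                    std::unique_lock<std::mutex> l(lock);
                    cv.wait(l, [&] { return asyncError || exiting; });
                    if (exiting) {
                        return;
                    }
                    error = asyncError;
                    asyncError = false;
                }
                if (error) {
                    onAsyncError();   // e.g. invalidate all stream types, as onAsyncError() does
                }
            }
        }
    };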
 
 // ----------------------------------------------------------------------------
 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 1bfbca9..2fd7eeb 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -543,6 +543,7 @@
                 void        resetWriteBlocked(uint32_t sequence);
                 void        drainCallback();
                 void        resetDraining(uint32_t sequence);
+                void        errorCallback();
 
     static      int         asyncCallback(stream_callback_event_t event, void *param, void *cookie);
 
@@ -550,6 +551,7 @@
     virtual     bool        waitingAsyncCallback_l();
     virtual     bool        shouldStandby_l();
     virtual     void        onAddNewTrack_l();
+                void        onAsyncError(); // error reported by AsyncCallbackThread
 
     // ThreadBase virtuals
     virtual     void        preExit();
@@ -1044,6 +1046,7 @@
             void        resetWriteBlocked();
             void        setDraining(uint32_t sequence);
             void        resetDraining();
+            void        setAsyncError();
 
 private:
     const wp<PlaybackThread>   mPlaybackThread;
@@ -1057,6 +1060,7 @@
     uint32_t                   mDrainSequence;
     Condition                  mWaitWorkCV;
     Mutex                      mLock;
+    bool                       mAsyncError;
 };
 
 class DuplicatingThread : public MixerThread {
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 8b45adc..c8e5148 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -50,6 +50,7 @@
 LOCAL_MODULE:= libaudiopolicyservice
 
 LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Wall -Werror
 
 include $(BUILD_SHARED_LIBRARY)
 
@@ -102,6 +103,8 @@
 LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
 endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
 
+LOCAL_CFLAGS += -Wall -Werror
+
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 LOCAL_MODULE:= libaudiopolicymanagerdefault
@@ -125,6 +128,8 @@
     $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
     $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
 
+LOCAL_CFLAGS := -Wall -Werror
+
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
 LOCAL_MODULE:= libaudiopolicymanager
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 3b4ae6b..d7da0ad 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -60,6 +60,8 @@
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
+LOCAL_CFLAGS := -Wall -Werror
+
 LOCAL_MODULE := libaudiopolicycomponents
 
 include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
index b828f81..1612714 100644
--- a/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
+++ b/services/audiopolicy/common/managerdefinitions/include/TypeConverter.h
@@ -131,6 +131,7 @@
 typedef TypeConverter<StreamTraits> StreamTypeConverter;
 typedef TypeConverter<DeviceCategoryTraits> DeviceCategoryConverter;
 
+inline
 static SampleRateTraits::Collection samplingRatesFromString(const std::string &samplingRates,
                                                             const char *del = "|")
 {
@@ -139,6 +140,7 @@
     return samplingRateCollection;
 }
 
+inline
 static FormatTraits::Collection formatsFromString(const std::string &formats, const char *del = "|")
 {
     FormatTraits::Collection formatCollection;
@@ -146,6 +148,7 @@
     return formatCollection;
 }
 
+inline
 static audio_format_t formatFromString(const std::string &literalFormat)
 {
     audio_format_t format;
@@ -156,6 +159,7 @@
     return format;
 }
 
+inline
 static audio_channel_mask_t channelMaskFromString(const std::string &literalChannels)
 {
     audio_channel_mask_t channels;
@@ -166,6 +170,7 @@
     return channels;
 }
 
+inline
 static ChannelTraits::Collection channelMasksFromString(const std::string &channels,
                                                         const char *del = "|")
 {
@@ -176,6 +181,7 @@
     return channelMaskCollection;
 }
 
+inline
 static InputChannelTraits::Collection inputChannelMasksFromString(const std::string &inChannels,
                                                                   const char *del = "|")
 {
@@ -185,6 +191,7 @@
     return inputChannelMaskCollection;
 }
 
+inline
 static OutputChannelTraits::Collection outputChannelMasksFromString(const std::string &outChannels,
                                                                     const char *del = "|")
 {
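Editor's note: the added inline keywords are what keep this header compatible with the -Wall -Werror flags introduced elsewhere in this change: a static function defined in a header is duplicated into every translation unit and trips -Wunused-function wherever it is included but not called, while an unused inline function produces no such warning and is merged across translation units. A tiny illustration with a hypothetical header:

    // sample_defaults.h (hypothetical)
    // 'static' here would warn under -Wall -Werror in any file that includes this
    // header without calling the function; 'inline' is safe to include anywhere.
    inline unsigned defaultSampleRate() {
        return 48000u;
    }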
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 35f078e..50453ad 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -104,7 +104,6 @@
 
 ssize_t DeviceVector::remove(const sp<DeviceDescriptor>& item)
 {
-    size_t i;
     ssize_t ret = indexOf(item);
 
     if (ret < 0) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
index ab2b51f..14caf7c 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
@@ -34,7 +34,8 @@
     // Where would this volume index been inserted in the curve point
     size_t indexInUiPosition = mCurvePoints.orderOf(CurvePoint(volIdx, 0));
     if (indexInUiPosition >= nbCurvePoints) {
-        return 0.0f; // out of bounds
+        // use the last point of the table
+        return mCurvePoints[nbCurvePoints - 1].mAttenuationInMb / 100.0f;
     }
     if (indexInUiPosition == 0) {
         if (indexInUiPosition != mCurvePoints[0].mIndex) {
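Editor's note: the changed branch now clamps to the last curve point instead of returning 0 dB, converting its attenuation from millibels (hundredths of a dB, as the mAttenuationInMb name indicates) into dB. A small stand-alone sketch of that clamping with hypothetical names:

    #include <cstddef>

    struct CurvePoint { int index; int attenuationInMb; };   // attenuation in millibels

    // Attenuation in dB for a lookup position, clamping to the last curve point when
    // the position falls past the end of the table instead of returning 0 dB.
    static float attenuationDbFor(const CurvePoint *points, size_t count, size_t pos) {
        if (count == 0) {
            return 0.0f;
        }
        if (pos >= count) {
            pos = count - 1;        // use the last point of the table
        }
        return points[pos].attenuationInMb / 100.0f;
    }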
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 0e64716..ff38df4 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -775,7 +775,6 @@
         const audio_offload_info_t *offloadInfo)
 {
     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-    uint32_t latency = 0;
     status_t status;
 
 #ifdef AUDIO_POLICY_TEST
@@ -1192,7 +1191,7 @@
                 }
             }
         }
-        uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force, 0, NULL, address);
+        (void) /*uint32_t muteWaitMs*/ setOutputDevice(outputDesc, device, force, 0, NULL, address);
 
         // handle special case for sonification while in call
         if (isInCall()) {
@@ -1288,7 +1287,6 @@
             // force restoring the device selection on other active outputs if it differs from the
             // one being selected for this output
             for (size_t i = 0; i < mOutputs.size(); i++) {
-                audio_io_handle_t curOutput = mOutputs.keyAt(i);
                 sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
                 if (desc != outputDesc &&
                         desc->isActive() &&
@@ -1795,7 +1793,7 @@
         ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
         if (patch_index >= 0) {
             sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
-            status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+            (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
             mAudioPatches.removeItemsAt(patch_index);
             patchRemoved = true;
         }
@@ -2708,7 +2706,6 @@
                            true,
                            NULL);
         } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
-            audio_patch_handle_t afPatchHandle = patchDesc->mAfPatchHandle;
             status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
             ALOGV("releaseAudioPatch() patch panel returned %d patchHandle %d",
                                                               status, patchDesc->mAfPatchHandle);
@@ -3187,6 +3184,7 @@
     }
     mEngine->setObserver(this);
     status_t status = mEngine->initCheck();
+    (void) status;
     ALOG_ASSERT(status == NO_ERROR, "Policy engine not initialized(err=%d)", status);
 
     // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
@@ -4066,7 +4064,7 @@
     ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+        (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
         mAudioPatches.removeItemsAt(index);
         mpClientInterface->onAudioPatchListUpdate();
     }
@@ -4095,7 +4093,7 @@
     ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+        (void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
         mAudioPatches.removeItemsAt(index);
         mpClientInterface->onAudioPatchListUpdate();
     }
@@ -5400,7 +5398,6 @@
                                              AudioProfileVector &profiles)
 {
     String8 reply;
-    char *value;
 
     // Format MUST be checked first to update the list of AudioProfile
     if (profiles.hasDynamicFormat()) {
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index a6cd50e..f6e24e4 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -52,9 +52,11 @@
 
 static const nsecs_t kAudioCommandTimeoutNs = seconds(3); // 3 seconds
 
+#ifdef USE_LEGACY_AUDIO_POLICY
 namespace {
     extern struct audio_policy_service_ops aps_ops;
 };
+#endif
 
 // ----------------------------------------------------------------------------
 
@@ -66,11 +68,6 @@
 
 void AudioPolicyService::onFirstRef()
 {
-    char value[PROPERTY_VALUE_MAX];
-    const struct hw_module_t *module;
-    int forced_val;
-    int rc;
-
     {
         Mutex::Autolock _l(mLock);
 
@@ -85,7 +82,8 @@
         ALOGI("AudioPolicyService CSTOR in legacy mode");
 
         /* instantiate the audio policy manager */
-        rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
+        const struct hw_module_t *module;
+        int rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
         if (rc) {
             return;
         }
@@ -1198,6 +1196,7 @@
 int aps_set_voice_volume(void *service, float volume, int delay_ms);
 };
 
+#ifdef USE_LEGACY_AUDIO_POLICY
 namespace {
     struct audio_policy_service_ops aps_ops = {
         .open_output           = aps_open_output,
@@ -1220,5 +1219,6 @@
         .open_input_on_module  = aps_open_input_on_module,
     };
 }; // namespace <unnamed>
+#endif
 
 }; // namespace android
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index e8e18b8..c55ac7f 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -40,6 +40,8 @@
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
 
+LOCAL_CFLAGS := -Wall -Werror
+
 LOCAL_MODULE:= libsoundtriggerservice
 
 include $(BUILD_SHARED_LIBRARY)