Merge "Revert "stagefright: define CB_CODEC_RELEASED and ReleaseReason enum.""
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 66d6913..4217bc6 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -81,6 +81,13 @@
         mSurfaceList.push_back(surface);
     }
 
+    int isReprocess = 0;
+    if ((err = parcel->readInt32(&isReprocess)) != OK) {
+        ALOGE("%s: Failed to read reprocessing from parcel", __FUNCTION__);
+        return err;
+    }
+    mIsReprocess = (isReprocess != 0);
+
     return OK;
 }
 
@@ -118,6 +125,8 @@
         parcel->writeStrongBinder(binder);
     }
 
+    parcel->writeInt32(mIsReprocess ? 1 : 0);
+
     return OK;
 }
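
The two hunks above must stay symmetric: the int32 appended at the end of writeToParcel() is consumed at the same relative position in readFromParcel(), and any non-zero value is normalized back to a bool. A minimal round trip through a standalone Parcel, purely illustrative (the helper name is made up for this sketch):

    #include <binder/Parcel.h>

    using android::Parcel;
    using android::status_t;
    using android::OK;

    // Round-trip a bool the way CaptureRequest now does: write it as an
    // int32 (0/1), read it back, and normalize with != 0.
    static status_t roundTripReprocessFlag(bool in, bool *out) {
        Parcel parcel;
        parcel.writeInt32(in ? 1 : 0);      // mirrors writeToParcel()

        parcel.setDataPosition(0);          // rewind before reading
        int32_t isReprocess = 0;
        status_t err = parcel.readInt32(&isReprocess);
        if (err != OK) {
            return err;                     // same error path as readFromParcel()
        }
        *out = (isReprocess != 0);          // mirrors readFromParcel()
        return OK;
    }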
 
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
index 89c6fb7..2ec08a9 100644
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ b/camera/camera2/ICameraDeviceUser.cpp
@@ -42,6 +42,8 @@
     END_CONFIGURE,
     DELETE_STREAM,
     CREATE_STREAM,
+    CREATE_INPUT_STREAM,
+    GET_INPUT_SURFACE,
     CREATE_DEFAULT_REQUEST,
     GET_CAMERA_INFO,
     WAIT_UNTIL_IDLE,
@@ -225,6 +227,50 @@
         return reply.readInt32();
     }
 
+    virtual status_t createInputStream(int width, int height, int format)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(width);
+        data.writeInt32(height);
+        data.writeInt32(format);
+
+        remote()->transact(CREATE_INPUT_STREAM, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
+    // get the buffer producer of the input stream
+    virtual status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer) {
+        if (producer == NULL) {
+            return BAD_VALUE;
+        }
+
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+
+        remote()->transact(GET_INPUT_SURFACE, data, &reply);
+
+        reply.readExceptionCode();
+        status_t result = reply.readInt32();
+        if (result != OK) {
+            return result;
+        }
+
+        sp<IGraphicBufferProducer> bp = NULL;
+        if (reply.readInt32() != 0) {
+            String16 name = readMaybeEmptyString16(reply);
+            bp = interface_cast<IGraphicBufferProducer>(
+                    reply.readStrongBinder());
+        }
+
+        *producer = bp;
+
+        return *producer == NULL ? INVALID_OPERATION : OK;
+    }
+
     // Create a request object from a template.
     virtual status_t createDefaultRequest(int templateId,
                                           /*out*/
@@ -409,7 +455,35 @@
 
             return NO_ERROR;
         } break;
+        case CREATE_INPUT_STREAM: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            int width, height, format;
 
+            width = data.readInt32();
+            height = data.readInt32();
+            format = data.readInt32();
+            status_t ret = createInputStream(width, height, format);
+
+            reply->writeNoException();
+            reply->writeInt32(ret);
+            return NO_ERROR;
+
+        } break;
+        case GET_INPUT_SURFACE: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+
+            sp<IGraphicBufferProducer> bp;
+            status_t ret = getInputBufferProducer(&bp);
+            sp<IBinder> b(IInterface::asBinder(ret == OK ? bp : NULL));
+
+            reply->writeNoException();
+            reply->writeInt32(ret);
+            reply->writeInt32(1);
+            reply->writeString16(String16("camera input")); // name of surface
+            reply->writeStrongBinder(b);
+
+            return NO_ERROR;
+        } break;
         case CREATE_DEFAULT_REQUEST: {
             CHECK_INTERFACE(ICameraDeviceUser, data, reply);
 
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index 9b786c5..851ad2c 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -56,7 +56,7 @@
         return true;
     }
 
-    status_t MockDrmFactory::createDrmPlugin(const uint8_t uuid[16], DrmPlugin **plugin)
+    status_t MockDrmFactory::createDrmPlugin(const uint8_t /* uuid */[16], DrmPlugin **plugin)
     {
         *plugin = new MockDrmPlugin();
         return OK;
@@ -68,8 +68,9 @@
         return (!memcmp(uuid, mock_uuid, sizeof(mock_uuid)));
     }
 
-    status_t MockCryptoFactory::createPlugin(const uint8_t uuid[16], const void *data,
-                                             size_t size, CryptoPlugin **plugin)
+    status_t MockCryptoFactory::createPlugin(const uint8_t /* uuid */[16],
+                                             const void * /* data */,
+                                             size_t /* size */, CryptoPlugin **plugin)
     {
         *plugin = new MockCryptoPlugin();
         return OK;
@@ -150,7 +151,7 @@
         // Properties used in mock test, set by cts test app returned from mock plugin
         //   byte[] mock-request       -> request
         //   string mock-default-url   -> defaultUrl
-        //   string mock-key-request-type -> keyRequestType
+        //   string mock-keyRequestType -> keyRequestType
 
         index = mByteArrayProperties.indexOfKey(String8("mock-request"));
         if (index < 0) {
@@ -266,8 +267,8 @@
         return OK;
     }
 
-    status_t MockDrmPlugin::getProvisionRequest(String8 const &certType,
-                                                String8 const &certAuthority,
+    status_t MockDrmPlugin::getProvisionRequest(String8 const & /* certType */,
+                                                String8 const & /* certAuthority */,
                                                 Vector<uint8_t> &request,
                                                 String8 &defaultUrl)
     {
@@ -297,8 +298,8 @@
     }
 
     status_t MockDrmPlugin::provideProvisionResponse(Vector<uint8_t> const &response,
-                                                     Vector<uint8_t> &certificate,
-                                                     Vector<uint8_t> &wrappedKey)
+                                                     Vector<uint8_t> & /* certificate */,
+                                                     Vector<uint8_t> & /* wrappedKey */)
     {
         Mutex::Autolock lock(mLock);
         ALOGD("MockDrmPlugin::provideProvisionResponse(%s)",
@@ -317,7 +318,8 @@
         return OK;
     }
 
-    status_t MockDrmPlugin::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop)
+    status_t MockDrmPlugin::getSecureStop(Vector<uint8_t> const & /* ssid */,
+                                          Vector<uint8_t> & secureStop)
     {
         Mutex::Autolock lock(mLock);
         ALOGD("MockDrmPlugin::getSecureStop()");
@@ -439,6 +441,63 @@
                   pData ? vectorToString(*pData) : "{}");
 
             sendEvent(eventType, extra, pSessionId, pData);
+        } else if (name == "mock-send-expiration-update") {
+            int64_t expiryTimeMS;
+            sscanf(value.string(), "%jd", &expiryTimeMS);
+
+            Vector<uint8_t> const *pSessionId = NULL;
+            ssize_t index = mByteArrayProperties.indexOfKey(String8("mock-event-session-id"));
+            if (index >= 0) {
+                pSessionId = &mByteArrayProperties[index];
+            }
+
+            ALOGD("sending expiration-update from mock drm plugin: %jd %s",
+                  expiryTimeMS, pSessionId ? vectorToString(*pSessionId) : "{}");
+
+            sendExpirationUpdate(pSessionId, expiryTimeMS);
+        } else if (name == "mock-send-keys-change") {
+            Vector<uint8_t> const *pSessionId = NULL;
+            ssize_t index = mByteArrayProperties.indexOfKey(String8("mock-event-session-id"));
+            if (index >= 0) {
+                pSessionId = &mByteArrayProperties[index];
+            }
+
+            ALOGD("sending keys-change from mock drm plugin: %s",
+                  pSessionId ? vectorToString(*pSessionId) : "{}");
+
+            Vector<DrmPlugin::KeyStatus> keyStatusList;
+            DrmPlugin::KeyStatus keyStatus;
+            uint8_t keyId1[] = {'k', 'e', 'y', '1'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId1, sizeof(keyId1));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_Usable;
+            keyStatusList.add(keyStatus);
+
+            uint8_t keyId2[] = {'k', 'e', 'y', '2'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId2, sizeof(keyId2));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_Expired;
+            keyStatusList.add(keyStatus);
+
+            uint8_t keyId3[] = {'k', 'e', 'y', '3'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId3, sizeof(keyId3));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_OutputNotAllowed;
+            keyStatusList.add(keyStatus);
+
+            uint8_t keyId4[] = {'k', 'e', 'y', '4'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId4, sizeof(keyId4));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_StatusPending;
+            keyStatusList.add(keyStatus);
+
+            uint8_t keyId5[] = {'k', 'e', 'y', '5'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId5, sizeof(keyId5));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_InternalError;
+            keyStatusList.add(keyStatus);
+
+            sendKeysChange(pSessionId, &keyStatusList, true);
         } else {
             mStringProperties.add(name, value);
         }
@@ -740,7 +799,7 @@
     ssize_t
     MockCryptoPlugin::decrypt(bool secure, const uint8_t key[16], const uint8_t iv[16],
                               Mode mode, const void *srcPtr, const SubSample *subSamples,
-                              size_t numSubSamples, void *dstPtr, AString *errorDetailMsg)
+                              size_t numSubSamples, void *dstPtr, AString * /* errorDetailMsg */)
     {
         ALOGD("MockCryptoPlugin::decrypt(secure=%d, key=%s, iv=%s, mode=%d, src=%p, "
               "subSamples=%s, dst=%p)",
@@ -769,7 +828,7 @@
     {
         String8 result;
         for (size_t i = 0; i < numSubSamples; i++) {
-            result.appendFormat("[%zu] {clear:%zu, encrypted:%zu} ", i,
+            result.appendFormat("[%zu] {clear:%u, encrypted:%u} ", i,
                                 subSamples[i].mNumBytesOfClearData,
                                 subSamples[i].mNumBytesOfEncryptedData);
         }
diff --git a/include/camera/camera2/CaptureRequest.h b/include/camera/camera2/CaptureRequest.h
index e56d61f..eeab217 100644
--- a/include/camera/camera2/CaptureRequest.h
+++ b/include/camera/camera2/CaptureRequest.h
@@ -30,6 +30,7 @@
 
     CameraMetadata          mMetadata;
     Vector<sp<Surface> >    mSurfaceList;
+    bool                    mIsReprocess;
 
     /**
      * Keep impl up-to-date with CaptureRequest.java in frameworks/base
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
index e9f1f5a..c850924 100644
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -103,6 +103,19 @@
 
     virtual status_t        createStream(const OutputConfiguration& outputConfiguration) = 0;
 
+    /**
+     * Create an input stream of width, height, and format (one of
+     * HAL_PIXEL_FORMAT_*)
+     *
+     * Returns the new stream ID when the return value is non-negative,
+     * or a negative status_t error code on failure.
+     */
+    virtual status_t        createInputStream(int width, int height, int format) = 0;
+
+    // get the buffer producer of the input stream
+    virtual status_t        getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer) = 0;
+
     // Create a request object from a template.
     virtual status_t        createDefaultRequest(int templateId,
                                                  /*out*/
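
A rough usage sketch of the two new calls, assuming the caller already holds an ICameraDeviceUser binder; the helper name and error handling are placeholders, not part of this patch:

    #include <camera/camera2/ICameraDeviceUser.h>
    #include <gui/IGraphicBufferProducer.h>
    #include <gui/Surface.h>
    #include <utils/Errors.h>

    using namespace android;

    // Create a reprocess input stream and wrap its producer in a Surface
    // that reprocess frames can later be queued into.
    status_t setUpInputStream(const sp<ICameraDeviceUser> &device,
                              int width, int height, int format,
                              /*out*/ sp<Surface> *inputSurface) {
        // A non-negative return value is the new stream ID; negative is an error.
        status_t streamId = device->createInputStream(width, height, format);
        if (streamId < 0) {
            return streamId;
        }

        sp<IGraphicBufferProducer> producer;
        status_t res = device->getInputBufferProducer(&producer);
        if (res != OK || producer == NULL) {
            return res != OK ? res : INVALID_OPERATION;
        }

        *inputSurface = new Surface(producer);
        return OK;
    }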
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 7be2c3e..dbe2788 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -335,6 +335,13 @@
      * After draining these frames of data, the caller should release them with releaseBuffer().
      * If the track buffer is not empty, obtainBuffer() returns as many contiguous
      * full frames as are available immediately.
+     *
+     * If nonContig is non-NULL, it is an output parameter that will be set to the number of
+     * additional non-contiguous frames that are predicted to be available immediately,
+     * if the client were to release the first frames and then call obtainBuffer() again.
+     * This value is only a prediction, and needs to be confirmed.
+     * It will be set to zero for an error return.
+     *
      * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK
      * regardless of the value of waitCount.
      * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a
@@ -364,11 +371,15 @@
      *  raw         pointer to the buffer
      */
 
-            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
+                                size_t *nonContig = NULL);
 
 private:
     /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
-     * additional non-contiguous frames that are available immediately.
+     * additional non-contiguous frames that are predicted to be available immediately,
+     * if the client were to release the first frames and then call obtainBuffer() again.
+     * This value is only a prediction, and needs to be confirmed.
+     * It will be set to zero for an error return.
      * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
      * in case the requested amount of frames is in two or more non-contiguous regions.
      * FIXME requested and elapsed are both relative times.  Consider changing to absolute time.
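
A hedged sketch of the intended calling pattern in TRANSFER_OBTAIN mode, where nonContig is treated only as a hint to keep draining; buffer consumption is elided and the helper is not part of this change:

    #include <media/AudioRecord.h>

    using namespace android;

    // Drain whatever is immediately available from an AudioRecord created
    // in TRANSFER_OBTAIN mode.
    void drainAvailable(const sp<AudioRecord> &record) {
        size_t nonContig = 0;
        do {
            AudioRecord::Buffer buffer;
            buffer.frameCount = 256;        // request up to 256 frames
            status_t err = record->obtainBuffer(&buffer, 1 /*waitCount*/, &nonContig);
            if (err != NO_ERROR) {
                break;                      // nonContig is set to zero on error
            }
            // ... consume buffer.raw, buffer.size bytes ...
            record->releaseBuffer(&buffer);
        } while (nonContig > 0);            // a prediction only; re-verified each pass
    }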
diff --git a/include/media/AudioResamplerPublic.h b/include/media/AudioResamplerPublic.h
index 0634741..07d946d 100644
--- a/include/media/AudioResamplerPublic.h
+++ b/include/media/AudioResamplerPublic.h
@@ -34,6 +34,16 @@
 // an int32_t of the phase increments, making the resulting sample rate inexact.
 #define AUDIO_RESAMPLER_UP_RATIO_MAX 65536
 
+#define AUDIO_TIMESTRETCH_SPEED_MIN    0.5f
+#define AUDIO_TIMESTRETCH_SPEED_MAX    2.0f
+#define AUDIO_TIMESTRETCH_SPEED_NORMAL 1.0f
+
+#define AUDIO_TIMESTRETCH_PITCH_MIN    0.5f
+#define AUDIO_TIMESTRETCH_PITCH_MAX    2.0f
+#define AUDIO_TIMESTRETCH_PITCH_NORMAL 1.0f
+
+// TODO: Consider putting these inlines into a class scope
+
 // Returns the source frames needed to resample to destination frames.  This is not a precise
 // value and depends on the resampler (and possibly how it handles rounding internally).
 // Nevertheless, this should be an upper bound on the requirements of the resampler.
@@ -58,4 +68,13 @@
     return dstFrames > 2 ? dstFrames - 2 : 0;
 }
 
+static inline size_t sourceFramesNeededWithTimestretch(
+        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate,
+        float speed) {
+    // required is the number of input frames the resampler needs
+    size_t required = sourceFramesNeeded(srcSampleRate, dstFramesRequired, dstSampleRate);
+    // to deliver this, the time stretcher requires:
+    return required * (double)speed + 1 + 1; // accounting for rounding dependencies
+}
+
 #endif // ANDROID_AUDIO_RESAMPLER_PUBLIC_H
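
As a rough numeric check of the new helper (illustrative only): for a 44.1 kHz source feeding a 48 kHz sink, the resampler alone needs roughly 44100/48000 of the destination frame count plus a small margin, and timestretch scales that by the speed plus the two rounding frames added above.

    #include <cstdio>
    #include <media/AudioResamplerPublic.h>

    // Compare resampler-only vs. timestretched source frame requirements
    // for a typical 44.1 kHz track on a 48 kHz sink.
    int main() {
        const uint32_t srcRate = 44100;
        const uint32_t dstRate = 48000;
        const size_t dstFrames = 960;       // 20 ms at 48 kHz
        const float speed = 2.0f;           // AUDIO_TIMESTRETCH_SPEED_MAX

        size_t plain = sourceFramesNeeded(srcRate, dstFrames, dstRate);
        size_t stretched = sourceFramesNeededWithTimestretch(
                srcRate, dstFrames, dstRate, speed);

        // Expect stretched to be roughly plain * speed + 2.
        printf("resampler only: %zu frames, with %.1fx timestretch: %zu frames\n",
                plain, speed, stretched);
        return 0;
    }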
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index e7e0703..a06197f 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -359,6 +359,21 @@
     /* Return current source sample rate in Hz */
             uint32_t    getSampleRate() const;
 
+    /* Set source playback rate for timestretch
+     * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
+     * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
+     *
+     * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
+     * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
+     *
+     * Speed increases the playback rate of media, but does not alter pitch.
+     * Pitch increases the "tonal frequency" of media, but does not affect the playback rate.
+     */
+            status_t    setPlaybackRate(float speed, float pitch);
+
+    /* Return current playback rate */
+            void        getPlaybackRate(float *speed, float *pitch) const;
+
     /* Enables looping and sets the start and end points of looping.
      * Only supported for static buffer mode.
      *
@@ -719,6 +734,9 @@
             // increment mPosition by the delta of mServer, and return new value of mPosition
             uint32_t updateAndGetPosition_l();
 
+            // check that the combination of sample rate and speed is compatible with AudioTrack
+            bool     isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
+
     // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -730,6 +748,8 @@
     float                   mVolume[2];
     float                   mSendLevel;
     mutable uint32_t        mSampleRate;            // mutable because getSampleRate() can update it
+    float                   mSpeed;                 // timestretch: 1.0f for normal speed.
+    float                   mPitch;                 // timestretch: 1.0f for normal pitch.
     size_t                  mFrameCount;            // corresponds to current IAudioTrack, value is
                                                     // reported back by AudioFlinger to the client
     size_t                  mReqFrameCount;         // frame count to request the first or next time
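
A hedged usage sketch, assuming an already-created streaming AudioTrack (the track argument and helper are placeholders); per the AudioTrack.cpp changes later in this patch, the call is rejected for fast, timed, and offloaded/direct tracks, and when the current buffer cannot sustain the requested speed:

    #include <media/AudioResamplerPublic.h>
    #include <media/AudioTrack.h>

    using namespace android;

    // Speed playback up by 1.5x without changing pitch, then read it back.
    status_t speedUp(const sp<AudioTrack> &track) {
        status_t err = track->setPlaybackRate(1.5f, AUDIO_TIMESTRETCH_PITCH_NORMAL);
        if (err != NO_ERROR) {
            // BAD_VALUE: out of range, or the track buffer is too small for
            // this speed; INVALID_OPERATION: fast/timed/offloaded/direct track.
            return err;
        }

        float speed, pitch;
        track->getPlaybackRate(&speed, &pitch);   // now 1.5f / 1.0f
        return NO_ERROR;
    }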
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
index 07742ca..aa04dbe 100644
--- a/include/media/ICrypto.h
+++ b/include/media/ICrypto.h
@@ -25,6 +25,7 @@
 namespace android {
 
 struct AString;
+struct IMemory;
 
 struct ICrypto : public IInterface {
     DECLARE_META_INTERFACE(Crypto);
@@ -43,12 +44,14 @@
 
     virtual void notifyResolution(uint32_t width, uint32_t height) = 0;
 
+    virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) = 0;
+
     virtual ssize_t decrypt(
             bool secure,
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
-            const void *srcPtr,
+            const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
             AString *errorDetailMsg) = 0;
@@ -61,6 +64,9 @@
     virtual status_t onTransact(
             uint32_t code, const Parcel &data, Parcel *reply,
             uint32_t flags = 0);
+private:
+    void readVector(const Parcel &data, Vector<uint8_t> &vector) const;
+    void writeVector(Parcel *reply, Vector<uint8_t> const &vector) const;
 };
 
 }  // namespace android
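
With this change the source data travels in shared memory rather than being copied through the Parcel. A rough caller-side sketch of the new contract, assuming a MemoryDealer-backed allocation; the helper, mode, and sizes are placeholders, not the MediaCodec code that actually drives this interface:

    #include <errno.h>
    #include <string.h>

    #include <binder/MemoryDealer.h>
    #include <media/ICrypto.h>

    using namespace android;

    // Stage the encrypted access unit in ashmem so that only the
    // (IMemory, offset) pair crosses the binder.
    ssize_t decryptViaSharedMemory(const sp<ICrypto> &crypto,
                                   const uint8_t key[16], const uint8_t iv[16],
                                   const void *src, size_t size,
                                   const CryptoPlugin::SubSample *subSamples,
                                   size_t numSubSamples, void *dst,
                                   AString *errorDetailMsg) {
        sp<MemoryDealer> dealer = new MemoryDealer(size, "DecryptSketch");
        sp<IMemory> shared = dealer->allocate(size);
        if (shared == NULL) {
            return -ENOMEM;
        }
        memcpy(shared->pointer(), src, size);   // copy source bytes into ashmem

        return crypto->decrypt(false /*secure*/, key, iv,
                               CryptoPlugin::kMode_AES_CTR,
                               shared, 0 /*offset*/,
                               subSamples, numSubSamples,
                               dst, errorDetailMsg);
    }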
diff --git a/include/media/IMediaCodecList.h b/include/media/IMediaCodecList.h
index e93ea8b..12b52d7 100644
--- a/include/media/IMediaCodecList.h
+++ b/include/media/IMediaCodecList.h
@@ -21,6 +21,8 @@
 #include <binder/IInterface.h>
 #include <binder/Parcel.h>
 
+#include <media/stagefright/foundation/AMessage.h>
+
 namespace android {
 
 struct MediaCodecInfo;
@@ -33,6 +35,8 @@
     virtual size_t countCodecs() const = 0;
     virtual sp<MediaCodecInfo> getCodecInfo(size_t index) const = 0;
 
+    virtual const sp<AMessage> getGlobalSettings() const = 0;
+
     virtual ssize_t findCodecByType(
             const char *type, bool encoder, size_t startIndex = 0) const = 0;
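
A short usage sketch of the new query through the existing MediaCodecList::getInstance() accessor; a NULL return simply means no global settings are configured:

    #define LOG_TAG "CodecSettingsSketch"
    #include <utils/Log.h>

    #include <media/IMediaCodecList.h>
    #include <media/stagefright/MediaCodecList.h>

    using namespace android;

    // Log the codec-list-wide settings message added by this change.
    void dumpGlobalSettings() {
        sp<IMediaCodecList> list = MediaCodecList::getInstance();
        if (list == NULL) {
            return;
        }
        const sp<AMessage> settings = list->getGlobalSettings();
        if (settings != NULL) {
            ALOGV("global codec settings: %s", settings->debugString().c_str());
        }
    }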
 
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
index cd56adb..895a13a 100644
--- a/include/media/MediaCodecInfo.h
+++ b/include/media/MediaCodecInfo.h
@@ -35,6 +35,8 @@
 struct Parcel;
 struct CodecCapabilities;
 
+typedef KeyedVector<AString, AString> CodecSettings;
+
 struct MediaCodecInfo : public RefBase {
     struct ProfileLevel {
         uint32_t mProfile;
@@ -104,6 +106,7 @@
     MediaCodecInfo(AString name, bool encoder, const char *mime);
     void addQuirk(const char *name);
     status_t addMime(const char *mime);
+    status_t updateMime(const char *mime);
     status_t initializeCapabilities(const CodecCapabilities &caps);
     void addDetail(const AString &key, const AString &value);
     void addFeature(const AString &key, int32_t value);
@@ -114,6 +117,7 @@
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecInfo);
 
     friend class MediaCodecList;
+    friend class MediaCodecListOverridesTest;
 };
 
 }  // namespace android
diff --git a/include/media/mediametadataretriever.h b/include/media/mediametadataretriever.h
index b35cf32..7191965 100644
--- a/include/media/mediametadataretriever.h
+++ b/include/media/mediametadataretriever.h
@@ -57,6 +57,7 @@
     METADATA_KEY_IS_DRM          = 22,
     METADATA_KEY_LOCATION        = 23,
     METADATA_KEY_VIDEO_ROTATION  = 24,
+    METADATA_KEY_CAPTURE_FRAMERATE = 25,
 
     // Add more here...
 };
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index c1483f3..a8d0fcb 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -300,6 +300,7 @@
             OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels);
 
     status_t setPriority(int32_t priority);
+    status_t setOperatingRate(float rateFloat, bool isVideo);
 
     status_t setMinBufferSize(OMX_U32 portIndex, size_t size);
 
diff --git a/include/media/stagefright/MediaClock.h b/include/media/stagefright/MediaClock.h
index e9c09a1..dd1a809 100644
--- a/include/media/stagefright/MediaClock.h
+++ b/include/media/stagefright/MediaClock.h
@@ -42,6 +42,7 @@
     void updateMaxTimeMedia(int64_t maxTimeMediaUs);
 
     void setPlaybackRate(float rate);
+    float getPlaybackRate() const;
 
     // query media time corresponding to real time |realUs|, and save the
     // result in |outMediaUs|.
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index d055341..d98fa1a 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -20,6 +20,7 @@
 
 #include <gui/IGraphicBufferProducer.h>
 #include <media/hardware/CryptoAPI.h>
+#include <media/MediaResource.h>
 #include <media/stagefright/foundation/AHandler.h>
 #include <utils/Vector.h>
 
@@ -30,8 +31,12 @@
 struct AReplyToken;
 struct AString;
 struct CodecBase;
-struct ICrypto;
 struct IBatteryStats;
+struct ICrypto;
+struct IMemory;
+struct MemoryDealer;
+class IResourceManagerClient;
+class IResourceManagerService;
 struct SoftwareRenderer;
 struct Surface;
 
@@ -127,6 +132,8 @@
     status_t getOutputFormat(sp<AMessage> *format) const;
     status_t getInputFormat(sp<AMessage> *format) const;
 
+    status_t getWidevineLegacyBuffers(Vector<sp<ABuffer> > *buffers) const;
+
     status_t getInputBuffers(Vector<sp<ABuffer> > *buffers) const;
     status_t getOutputBuffers(Vector<sp<ABuffer> > *buffers) const;
 
@@ -214,11 +221,36 @@
         uint32_t mBufferID;
         sp<ABuffer> mData;
         sp<ABuffer> mEncryptedData;
+        sp<IMemory> mSharedEncryptedBuffer;
         sp<AMessage> mNotify;
         sp<AMessage> mFormat;
         bool mOwnedByClient;
     };
 
+    struct ResourceManagerServiceProxy : public IBinder::DeathRecipient {
+        ResourceManagerServiceProxy();
+        ~ResourceManagerServiceProxy();
+
+        void init();
+
+        // implements DeathRecipient
+        virtual void binderDied(const wp<IBinder>& /*who*/);
+
+        void addResource(
+                int pid,
+                int64_t clientId,
+                const sp<IResourceManagerClient> client,
+                const Vector<MediaResource> &resources);
+
+        void removeResource(int64_t clientId);
+
+        bool reclaimResource(int callingPid, const Vector<MediaResource> &resources);
+
+    private:
+        Mutex mLock;
+        sp<IResourceManagerService> mService;
+    };
+
     State mState;
     sp<ALooper> mLooper;
     sp<ALooper> mCodecLooper;
@@ -232,15 +264,24 @@
     sp<AMessage> mOutputFormat;
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
+    sp<MemoryDealer> mDealer;
+
+    sp<IResourceManagerClient> mResourceManagerClient;
+    sp<ResourceManagerServiceProxy> mResourceManagerService;
 
     bool mBatteryStatNotified;
     bool mIsVideo;
+    int32_t mVideoWidth;
+    int32_t mVideoHeight;
 
     // initial create parameters
     AString mInitName;
     bool mInitNameIsType;
     bool mInitIsEncoder;
 
+    // configure parameter
+    sp<AMessage> mConfigureMsg;
+
     // Used only to synchronize asynchronous getBufferAndFormat
     // across all the other (synchronous) buffer state change
     // operations, such as de/queueIn/OutputBuffer, start and
@@ -308,6 +349,9 @@
     void updateBatteryStat();
     bool isExecuting() const;
 
+    uint64_t getGraphicBufferSize();
+    void addResource(const char *type, uint64_t value);
+
     /* called to get the last codec error when the sticky flag is set.
      * if no such codec error is found, returns UNKNOWN_ERROR.
      */
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index c2bbe4d..9d1d675 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -48,9 +48,14 @@
         return mCodecInfos.itemAt(index);
     }
 
+    virtual const sp<AMessage> getGlobalSettings() const;
+
     // to be used by MediaPlayerService alone
     static sp<IMediaCodecList> getLocalInstance();
 
+    // only to be used in getLocalInstance
+    void updateDetailsForMultipleCodecs(const KeyedVector<AString, CodecSettings>& updates);
+
 private:
     class BinderDeathObserver : public IBinder::DeathRecipient {
         void binderDied(const wp<IBinder> &the_late_who __unused);
@@ -60,6 +65,7 @@
 
     enum Section {
         SECTION_TOPLEVEL,
+        SECTION_SETTINGS,
         SECTION_DECODERS,
         SECTION_DECODER,
         SECTION_DECODER_TYPE,
@@ -74,10 +80,14 @@
 
     status_t mInitCheck;
     Section mCurrentSection;
+    bool mUpdate;
     Vector<Section> mPastSections;
     int32_t mDepth;
     AString mHrefBase;
 
+    sp<AMessage> mGlobalSettings;
+    KeyedVector<AString, CodecSettings> mOverrides;
+
     Vector<sp<MediaCodecInfo> > mCodecInfos;
     sp<MediaCodecInfo> mCurrentInfo;
     sp<IOMX> mOMX;
@@ -87,7 +97,7 @@
 
     status_t initCheck() const;
     void parseXMLFile(const char *path);
-    void parseTopLevelXMLFile(const char *path);
+    void parseTopLevelXMLFile(const char *path, bool ignore_errors = false);
 
     static void StartElementHandlerWrapper(
             void *me, const char *name, const char **attrs);
@@ -98,9 +108,12 @@
     void endElementHandler(const char *name);
 
     status_t includeXMLFile(const char **attrs);
+    status_t addSettingFromAttributes(const char **attrs);
     status_t addMediaCodecFromAttributes(bool encoder, const char **attrs);
     void addMediaCodec(bool encoder, const char *name, const char *type = NULL);
 
+    void setCurrentCodecInfo(bool encoder, const char *name, const char *type);
+
     status_t addQuirk(const char **attrs);
     status_t addTypeFromAttributes(const char **attrs);
     status_t addLimit(const char **attrs);
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 087d016..8bdebf6 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -75,6 +75,8 @@
     kKeyDecoderComponent  = 'decC',  // cstring
     kKeyBufferID          = 'bfID',
     kKeyMaxInputSize      = 'inpS',
+    kKeyMaxWidth          = 'maxW',
+    kKeyMaxHeight         = 'maxH',
     kKeyThumbnailTime     = 'thbT',  // int64_t (usecs)
     kKeyTrackID           = 'trID',
     kKeyIsDRM             = 'idrm',  // int32_t (bool)
@@ -98,6 +100,7 @@
     kKeyCompilation       = 'cpil',  // cstring
     kKeyLocation          = 'loc ',  // cstring
     kKeyTimeScale         = 'tmsl',  // int32_t
+    kKeyCaptureFramerate  = 'capF',  // float (capture fps)
 
     // video profile and level
     kKeyVideoProfile      = 'vprf',  // int32_t
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 5644428..6cc2e2b 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -25,6 +25,7 @@
 #include <utils/Log.h>
 #include <utils/RefBase.h>
 #include <audio_utils/roundup.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/SingleStateQueue.h>
 
 namespace android {
@@ -113,6 +114,14 @@
                     mPosLoopQueue;
 };
 
+
+struct AudioTrackPlaybackRate {
+    float mSpeed;
+    float mPitch;
+};
+
+typedef SingleStateQueue<AudioTrackPlaybackRate> AudioTrackPlaybackRateQueue;
+
 // ----------------------------------------------------------------------------
 
 // Important: do not add any virtual methods, including ~
@@ -159,6 +168,8 @@
                 uint32_t    mSampleRate;    // AudioTrack only: client's requested sample rate in Hz
                                             // or 0 == default. Write-only client, read-only server.
 
+                AudioTrackPlaybackRateQueue::Shared mPlaybackRateQueue;
+
                 // client write-only, server read-only
                 uint16_t    mSendLevel;      // Fixed point U4.12 so 0x1000 means 1.0
 
@@ -313,7 +324,8 @@
     AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
             size_t frameSize, bool clientInServer = false)
         : ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
-          clientInServer) { }
+          clientInServer),
+          mPlaybackRateMutator(&cblk->mPlaybackRateQueue) { }
     virtual ~AudioTrackClientProxy() { }
 
     // No barriers on the following operations, so the ordering of loads/stores
@@ -333,6 +345,13 @@
         mCblk->mSampleRate = sampleRate;
     }
 
+    void        setPlaybackRate(float speed, float pitch) {
+        AudioTrackPlaybackRate playbackRate;
+        playbackRate.mSpeed = speed;
+        playbackRate.mPitch = pitch;
+        mPlaybackRateMutator.push(playbackRate);
+    }
+
     virtual void flush();
 
     virtual uint32_t    getUnderrunFrames() const {
@@ -344,6 +363,9 @@
     bool        getStreamEndDone() const;
 
     status_t    waitStreamEndDone(const struct timespec *requested);
+
+private:
+    AudioTrackPlaybackRateQueue::Mutator   mPlaybackRateMutator;
 };
 
 class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
@@ -458,8 +480,11 @@
 public:
     AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
             size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
-        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) {
+        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer),
+          mPlaybackRateObserver(&cblk->mPlaybackRateQueue) {
         mCblk->mSampleRate = sampleRate;
+        mPlaybackRate.mSpeed = AUDIO_TIMESTRETCH_SPEED_NORMAL;
+        mPlaybackRate.mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL;
     }
 protected:
     virtual ~AudioTrackServerProxy() { }
@@ -493,6 +518,13 @@
 
     // Return the total number of frames that AudioFlinger has obtained and released
     virtual size_t      framesReleased() const { return mCblk->mServer; }
+
+    // Return the playback speed and pitch read atomically. Not multi-thread safe on server side.
+    void                getPlaybackRate(float *speed, float *pitch);
+
+private:
+    AudioTrackPlaybackRate                  mPlaybackRate;  // last observed playback rate
+    AudioTrackPlaybackRateQueue::Observer   mPlaybackRateObserver;
 };
 
 class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
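
The speed/pitch pair crosses from client to server through a SingleStateQueue: the client proxy pushes the latest value, and the server proxy polls it lazily, keeping its last observed value when nothing new has arrived. An illustrative sketch using the new types directly; in the real code the Shared instance lives inside the audio_track_cblk_t in shared memory, and this assumes the Mutator constructor takes care of initializing it:

    #include <media/AudioResamplerPublic.h>
    #include <private/media/AudioTrackShared.h>

    using namespace android;

    void playbackRateQueueDemo() {
        // Normally embedded in audio_track_cblk_t; assumed here to be
        // initialized by the Mutator, as with a freshly zeroed cblk.
        AudioTrackPlaybackRateQueue::Shared shared;
        AudioTrackPlaybackRateQueue::Mutator mutator(&shared);     // client proxy side
        AudioTrackPlaybackRateQueue::Observer observer(&shared);   // server proxy side

        AudioTrackPlaybackRate rate;
        rate.mSpeed = 1.5f;
        rate.mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL;
        mutator.push(rate);                 // what setPlaybackRate() does

        AudioTrackPlaybackRate observed;
        observed.mSpeed = AUDIO_TIMESTRETCH_SPEED_NORMAL;
        observed.mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL;
        if (observer.poll(observed)) {      // what getPlaybackRate() does
            // observed now holds {1.5f, 1.0f}; if poll() had returned false,
            // the server would keep its previously observed value.
        }
    }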
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index f4cdde2..5bbe786 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -596,15 +596,21 @@
     return status;
 }
 
-status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
 {
     if (audioBuffer == NULL) {
+        if (nonContig != NULL) {
+            *nonContig = 0;
+        }
         return BAD_VALUE;
     }
     if (mTransfer != TRANSFER_OBTAIN) {
         audioBuffer->frameCount = 0;
         audioBuffer->size = 0;
         audioBuffer->raw = NULL;
+        if (nonContig != NULL) {
+            *nonContig = 0;
+        }
         return INVALID_OPERATION;
     }
 
@@ -623,7 +629,7 @@
         ALOGE("%s invalid waitCount %d", __func__, waitCount);
         requested = NULL;
     }
-    return obtainBuffer(audioBuffer, requested);
+    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
 }
 
 status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 9e9ec5b..d32db7c 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -56,6 +56,24 @@
     return convertTimespecToUs(tv);
 }
 
+// Must match similar computation in createTrack_l in Threads.cpp.
+// TODO: Move to a common library
+static size_t calculateMinFrameCount(
+        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
+        uint32_t sampleRate, float speed)
+{
+    // Ensure that buffer depth covers at least audio hardware latency
+    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
+    if (minBufCount < 2) {
+        minBufCount = 2;
+    }
+    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
+            "sampleRate %u  speed %f  minBufCount: %u",
+            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount);
+    return minBufCount * sourceFramesNeededWithTimestretch(
+            sampleRate, afFrameCount, afSampleRate, speed);
+}
+
 // static
 status_t AudioTrack::getMinFrameCount(
         size_t* frameCount,
@@ -94,13 +112,10 @@
         return status;
     }
 
-    // Ensure that buffer depth covers at least audio hardware latency
-    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
-    if (minBufCount < 2) {
-        minBufCount = 2;
-    }
+    // When called from createTrack, speed is 1.0f (normal speed).
+    // This is rechecked when the playback rate is set (TODO: recheck on sample rate changes, too).
+    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f);
 
-    *frameCount = minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
     // The formula above should always produce a non-zero value under normal circumstances:
     // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
     // Return error in the unlikely event that it does not, as that's part of the API contract.
@@ -109,8 +124,8 @@
                 streamType, sampleRate);
         return BAD_VALUE;
     }
-    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%u, afSampleRate=%u, afLatency=%u",
-            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
+    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
+            *frameCount, afFrameCount, afSampleRate, afLatency);
     return NO_ERROR;
 }
 
@@ -360,6 +375,8 @@
         return BAD_VALUE;
     }
     mSampleRate = sampleRate;
+    mSpeed = AUDIO_TIMESTRETCH_SPEED_NORMAL;
+    mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL;
 
     // Make copy of input parameter offloadInfo so that in the future:
     //  (a) createTrack_l doesn't need it as an input parameter
@@ -689,6 +706,7 @@
     if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
         return BAD_VALUE;
     }
+    // TODO: Should we also check if the buffer size is compatible?
 
     mSampleRate = rate;
     mProxy->setSampleRate(rate);
@@ -719,6 +737,42 @@
     return mSampleRate;
 }
 
+status_t AudioTrack::setPlaybackRate(float speed, float pitch)
+{
+    if (speed < AUDIO_TIMESTRETCH_SPEED_MIN
+            || speed > AUDIO_TIMESTRETCH_SPEED_MAX
+            || pitch < AUDIO_TIMESTRETCH_PITCH_MIN
+            || pitch > AUDIO_TIMESTRETCH_PITCH_MAX) {
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (speed == mSpeed && pitch == mPitch) {
+        return NO_ERROR;
+    }
+    if (mIsTimed || isOffloadedOrDirect_l()) {
+        return INVALID_OPERATION;
+    }
+    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
+        return INVALID_OPERATION;
+    }
+    // Check if the buffer size is compatible.
+    if (!isSampleRateSpeedAllowed_l(mSampleRate, speed)) {
+        ALOGV("setPlaybackRate(%f, %f) failed", speed, pitch);
+        return BAD_VALUE;
+    }
+    mSpeed = speed;
+    mPitch = pitch;
+    mProxy->setPlaybackRate(speed, pitch);
+    return NO_ERROR;
+}
+
+void AudioTrack::getPlaybackRate(float *speed, float *pitch) const
+{
+    AutoMutex lock(mLock);
+    *speed = mSpeed;
+    *pitch = mPitch;
+}
+
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
     if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
@@ -1086,8 +1140,16 @@
         // there _is_ a frameCount parameter.  We silently ignore it.
         frameCount = mSharedBuffer->size() / mFrameSize;
     } else {
-        // For fast and normal streaming tracks,
-        // the frame count calculations and checks are done by server
+        // For fast tracks, the frame count calculations and checks are done by the server
+
+        if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
+            // for normal tracks precompute the frame count based on speed.
+            const size_t minFrameCount = calculateMinFrameCount(
+                    afLatency, afFrameCount, afSampleRate, mSampleRate, mSpeed);
+            if (frameCount < minFrameCount) {
+                frameCount = minFrameCount;
+            }
+        }
     }
 
     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
@@ -1230,6 +1292,7 @@
     }
 
     mAudioTrack->attachAuxEffect(mAuxEffectId);
+    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
     // FIXME don't believe this lie
     mLatency = afLatency + (1000*frameCount) / mSampleRate;
 
@@ -1255,6 +1318,7 @@
 
     mProxy->setSendLevel(mSendLevel);
     mProxy->setSampleRate(mSampleRate);
+    mProxy->setPlaybackRate(mSpeed, mPitch);
     mProxy->setMinimum(mNotificationFramesAct);
 
     mDeathNotifier = new DeathNotifier(this);
@@ -1274,12 +1338,18 @@
 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
 {
     if (audioBuffer == NULL) {
+        if (nonContig != NULL) {
+            *nonContig = 0;
+        }
         return BAD_VALUE;
     }
     if (mTransfer != TRANSFER_OBTAIN) {
         audioBuffer->frameCount = 0;
         audioBuffer->size = 0;
         audioBuffer->raw = NULL;
+        if (nonContig != NULL) {
+            *nonContig = 0;
+        }
         return INVALID_OPERATION;
     }
 
@@ -1617,6 +1687,7 @@
 
     // Cache other fields that will be needed soon
     uint32_t sampleRate = mSampleRate;
+    float speed = mSpeed;
     uint32_t notificationFrames = mNotificationFramesAct;
     if (mRefreshRemaining) {
         mRefreshRemaining = false;
@@ -1745,7 +1816,7 @@
     if (minFrames != (uint32_t) ~0) {
         // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
         static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
-        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
+        ns = ((double)minFrames * 1000000000) / ((double)sampleRate * speed) + kFudgeNs;
     }
 
     // If not supplying data by EVENT_MORE_DATA, then we're done
@@ -1786,7 +1857,8 @@
         if (mRetryOnPartialBuffer && !isOffloaded()) {
             mRetryOnPartialBuffer = false;
             if (avail < mRemainingFrames) {
-                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
+                int64_t myns = ((double)(mRemainingFrames - avail) * 1100000000)
+                        / ((double)sampleRate * speed);
                 if (ns < 0 || myns < ns) {
                     ns = myns;
                 }
@@ -1841,7 +1913,7 @@
         // that total to a sum == notificationFrames.
         if (0 < misalignment && misalignment <= mRemainingFrames) {
             mRemainingFrames = misalignment;
-            return (mRemainingFrames * 1100000000LL) / sampleRate;
+            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
         }
 #endif
 
@@ -1936,6 +2008,41 @@
     return mPosition += (uint32_t) delta;
 }
 
+bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
+{
+    // applicable for mixing tracks only (not offloaded or direct)
+    if (mStaticProxy != 0) {
+        return true; // static tracks do not have issues with buffer sizing.
+    }
+    status_t status;
+    uint32_t afLatency;
+    status = AudioSystem::getLatency(mOutput, &afLatency);
+    if (status != NO_ERROR) {
+        ALOGE("getLatency(%d) failed status %d", mOutput, status);
+        return false;
+    }
+
+    size_t afFrameCount;
+    status = AudioSystem::getFrameCount(mOutput, &afFrameCount);
+    if (status != NO_ERROR) {
+        ALOGE("getFrameCount(output=%d) status %d", mOutput, status);
+        return false;
+    }
+
+    uint32_t afSampleRate;
+    status = AudioSystem::getSamplingRate(mOutput, &afSampleRate);
+    if (status != NO_ERROR) {
+        ALOGE("getSamplingRate(output=%d) status %d", mOutput, status);
+        return false;
+    }
+
+    const size_t minFrameCount =
+            calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, speed);
+    ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
+            mFrameCount, minFrameCount);
+    return mFrameCount >= minFrameCount;
+}
+
 status_t AudioTrack::setParameters(const String8& keyValuePairs)
 {
     AutoMutex lock(mLock);
@@ -2001,7 +2108,8 @@
                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
                 }
                 const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
-                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
+                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
+                        / ((double)mSampleRate * mSpeed);
 
                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
                     // Verify that the counter can't count faster than the sample rate
@@ -2088,7 +2196,8 @@
     snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
             mChannelCount, mFrameCount);
     result.append(buffer);
-    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
+    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
+            mSampleRate, mSpeed, mStatus);
     result.append(buffer);
     snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
     result.append(buffer);
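
As a rough sanity check of the new sizing math (illustrative numbers only): a 48 kHz output with 960-frame HAL buffers and 40 ms latency gives minBufCount = 40 / (1000*960/48000) = 2, so a 44.1 kHz client needs about 2 * sourceFramesNeeded(44100, 960, 48000) frames at normal speed and roughly twice that at 2.0x; isSampleRateSpeedAllowed_l() compares that figure against mFrameCount before accepting setPlaybackRate(). The sketch below re-derives the arithmetic of calculateMinFrameCount() standalone:

    #include <cstdio>
    #include <media/AudioResamplerPublic.h>

    // How the minimum client frame count scales with speed for a 44.1 kHz
    // track on a hypothetical 48 kHz / 960-frame / 40 ms output.
    int main() {
        const uint32_t afLatencyMs = 40, afFrameCount = 960, afSampleRate = 48000;
        const uint32_t sampleRate = 44100;

        uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate); // 2
        if (minBufCount < 2) {
            minBufCount = 2;
        }

        for (float speed = 1.0f; speed <= 2.0f; speed += 0.5f) {
            size_t frames = minBufCount * sourceFramesNeededWithTimestretch(
                    sampleRate, afFrameCount, afSampleRate, speed);
            printf("speed %.1f -> min client frames %zu\n", speed, frames);
        }
        return 0;
    }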
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 6d5f1af..ba67b40 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -793,6 +793,16 @@
     (void) android_atomic_or(CBLK_UNDERRUN, &cblk->mFlags);
 }
 
+void AudioTrackServerProxy::getPlaybackRate(float *speed, float *pitch)
+{   // do not call from multiple threads without holding lock
+    AudioTrackPlaybackRate playbackRate;
+    if (mPlaybackRateObserver.poll(playbackRate)) {
+        mPlaybackRate = playbackRate;
+    }
+    *speed = mPlaybackRate.mSpeed;
+    *pitch = mPlaybackRate.mPitch;
+}
+
 // ---------------------------------------------------------------------------
 
 StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index c26c5bf..9246a7c 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include <binder/Parcel.h>
+#include <binder/IMemory.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -34,6 +35,7 @@
     REQUIRES_SECURE_COMPONENT,
     DECRYPT,
     NOTIFY_RESOLUTION,
+    SET_MEDIADRM_SESSION,
 };
 
 struct BpCrypto : public BpInterface<ICrypto> {
@@ -97,7 +99,7 @@
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
-            const void *srcPtr,
+            const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
             AString *errorDetailMsg) {
@@ -126,7 +128,8 @@
         }
 
         data.writeInt32(totalSize);
-        data.write(srcPtr, totalSize);
+        data.writeStrongBinder(IInterface::asBinder(sharedBuffer));
+        data.writeInt32(offset);
 
         data.writeInt32(numSubSamples);
         data.write(subSamples, sizeof(CryptoPlugin::SubSample) * numSubSamples);
@@ -159,7 +162,28 @@
         remote()->transact(NOTIFY_RESOLUTION, data, &reply);
     }
 
+    virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
+
+        writeVector(data, sessionId);
+        remote()->transact(SET_MEDIADRM_SESSION, data, &reply);
+
+        return reply.readInt32();
+    }
+
 private:
+    void readVector(Parcel &reply, Vector<uint8_t> &vector) const {
+        uint32_t size = reply.readInt32();
+        vector.insertAt((size_t)0, size);
+        reply.read(vector.editArray(), size);
+    }
+
+    void writeVector(Parcel &data, Vector<uint8_t> const &vector) const {
+        data.writeInt32(vector.size());
+        data.write(vector.array(), vector.size());
+    }
+
     DISALLOW_EVIL_CONSTRUCTORS(BpCrypto);
 };
 
@@ -167,6 +191,17 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
+void BnCrypto::readVector(const Parcel &data, Vector<uint8_t> &vector) const {
+    uint32_t size = data.readInt32();
+    vector.insertAt((size_t)0, size);
+    data.read(vector.editArray(), size);
+}
+
+void BnCrypto::writeVector(Parcel *reply, Vector<uint8_t> const &vector) const {
+    reply->writeInt32(vector.size());
+    reply->write(vector.array(), vector.size());
+}
+
 status_t BnCrypto::onTransact(
     uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
     switch (code) {
@@ -245,8 +280,9 @@
             data.read(iv, sizeof(iv));
 
             size_t totalSize = data.readInt32();
-            void *srcData = malloc(totalSize);
-            data.read(srcData, totalSize);
+            sp<IMemory> sharedBuffer =
+                interface_cast<IMemory>(data.readStrongBinder());
+            int32_t offset = data.readInt32();
 
             int32_t numSubSamples = data.readInt32();
 
@@ -265,15 +301,21 @@
             }
 
             AString errorDetailMsg;
-            ssize_t result = decrypt(
+            ssize_t result;
+
+            if (offset + totalSize > sharedBuffer->size()) {
+                result = -EINVAL;
+            } else {
+                result = decrypt(
                     secure,
                     key,
                     iv,
                     mode,
-                    srcData,
+                    sharedBuffer, offset,
                     subSamples, numSubSamples,
                     dstPtr,
                     &errorDetailMsg);
+            }
 
             reply->writeInt32(result);
 
@@ -294,9 +336,6 @@
             delete[] subSamples;
             subSamples = NULL;
 
-            free(srcData);
-            srcData = NULL;
-
             return OK;
         }
 
@@ -311,6 +350,15 @@
             return OK;
         }
 
+        case SET_MEDIADRM_SESSION:
+        {
+            CHECK_INTERFACE(ICrypto, data, reply);
+            Vector<uint8_t> sessionId;
+            readVector(data, sessionId);
+            reply->writeInt32(setMediaDrmSession(sessionId));
+            return OK;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
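
The session id is carried as a length-prefixed byte array; readVector()/writeVector() keep the Bp and Bn sides symmetric. The same convention shown as a standalone round trip (illustrative only):

    #include <binder/Parcel.h>
    #include <utils/Vector.h>

    using namespace android;

    // writeInt32(size) followed by the raw bytes, read back in the same order.
    static void writeVector(Parcel &data, const Vector<uint8_t> &v) {
        data.writeInt32(v.size());
        data.write(v.array(), v.size());
    }

    static void readVector(const Parcel &data, Vector<uint8_t> &v) {
        uint32_t size = data.readInt32();
        v.insertAt((size_t)0, size);        // grow to 'size' entries
        data.read(v.editArray(), size);
    }

    void roundTripSessionId(const Vector<uint8_t> &sessionId) {
        Parcel p;
        writeVector(p, sessionId);
        p.setDataPosition(0);

        Vector<uint8_t> copy;
        readVector(p, copy);                // copy now equals sessionId
    }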
diff --git a/media/libmedia/IMediaCodecList.cpp b/media/libmedia/IMediaCodecList.cpp
index 80020db..e2df104 100644
--- a/media/libmedia/IMediaCodecList.cpp
+++ b/media/libmedia/IMediaCodecList.cpp
@@ -30,6 +30,7 @@
     CREATE = IBinder::FIRST_CALL_TRANSACTION,
     COUNT_CODECS,
     GET_CODEC_INFO,
+    GET_GLOBAL_SETTINGS,
     FIND_CODEC_BY_TYPE,
     FIND_CODEC_BY_NAME,
 };
@@ -64,6 +65,19 @@
         }
     }
 
+    virtual const sp<AMessage> getGlobalSettings() const
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaCodecList::getInterfaceDescriptor());
+        remote()->transact(GET_GLOBAL_SETTINGS, data, &reply);
+        status_t err = reply.readInt32();
+        if (err == OK) {
+            return AMessage::FromParcel(reply);
+        } else {
+            return NULL;
+        }
+    }
+
     virtual ssize_t findCodecByType(
             const char *type, bool encoder, size_t startIndex = 0) const
     {
@@ -125,6 +139,20 @@
         }
         break;
 
+        case GET_GLOBAL_SETTINGS:
+        {
+            CHECK_INTERFACE(IMediaCodecList, data, reply);
+            const sp<AMessage> info = getGlobalSettings();
+            if (info != NULL) {
+                reply->writeInt32(OK);
+                info->writeToParcel(reply);
+            } else {
+                reply->writeInt32(-ERANGE);
+            }
+            return NO_ERROR;
+        }
+        break;
+
         case FIND_CODEC_BY_TYPE:
         {
             CHECK_INTERFACE(IMediaCodecList, data, reply);
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 7b4c4e2..8d3fa7b 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -206,6 +206,17 @@
     return OK;
 }
 
+status_t MediaCodecInfo::updateMime(const char *mime) {
+    ssize_t ix = getCapabilityIndex(mime);
+    if (ix < 0) {
+        ALOGE("updateMime mime not found %s", mime);
+        return -EINVAL;
+    }
+
+    mCurrentCaps = mCaps.valueAt(ix);
+    return OK;
+}
+
 void MediaCodecInfo::removeMime(const char *mime) {
     ssize_t ix = getCapabilityIndex(mime);
     if (ix >= 0) {
diff --git a/media/libmediaplayerservice/Crypto.cpp b/media/libmediaplayerservice/Crypto.cpp
index 8ee7c0b..f639193 100644
--- a/media/libmediaplayerservice/Crypto.cpp
+++ b/media/libmediaplayerservice/Crypto.cpp
@@ -22,6 +22,7 @@
 
 #include "Crypto.h"
 
+#include <binder/IMemory.h>
 #include <media/hardware/CryptoAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AString.h>
@@ -238,7 +239,7 @@
         const uint8_t key[16],
         const uint8_t iv[16],
         CryptoPlugin::Mode mode,
-        const void *srcPtr,
+        const sp<IMemory> &sharedBuffer, size_t offset,
         const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
         void *dstPtr,
         AString *errorDetailMsg) {
@@ -252,6 +253,8 @@
         return -EINVAL;
     }
 
+    const void *srcPtr = static_cast<uint8_t *>(sharedBuffer->pointer()) + offset;
+
     return mPlugin->decrypt(
             secure, key, iv, mode, srcPtr, subSamples, numSubSamples, dstPtr,
             errorDetailMsg);
@@ -265,4 +268,14 @@
     }
 }
 
+status_t Crypto::setMediaDrmSession(const Vector<uint8_t> &sessionId) {
+    Mutex::Autolock autoLock(mLock);
+
+    status_t result = NO_INIT;
+    if (mInitCheck == OK && mPlugin != NULL) {
+        result = mPlugin->setMediaDrmSession(sessionId);
+    }
+    return result;
+}
+
 }  // namespace android
diff --git a/media/libmediaplayerservice/Crypto.h b/media/libmediaplayerservice/Crypto.h
index 0037c2e..99ea95d 100644
--- a/media/libmediaplayerservice/Crypto.h
+++ b/media/libmediaplayerservice/Crypto.h
@@ -47,12 +47,14 @@
 
     virtual void notifyResolution(uint32_t width, uint32_t height);
 
+    virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
+
     virtual ssize_t decrypt(
             bool secure,
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
-            const void *srcPtr,
+            const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
             AString *errorDetailMsg);
diff --git a/media/libmediaplayerservice/Drm.cpp b/media/libmediaplayerservice/Drm.cpp
index 49e01d1..62cf3e5 100644
--- a/media/libmediaplayerservice/Drm.cpp
+++ b/media/libmediaplayerservice/Drm.cpp
@@ -136,25 +136,57 @@
 
     if (listener != NULL) {
         Parcel obj;
-        if (sessionId && sessionId->size()) {
-            obj.writeInt32(sessionId->size());
-            obj.write(sessionId->array(), sessionId->size());
-        } else {
-            obj.writeInt32(0);
-        }
-
-        if (data && data->size()) {
-            obj.writeInt32(data->size());
-            obj.write(data->array(), data->size());
-        } else {
-            obj.writeInt32(0);
-        }
+        writeByteArray(obj, sessionId);
+        writeByteArray(obj, data);
 
         Mutex::Autolock lock(mNotifyLock);
         listener->notify(eventType, extra, &obj);
     }
 }
 
+void Drm::sendExpirationUpdate(Vector<uint8_t> const *sessionId,
+                               int64_t expiryTimeInMS)
+{
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Parcel obj;
+        writeByteArray(obj, sessionId);
+        obj.writeInt64(expiryTimeInMS);
+
+        Mutex::Autolock lock(mNotifyLock);
+        listener->notify(DrmPlugin::kDrmPluginEventExpirationUpdate, 0, &obj);
+    }
+}
+
+void Drm::sendKeysChange(Vector<uint8_t> const *sessionId,
+                         Vector<DrmPlugin::KeyStatus> const *keyStatusList,
+                         bool hasNewUsableKey)
+{
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Parcel obj;
+        writeByteArray(obj, sessionId);
+
+        size_t nkeys = keyStatusList->size();
+        obj.writeInt32(keyStatusList->size());
+        for (size_t i = 0; i < nkeys; ++i) {
+            const DrmPlugin::KeyStatus *keyStatus = &keyStatusList->itemAt(i);
+            writeByteArray(obj, &keyStatus->mKeyId);
+            obj.writeInt32(keyStatus->mType);
+        }
+        obj.writeInt32(hasNewUsableKey);
+
+        Mutex::Autolock lock(mNotifyLock);
+        listener->notify(DrmPlugin::kDrmPluginEventKeysChange, 0, &obj);
+    }
+}
+
 /*
  * Search the plugins directory for a plugin that supports the scheme
  * specified by uuid
@@ -756,4 +788,14 @@
     closeFactory();
 }
 
+void Drm::writeByteArray(Parcel &obj, Vector<uint8_t> const *array)
+{
+    if (array && array->size()) {
+        obj.writeInt32(array->size());
+        obj.write(array->array(), array->size());
+    } else {
+        obj.writeInt32(0);
+    }
+}
+
 }  // namespace android
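
On the receiving side, the IDrmClient listener has to unpack the Parcel in the same order sendKeysChange() writes it: session id, key count, (keyId, type) pairs, then the hasNewUsableKey flag. A hedged sketch of that parsing; the real consumer is the MediaDrm event handler, not this code:

    #define LOG_TAG "KeysChangeSketch"
    #include <utils/Log.h>

    #include <binder/Parcel.h>
    #include <utils/Vector.h>

    using namespace android;

    static void parseKeysChange(const Parcel &obj) {
        Vector<uint8_t> sessionId;
        int32_t idSize = obj.readInt32();           // writeByteArray(): length prefix
        if (idSize > 0) {
            sessionId.insertAt((size_t)0, idSize);
            obj.read(sessionId.editArray(), idSize);
        }

        int32_t nkeys = obj.readInt32();
        for (int32_t i = 0; i < nkeys; ++i) {
            Vector<uint8_t> keyId;
            int32_t keySize = obj.readInt32();
            if (keySize > 0) {
                keyId.insertAt((size_t)0, keySize);
                obj.read(keyId.editArray(), keySize);
            }
            int32_t type = obj.readInt32();         // DrmPlugin::KeyStatusType value
            ALOGV("key %d: %zu-byte id, status type %d", i, keyId.size(), type);
        }

        bool hasNewUsableKey = (obj.readInt32() != 0);
        ALOGV("hasNewUsableKey=%d", hasNewUsableKey);
    }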
diff --git a/media/libmediaplayerservice/Drm.h b/media/libmediaplayerservice/Drm.h
index 7e8f246..1591738 100644
--- a/media/libmediaplayerservice/Drm.h
+++ b/media/libmediaplayerservice/Drm.h
@@ -133,6 +133,13 @@
                            Vector<uint8_t> const *sessionId,
                            Vector<uint8_t> const *data);
 
+    virtual void sendExpirationUpdate(Vector<uint8_t> const *sessionId,
+                                      int64_t expiryTimeInMS);
+
+    virtual void sendKeysChange(Vector<uint8_t> const *sessionId,
+                                Vector<DrmPlugin::KeyStatus> const *keyStatusList,
+                                bool hasNewUsableKey);
+
     virtual void binderDied(const wp<IBinder> &the_late_who);
 
 private:
@@ -157,7 +164,7 @@
     void findFactoryForScheme(const uint8_t uuid[16]);
     bool loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]);
     void closeFactory();
-
+    void writeByteArray(Parcel &obj, Vector<uint8_t> const *array);
 
     DISALLOW_EVIL_CONSTRUCTORS(Drm);
 };
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 04ac699..3fff1e6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -82,25 +82,69 @@
     switch (msg->what()) {
         case kWhatCodecNotify:
         {
-            if (!isStaleReply(msg)) {
-                int32_t numInput, numOutput;
-
-                if (!msg->findInt32("input-buffers", &numInput)) {
-                    numInput = INT32_MAX;
-                }
-
-                if (!msg->findInt32("output-buffers", &numOutput)) {
-                    numOutput = INT32_MAX;
-                }
-
-                if (!mPaused) {
-                    while (numInput-- > 0 && handleAnInputBuffer()) {}
-                }
-
-                while (numOutput-- > 0 && handleAnOutputBuffer()) {}
+            if (mPaused) {
+                break;
             }
 
-            requestCodecNotification();
+            int32_t cbID;
+            CHECK(msg->findInt32("callbackID", &cbID));
+
+            ALOGV("kWhatCodecNotify: cbID = %d", cbID);
+            switch (cbID) {
+                case MediaCodec::CB_INPUT_AVAILABLE:
+                {
+                    int32_t index;
+                    CHECK(msg->findInt32("index", &index));
+
+                    handleAnInputBuffer(index);
+                    break;
+                }
+
+                case MediaCodec::CB_OUTPUT_AVAILABLE:
+                {
+                    int32_t index;
+                    size_t offset;
+                    size_t size;
+                    int64_t timeUs;
+                    int32_t flags;
+
+                    CHECK(msg->findInt32("index", &index));
+                    CHECK(msg->findSize("offset", &offset));
+                    CHECK(msg->findSize("size", &size));
+                    CHECK(msg->findInt64("timeUs", &timeUs));
+                    CHECK(msg->findInt32("flags", &flags));
+
+                    handleAnOutputBuffer(index, offset, size, timeUs, flags);
+                    break;
+                }
+
+                case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
+                {
+                    sp<AMessage> format;
+                    CHECK(msg->findMessage("format", &format));
+
+                    handleOutputFormatChange(format);
+                    break;
+                }
+
+                case MediaCodec::CB_ERROR:
+                {
+                    status_t err;
+                    CHECK(msg->findInt32("err", &err));
+                    ALOGE("Decoder (%s) reported error : 0x%x",
+                            mIsAudio ? "audio" : "video", err);
+
+                    handleError(err);
+                    break;
+                }
+
+                default:
+                {
+                    TRESPASS();
+                    break;
+                }
+            }
+
             break;
         }
 
@@ -188,6 +232,9 @@
     CHECK_EQ((status_t)OK, mCodec->getOutputFormat(&mOutputFormat));
     CHECK_EQ((status_t)OK, mCodec->getInputFormat(&mInputFormat));
 
+    sp<AMessage> reply = new AMessage(kWhatCodecNotify, this);
+    mCodec->setCallback(reply);
+
     err = mCodec->start();
     if (err != OK) {
         ALOGE("Failed to start %s decoder (err=%d)", mComponentName.c_str(), err);
@@ -197,18 +244,8 @@
         return;
     }
 
-    // the following should work after start
-    CHECK_EQ((status_t)OK, mCodec->getInputBuffers(&mInputBuffers));
     releaseAndResetMediaBuffers();
-    CHECK_EQ((status_t)OK, mCodec->getOutputBuffers(&mOutputBuffers));
-    ALOGV("[%s] got %zu input and %zu output buffers",
-            mComponentName.c_str(),
-            mInputBuffers.size(),
-            mOutputBuffers.size());
 
-    if (mRenderer != NULL) {
-        requestCodecNotification();
-    }
     mPaused = false;
     mResumePending = false;
 }
@@ -217,16 +254,14 @@
     bool hadNoRenderer = (mRenderer == NULL);
     mRenderer = renderer;
     if (hadNoRenderer && mRenderer != NULL) {
-        requestCodecNotification();
+        // this means that the widevine legacy source is ready
+        onRequestInputBuffers();
     }
 }
 
 void NuPlayer::Decoder::onGetInputBuffers(
         Vector<sp<ABuffer> > *dstBuffers) {
-    dstBuffers->clear();
-    for (size_t i = 0; i < mInputBuffers.size(); i++) {
-        dstBuffers->push(mInputBuffers[i]);
-    }
+    CHECK_EQ((status_t)OK, mCodec->getWidevineLegacyBuffers(dstBuffers));
 }
 
 void NuPlayer::Decoder::onResume(bool notifyComplete) {
@@ -235,6 +270,7 @@
     if (notifyComplete) {
         mResumePending = true;
     }
+    mCodec->start();
 }
 
 void NuPlayer::Decoder::doFlush(bool notifyComplete) {
@@ -261,8 +297,10 @@
         // we attempt to release the buffers even if flush fails.
     }
     releaseAndResetMediaBuffers();
+    mPaused = true;
 }
 
 void NuPlayer::Decoder::onFlush() {
     doFlush(true);
 
@@ -276,7 +314,6 @@
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatFlushCompleted);
     notify->post();
-    mPaused = true;
 }
 
 void NuPlayer::Decoder::onShutdown(bool notifyComplete) {
@@ -320,7 +357,9 @@
 }
 
 void NuPlayer::Decoder::doRequestBuffers() {
-    if (isDiscontinuityPending()) {
+    // mRenderer is only NULL if we have a legacy widevine source that
+    // is not yet ready. In this case we must not fetch input.
+    if (isDiscontinuityPending() || mRenderer == NULL) {
         return;
     }
     status_t err = OK;
@@ -347,34 +386,50 @@
     }
 }
 
-bool NuPlayer::Decoder::handleAnInputBuffer() {
+void NuPlayer::Decoder::handleError(int32_t err)
+{
+    // We cannot release the codec immediately because buffers are still
+    // outstanding in the renderer. Signal the error to the player so it can
+    // shut down and release the decoder after flushing, and bump the buffer
+    // generation to discard messages that are no longer relevant.
+
+    ++mBufferGeneration;
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+bool NuPlayer::Decoder::handleAnInputBuffer(size_t index) {
     if (isDiscontinuityPending()) {
         return false;
     }
-    size_t bufferIx = -1;
-    status_t res = mCodec->dequeueInputBuffer(&bufferIx);
-    ALOGV("[%s] dequeued input: %d",
-            mComponentName.c_str(), res == OK ? (int)bufferIx : res);
-    if (res != OK) {
-        if (res != -EAGAIN) {
-            ALOGE("Failed to dequeue input buffer for %s (err=%d)",
-                    mComponentName.c_str(), res);
-            handleError(res);
+
+    sp<ABuffer> buffer;
+    mCodec->getInputBuffer(index, &buffer);
+
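+    // In callback mode buffers are addressed by index, so grow the local
+    // bookkeeping vectors lazily to cover indices not seen before.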
+    if (index >= mInputBuffers.size()) {
+        for (size_t i = mInputBuffers.size(); i <= index; ++i) {
+            mInputBuffers.add();
+            mMediaBuffers.add();
+            mInputBufferIsDequeued.add();
+            mMediaBuffers.editItemAt(i) = NULL;
+            mInputBufferIsDequeued.editItemAt(i) = false;
         }
-        return false;
     }
+    mInputBuffers.editItemAt(index) = buffer;
 
-    CHECK_LT(bufferIx, mInputBuffers.size());
+    //CHECK_LT(bufferIx, mInputBuffers.size());
 
-    if (mMediaBuffers[bufferIx] != NULL) {
-        mMediaBuffers[bufferIx]->release();
-        mMediaBuffers.editItemAt(bufferIx) = NULL;
+    if (mMediaBuffers[index] != NULL) {
+        mMediaBuffers[index]->release();
+        mMediaBuffers.editItemAt(index) = NULL;
     }
-    mInputBufferIsDequeued.editItemAt(bufferIx) = true;
+    mInputBufferIsDequeued.editItemAt(index) = true;
 
     if (!mCSDsToSubmit.isEmpty()) {
         sp<AMessage> msg = new AMessage();
-        msg->setSize("buffer-ix", bufferIx);
+        msg->setSize("buffer-ix", index);
 
         sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
         ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
@@ -392,94 +447,38 @@
         mPendingInputMessages.erase(mPendingInputMessages.begin());
     }
 
-    if (!mInputBufferIsDequeued.editItemAt(bufferIx)) {
+    if (!mInputBufferIsDequeued.editItemAt(index)) {
         return true;
     }
 
-    mDequeuedInputBuffers.push_back(bufferIx);
+    mDequeuedInputBuffers.push_back(index);
 
     onRequestInputBuffers();
     return true;
 }
 
-bool NuPlayer::Decoder::handleAnOutputBuffer() {
-    size_t bufferIx = -1;
-    size_t offset;
-    size_t size;
-    int64_t timeUs;
-    uint32_t flags;
-    status_t res = mCodec->dequeueOutputBuffer(
-            &bufferIx, &offset, &size, &timeUs, &flags);
-
-    if (res != OK) {
-        ALOGV("[%s] dequeued output: %d", mComponentName.c_str(), res);
-    } else {
-        ALOGV("[%s] dequeued output: %d (time=%lld flags=%" PRIu32 ")",
-                mComponentName.c_str(), (int)bufferIx, timeUs, flags);
-    }
-
-    if (res == INFO_OUTPUT_BUFFERS_CHANGED) {
-        res = mCodec->getOutputBuffers(&mOutputBuffers);
-        if (res != OK) {
-            ALOGE("Failed to get output buffers for %s after INFO event (err=%d)",
-                    mComponentName.c_str(), res);
-            handleError(res);
-            return false;
-        }
-        // NuPlayer ignores this
-        return true;
-    } else if (res == INFO_FORMAT_CHANGED) {
-        sp<AMessage> format = new AMessage();
-        res = mCodec->getOutputFormat(&format);
-        if (res != OK) {
-            ALOGE("Failed to get output format for %s after INFO event (err=%d)",
-                    mComponentName.c_str(), res);
-            handleError(res);
-            return false;
-        }
-
-        if (!mIsAudio) {
-            sp<AMessage> notify = mNotify->dup();
-            notify->setInt32("what", kWhatVideoSizeChanged);
-            notify->setMessage("format", format);
-            notify->post();
-        } else if (mRenderer != NULL) {
-            uint32_t flags;
-            int64_t durationUs;
-            bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
-            if (!hasVideo &&
-                    mSource->getDuration(&durationUs) == OK &&
-                    durationUs
-                        > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
-                flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
-            } else {
-                flags = AUDIO_OUTPUT_FLAG_NONE;
-            }
-
-            res = mRenderer->openAudioSink(
-                    format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaded */);
-            if (res != OK) {
-                ALOGE("Failed to open AudioSink on format change for %s (err=%d)",
-                        mComponentName.c_str(), res);
-                handleError(res);
-                return false;
-            }
-        }
-        return true;
-    } else if (res == INFO_DISCONTINUITY) {
-        // nothing to do
-        return true;
-    } else if (res != OK) {
-        if (res != -EAGAIN) {
-            ALOGE("Failed to dequeue output buffer for %s (err=%d)",
-                    mComponentName.c_str(), res);
-            handleError(res);
-        }
+bool NuPlayer::Decoder::handleAnOutputBuffer(
+        size_t index,
+        size_t offset,
+        size_t size,
+        int64_t timeUs,
+        int32_t flags) {
+    if (mFormatChangePending) {
         return false;
     }
 
-    CHECK_LT(bufferIx, mOutputBuffers.size());
-    sp<ABuffer> buffer = mOutputBuffers[bufferIx];
+//    CHECK_LT(bufferIx, mOutputBuffers.size());
+    sp<ABuffer> buffer;
+    mCodec->getOutputBuffer(index, &buffer);
+
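+    // Grow mOutputBuffers on demand; callback indices may exceed the count seen so far.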
+    if (index >= mOutputBuffers.size()) {
+        for (size_t i = mOutputBuffers.size(); i <= index; ++i) {
+            mOutputBuffers.add();
+        }
+    }
+
+    mOutputBuffers.editItemAt(index) = buffer;
+
     buffer->setRange(offset, size);
     buffer->meta()->clear();
     buffer->meta()->setInt64("timeUs", timeUs);
@@ -488,7 +487,7 @@
     // we do not expect CODECCONFIG or SYNCFRAME for decoder
 
     sp<AMessage> reply = new AMessage(kWhatRenderBuffer, this);
-    reply->setSize("buffer-ix", bufferIx);
+    reply->setSize("buffer-ix", index);
     reply->setInt32("generation", mBufferGeneration);
 
     if (eos) {
@@ -522,6 +521,29 @@
     return true;
 }
 
+void NuPlayer::Decoder::handleOutputFormatChange(const sp<AMessage> &format) {
+    if (!mIsAudio) {
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatVideoSizeChanged);
+        notify->setMessage("format", format);
+        notify->post();
+    } else if (mRenderer != NULL) {
+        uint32_t flags;
+        int64_t durationUs;
+        bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
+        if (!hasVideo &&
+                mSource->getDuration(&durationUs) == OK &&
+                durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
+            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+        } else {
+            flags = AUDIO_OUTPUT_FLAG_NONE;
+        }
+
+        mRenderer->openAudioSink(
+                format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaded */);
+    }
+}
+
 void NuPlayer::Decoder::releaseAndResetMediaBuffers() {
     for (size_t i = 0; i < mMediaBuffers.size(); i++) {
         if (mMediaBuffers[i] != NULL) {
@@ -825,7 +847,8 @@
         mPaused = true;
     } else if (mTimeChangePending) {
         if (flushOnTimeChange) {
-            doFlush(false /*notifyComplete*/);
+            doFlush(false /* notifyComplete */);
+            signalResume(false /* notifyComplete */);
         }
 
         // restart fetching input
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 4aab2c6..0c0e90c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -87,8 +87,15 @@
     bool mResumePending;
     AString mComponentName;
 
-    bool handleAnInputBuffer();
-    bool handleAnOutputBuffer();
+    void handleError(int32_t err);
+    bool handleAnInputBuffer(size_t index);
+    bool handleAnOutputBuffer(
+            size_t index,
+            size_t offset,
+            size_t size,
+            int64_t timeUs,
+            int32_t flags);
+    void handleOutputFormatChange(const sp<AMessage> &format);
 
     void releaseAndResetMediaBuffers();
     void requestCodecNotification();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index a2ec51c..827bdc1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -312,6 +312,9 @@
                 int64_t delayUs =
                     mAudioSink->msecsPerFrame()
                         * numFramesPendingPlayout * 1000ll;
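+                // At playback rates above 1x the queued frames drain proportionally
+                // faster, so shrink the delay before polling for more data.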
+                if (mPlaybackRate > 1.0f) {
+                    delayUs /= mPlaybackRate;
+                }
 
                 // Let's give it more data after about half that time
                 // has elapsed.
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 97f3e20..45f6339 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1685,6 +1685,16 @@
         err = setPriority(priority);
     }
 
+    int32_t rateInt = -1;
+    float rateFloat = -1;
+    if (!msg->findFloat("operating-rate", &rateFloat)) {
+        msg->findInt32("operating-rate", &rateInt);
+        rateFloat = (float)rateInt;  // 16MHz (FLINTMAX) is OK for upper bound.
+    }
+    if (rateFloat > 0) {
+        err = setOperatingRate(rateFloat, video);
+    }
+
     mBaseOutputFormat = outputFormat;
 
     CHECK_EQ(getPortFormat(kPortIndexInput, inputFormat), (status_t)OK);
@@ -1711,6 +1721,34 @@
     return OK;
 }
 
+status_t ACodec::setOperatingRate(float rateFloat, bool isVideo) {
+    if (rateFloat < 0) {
+        return BAD_VALUE;
+    }
+    OMX_U32 rate;
+    if (isVideo) {
+        if (rateFloat > 65535) {
+            return BAD_VALUE;
+        }
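+        // Q16 fixed-point conversion (multiply by 65536), rounded to nearest.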
+        rate = (OMX_U32)(rateFloat * 65536.0f + 0.5f);
+    } else {
+        if (rateFloat > UINT_MAX) {
+            return BAD_VALUE;
+        }
+        rate = (OMX_U32)(rateFloat);
+    }
+    OMX_PARAM_U32TYPE config;
+    InitOMXParams(&config);
+    config.nU32 = rate;
+    status_t err = mOMX->setConfig(
+            mNode, (OMX_INDEXTYPE)OMX_IndexConfigOperatingRate,
+            &config, sizeof(config));
+    if (err != OK) {
+        ALOGI("codec does not support config operating rate (err %d)", err);
+    }
+    return OK;
+}
+
 status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
@@ -4902,6 +4940,7 @@
     sp<CodecObserver> observer = new CodecObserver;
     IOMX::node_id node = NULL;
 
+    status_t err = OMX_ErrorComponentNotFound;
     for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
             ++matchIndex) {
         componentName = matchingCodecs.itemAt(matchIndex).mName.string();
@@ -4910,7 +4949,7 @@
         pid_t tid = gettid();
         int prevPriority = androidGetThreadPriority(tid);
         androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
-        status_t err = omx->allocateNode(componentName.c_str(), observer, &node);
+        err = omx->allocateNode(componentName.c_str(), observer, &node);
         androidSetThreadPriority(tid, prevPriority);
 
         if (err == OK) {
@@ -4924,13 +4963,13 @@
 
     if (node == NULL) {
         if (!mime.empty()) {
-            ALOGE("Unable to instantiate a %scoder for type '%s'.",
-                    encoder ? "en" : "de", mime.c_str());
+            ALOGE("Unable to instantiate a %scoder for type '%s' with err %#x.",
+                    encoder ? "en" : "de", mime.c_str(), err);
         } else {
-            ALOGE("Unable to instantiate codec '%s'.", componentName.c_str());
+            ALOGE("Unable to instantiate codec '%s' with err %#x.", componentName.c_str(), err);
         }
 
-        mCodec->signalError(OMX_ErrorComponentNotFound);
+        mCodec->signalError((OMX_ERRORTYPE)err, makeNoSideEffectStatus(err));
         return false;
     }
 
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index a2cbdaf..b0eeb7f 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -34,6 +34,7 @@
         MediaClock.cpp                    \
         MediaCodec.cpp                    \
         MediaCodecList.cpp                \
+        MediaCodecListOverrides.cpp       \
         MediaCodecSource.cpp              \
         MediaDefs.cpp                     \
         MediaExtractor.cpp                \
diff --git a/media/libstagefright/ESDS.cpp b/media/libstagefright/ESDS.cpp
index 427bf7b..8fbb57c 100644
--- a/media/libstagefright/ESDS.cpp
+++ b/media/libstagefright/ESDS.cpp
@@ -136,6 +136,8 @@
     --size;
 
     if (streamDependenceFlag) {
+        if (size < 2)
+            return ERROR_MALFORMED;
         offset += 2;
         size -= 2;
     }
@@ -145,11 +147,15 @@
             return ERROR_MALFORMED;
         }
         unsigned URLlength = mData[offset];
+        if (URLlength >= size)
+            return ERROR_MALFORMED;
         offset += URLlength + 1;
         size -= URLlength + 1;
     }
 
     if (OCRstreamFlag) {
+        if (size < 2)
+            return ERROR_MALFORMED;
         offset += 2;
         size -= 2;
 
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 910ae32..297a186 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -739,6 +739,16 @@
         && path[3] == FOURCC('i', 'l', 's', 't');
 }
 
+static bool underQTMetaPath(const Vector<uint32_t> &path, int32_t depth) {
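+    // True when inside the QuickTime-style moov/meta hierarchy: directly under
+    // 'meta' (depth 2) or inside its 'ilst' or 'keys' child (depth 3).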
+    return path.size() >= 2
+            && path[0] == FOURCC('m', 'o', 'o', 'v')
+            && path[1] == FOURCC('m', 'e', 't', 'a')
+            && (depth == 2
+            || (depth == 3
+                    && (path[2] == FOURCC('i', 'l', 's', 't')
+                    ||  path[2] == FOURCC('k', 'e', 'y', 's'))));
+}
+
 // Given a time in seconds since Jan 1 1904, produce a human-readable string.
 static void convertTimeToDate(int64_t time_1904, String8 *s) {
     time_t time_1970 = time_1904 - (((66 * 365 + 17) * 24) * 3600);
@@ -874,6 +884,9 @@
                     }
                 }
 
+                if (mLastTrack == NULL)
+                    return ERROR_MALFORMED;
+
                 mLastTrack->sampleTable = new SampleTable(mDataSource);
             }
 
@@ -1028,6 +1041,10 @@
             }
             original_fourcc = ntohl(original_fourcc);
             ALOGV("read original format: %d", original_fourcc);
+
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(original_fourcc));
             uint32_t num_channels = 0;
             uint32_t sample_rate = 0;
@@ -1083,6 +1100,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setInt32(kKeyCryptoMode, defaultAlgorithmId);
             mLastTrack->meta->setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
             mLastTrack->meta->setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
@@ -1198,7 +1218,7 @@
                     duration = ntohl(duration32);
                 }
             }
-            if (duration != 0) {
+            if (duration != 0 && mLastTrack->timescale != 0) {
                 mLastTrack->meta->setInt64(
                         kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
             }
@@ -1262,6 +1282,10 @@
                 // display the timed text.
                 // For encrypted files, there may also be more than one entry.
                 const char *mime;
+
+                if (mLastTrack == NULL)
+                    return ERROR_MALFORMED;
+
                 CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
                 if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) &&
                         strcasecmp(mime, "application/octet-stream")) {
@@ -1308,6 +1332,9 @@
             uint16_t sample_size = U16_AT(&buffer[18]);
             uint32_t sample_rate = U32_AT(&buffer[24]) >> 16;
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             if (chunk_type != FOURCC('e', 'n', 'c', 'a')) {
                 // if the chunk type is enca, we'll get the type from the sinf/frma box later
                 mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
@@ -1369,6 +1396,9 @@
             // printf("*** coding='%s' width=%d height=%d\n",
             //        chunk, width, height);
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             if (chunk_type != FOURCC('e', 'n', 'c', 'v')) {
                 // if the chunk type is encv, we'll get the type from the sinf/frma box later
                 mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
@@ -1394,6 +1424,9 @@
         case FOURCC('s', 't', 'c', 'o'):
         case FOURCC('c', 'o', '6', '4'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setChunkOffsetParams(
                         chunk_type, data_offset, chunk_data_size);
@@ -1409,6 +1442,9 @@
 
         case FOURCC('s', 't', 's', 'c'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setSampleToChunkParams(
                         data_offset, chunk_data_size);
@@ -1425,6 +1461,9 @@
         case FOURCC('s', 't', 's', 'z'):
         case FOURCC('s', 't', 'z', '2'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setSampleSizeParams(
                         chunk_type, data_offset, chunk_data_size);
@@ -1494,6 +1533,9 @@
 
         case FOURCC('s', 't', 't', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1509,6 +1551,9 @@
 
         case FOURCC('c', 't', 't', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1524,6 +1569,9 @@
 
         case FOURCC('s', 't', 's', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1596,6 +1644,9 @@
                 return ERROR_MALFORMED;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyESDS, kTypeESDS, &buffer[4], chunk_data_size - 4);
 
@@ -1628,6 +1679,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyAVCC, kTypeAVCC, buffer->data(), chunk_data_size);
 
@@ -1642,6 +1696,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyHVCC, kTypeHVCC, buffer->data(), chunk_data_size);
 
@@ -1675,6 +1732,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(kKeyD263, kTypeD263, buffer, chunk_data_size);
 
             break;
@@ -1682,31 +1742,35 @@
 
         case FOURCC('m', 'e', 't', 'a'):
         {
-            uint8_t buffer[4];
-            if (chunk_data_size < (off64_t)sizeof(buffer)) {
-                *offset += chunk_size;
-                return ERROR_MALFORMED;
-            }
-
-            if (mDataSource->readAt(
-                        data_offset, buffer, 4) < 4) {
-                *offset += chunk_size;
-                return ERROR_IO;
-            }
-
-            if (U32_AT(buffer) != 0) {
-                // Should be version 0, flags 0.
-
-                // If it's not, let's assume this is one of those
-                // apparently malformed chunks that don't have flags
-                // and completely different semantics than what's
-                // in the MPEG4 specs and skip it.
-                *offset += chunk_size;
-                return OK;
-            }
-
             off64_t stop_offset = *offset + chunk_size;
-            *offset = data_offset + sizeof(buffer);
+            *offset = data_offset;
+            bool isParsingMetaKeys = underQTMetaPath(mPath, 2);
+            if (!isParsingMetaKeys) {
+                uint8_t buffer[4];
+                if (chunk_data_size < (off64_t)sizeof(buffer)) {
+                    *offset += chunk_size;
+                    return ERROR_MALFORMED;
+                }
+
+                if (mDataSource->readAt(
+                            data_offset, buffer, 4) < 4) {
+                    *offset += chunk_size;
+                    return ERROR_IO;
+                }
+
+                if (U32_AT(buffer) != 0) {
+                    // Should be version 0, flags 0.
+
+                    // If it's not, let's assume this is one of those
+                    // apparently malformed chunks that don't have flags
+                    // and completely different semantics than what's
+                    // in the MPEG4 specs and skip it.
+                    *offset += chunk_size;
+                    return OK;
+                }
+                *offset += sizeof(buffer);
+            }
+
             while (*offset < stop_offset) {
                 status_t err = parseChunk(offset, depth + 1);
                 if (err != OK) {
@@ -1772,7 +1836,7 @@
                 }
                 duration = d32;
             }
-            if (duration != 0) {
+            if (duration != 0 && mHeaderTimescale != 0) {
                 mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
             }
 
@@ -1821,7 +1885,7 @@
                 return ERROR_MALFORMED;
             }
 
-            if (duration != 0) {
+            if (duration != 0 && mHeaderTimescale != 0) {
                 mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
             }
 
@@ -1856,6 +1920,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             uint32_t type = ntohl(buffer);
             // For the 3GPP file format, the handler-type within the 'hdlr' box
             // shall be 'text'. We also want to support 'sbtl' handler type
@@ -1867,6 +1934,16 @@
             break;
         }
 
+        case FOURCC('k', 'e', 'y', 's'):
+        {
+            *offset += chunk_size;
+
+            if (underQTMetaPath(mPath, 3)) {
+                parseQTMetaKey(data_offset, chunk_data_size);
+            }
+            break;
+        }
+
         case FOURCC('t', 'r', 'e', 'x'):
         {
             *offset += chunk_size;
@@ -1888,6 +1965,9 @@
 
         case FOURCC('t', 'x', '3', 'g'):
         {
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             uint32_t type;
             const void *data;
             size_t size = 0;
@@ -1994,6 +2074,12 @@
 
         default:
         {
+            // check if we're parsing 'ilst' for meta keys
+            // if so, treat type as a number (key-id).
+            if (underQTMetaPath(mPath, 3)) {
+                parseQTMetaVal(chunk_type, data_offset, chunk_data_size);
+            }
+
             *offset += chunk_size;
             break;
         }
@@ -2029,6 +2115,8 @@
         return ERROR_MALFORMED;
     }
     ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
+    if (timeScale == 0)
+        return ERROR_MALFORMED;
 
     uint64_t earliestPresentationTime;
     uint64_t firstOffset;
@@ -2112,6 +2200,9 @@
 
     uint64_t sidxDuration = total_duration * 1000000 / timeScale;
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     int64_t metaDuration;
     if (!mLastTrack->meta->findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) {
         mLastTrack->meta->setInt64(kKeyDuration, sidxDuration);
@@ -2119,7 +2210,108 @@
     return OK;
 }
 
+status_t MPEG4Extractor::parseQTMetaKey(off64_t offset, size_t size) {
+    if (size < 8) {
+        return ERROR_MALFORMED;
+    }
 
+    uint32_t count;
+    if (!mDataSource->getUInt32(offset + 4, &count)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (mMetaKeyMap.size() > 0) {
+        ALOGW("'keys' atom seen again, discarding existing entries");
+        mMetaKeyMap.clear();
+    }
+
+    off64_t keyOffset = offset + 8;
+    off64_t stopOffset = offset + size;
+    for (size_t i = 1; i <= count; i++) {
+        if (keyOffset + 8 > stopOffset) {
+            return ERROR_MALFORMED;
+        }
+
+        uint32_t keySize;
+        if (!mDataSource->getUInt32(keyOffset, &keySize)
+                || keySize < 8
+                || keyOffset + keySize > stopOffset) {
+            return ERROR_MALFORMED;
+        }
+
+        uint32_t type;
+        if (!mDataSource->getUInt32(keyOffset + 4, &type)
+                || type != FOURCC('m', 'd', 't', 'a')) {
+            return ERROR_MALFORMED;
+        }
+
+        keySize -= 8;
+        keyOffset += 8;
+
+        sp<ABuffer> keyData = new ABuffer(keySize);
+        if (keyData->data() == NULL) {
+            return ERROR_MALFORMED;
+        }
+        if (mDataSource->readAt(
+                keyOffset, keyData->data(), keySize) < (ssize_t) keySize) {
+            return ERROR_MALFORMED;
+        }
+
+        AString key((const char *)keyData->data(), keySize);
+        mMetaKeyMap.add(i, key);
+
+        keyOffset += keySize;
+    }
+    return OK;
+}
+
+status_t MPEG4Extractor::parseQTMetaVal(
+        int32_t keyId, off64_t offset, size_t size) {
+    ssize_t index = mMetaKeyMap.indexOfKey(keyId);
+    if (index < 0) {
+        // corresponding key is not present, ignore
+        return ERROR_MALFORMED;
+    }
+
+    if (size <= 16) {
+        return ERROR_MALFORMED;
+    }
+    uint32_t dataSize;
+    if (!mDataSource->getUInt32(offset, &dataSize)
+            || dataSize > size || dataSize <= 16) {
+        return ERROR_MALFORMED;
+    }
+    uint32_t atomFourCC;
+    if (!mDataSource->getUInt32(offset + 4, &atomFourCC)
+            || atomFourCC != FOURCC('d', 'a', 't', 'a')) {
+        return ERROR_MALFORMED;
+    }
+    uint32_t dataType;
+    if (!mDataSource->getUInt32(offset + 8, &dataType)
+            || ((dataType & 0xff000000) != 0)) {
+        // not well-known type
+        return ERROR_MALFORMED;
+    }
+
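+    // Skip the 16-byte 'data' atom header (size, fourcc, type, locale) to reach the payload.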
+    dataSize -= 16;
+    offset += 16;
+
+    if (dataType == 23 && dataSize >= 4) {
+        // BE Float32
+        uint32_t val;
+        if (!mDataSource->getUInt32(offset, &val)) {
+            return ERROR_MALFORMED;
+        }
+        if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.capture.fps")) {
+            mFileMetaData->setFloat(kKeyCaptureFramerate, *(float *)&val);
+        }
+    } else {
+        // add more keys if needed
+        ALOGV("ignoring key: type %d, size %d", dataType, dataSize);
+    }
+
+    return OK;
+}
 
 status_t MPEG4Extractor::parseTrackHeader(
         off64_t data_offset, off64_t data_size) {
@@ -2162,6 +2354,9 @@
         return ERROR_UNSUPPORTED;
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     mLastTrack->meta->setInt32(kKeyTrackID, id);
 
     size_t matrixOffset = dynSize + 16;
@@ -2344,6 +2539,9 @@
                     int32_t delay, padding;
                     if (sscanf(mLastCommentData,
                                " %*x %x %x %*x", &delay, &padding) == 2) {
+                        if (mLastTrack == NULL)
+                            return ERROR_MALFORMED;
+
                         mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
                         mLastTrack->meta->setInt32(kKeyEncoderPadding, padding);
                     }
@@ -2711,6 +2909,9 @@
 
     if (objectTypeIndication == 0xe1) {
         // This isn't MPEG4 audio at all, it's QCELP 14k...
+        if (mLastTrack == NULL)
+            return ERROR_MALFORMED;
+
         mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
         return OK;
     }
@@ -2759,6 +2960,9 @@
         objectType = 32 + br.getBits(6);
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     //keep AOT type
     mLastTrack->meta->setInt32(kKeyAACAOT, objectType);
 
@@ -2929,6 +3133,9 @@
         return ERROR_UNSUPPORTED;
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     int32_t prevSampleRate;
     CHECK(mLastTrack->meta->findInt32(kKeySampleRate, &prevSampleRate));
 
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
index 433f555..2641e4e 100644
--- a/media/libstagefright/MediaClock.cpp
+++ b/media/libstagefright/MediaClock.cpp
@@ -92,6 +92,11 @@
     mPlaybackRate = rate;
 }
 
+float MediaClock::getPlaybackRate() const {
+    Mutex::Autolock autoLock(mLock);
+    return mPlaybackRate;
+}
+
 status_t MediaClock::getMediaTime(
         int64_t realUs, int64_t *outMediaUs, bool allowPastMaxTime) const {
     if (outMediaUs == NULL) {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 0597f1d..40818f9 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -22,9 +22,13 @@
 #include "include/SoftwareRenderer.h"
 
 #include <binder/IBatteryStats.h>
+#include <binder/IMemory.h>
+#include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
+#include <binder/MemoryDealer.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
+#include <media/IResourceManagerService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -45,18 +49,72 @@
 
 namespace android {
 
+static inline int getCallingPid() {
+    return IPCThreadState::self()->getCallingPid();
+}
+
+static int64_t getId(sp<IResourceManagerClient> client) {
+    return (int64_t) client.get();
+}
+
+static bool isResourceError(status_t err) {
+    return (err == OMX_ErrorInsufficientResources);
+}
+
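+// Maximum number of additional attempts init/configure/start make after asking
+// the resource manager to reclaim resources.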
+static const int kMaxRetry = 2;
+
+struct ResourceManagerClient : public BnResourceManagerClient {
+    ResourceManagerClient(MediaCodec* codec) : mMediaCodec(codec) {}
+
+    virtual bool reclaimResource() {
+        sp<MediaCodec> codec = mMediaCodec.promote();
+        if (codec == NULL) {
+            // codec is already gone.
+            return true;
+        }
+        status_t err = codec->release();
+        if (err != OK) {
+            ALOGW("ResourceManagerClient failed to release codec with err %d", err);
+        }
+        return (err == OK);
+    }
+
+protected:
+    virtual ~ResourceManagerClient() {}
+
+private:
+    wp<MediaCodec> mMediaCodec;
+
+    DISALLOW_EVIL_CONSTRUCTORS(ResourceManagerClient);
+};
+
 struct MediaCodec::BatteryNotifier : public Singleton<BatteryNotifier> {
     BatteryNotifier();
+    virtual ~BatteryNotifier();
 
     void noteStartVideo();
     void noteStopVideo();
     void noteStartAudio();
     void noteStopAudio();
+    void onBatteryStatServiceDied();
 
 private:
+    struct DeathNotifier : public IBinder::DeathRecipient {
+        DeathNotifier() {}
+        virtual void binderDied(const wp<IBinder>& /*who*/) {
+            BatteryNotifier::getInstance().onBatteryStatServiceDied();
+        }
+    };
+
+    Mutex mLock;
     int32_t mVideoRefCount;
     int32_t mAudioRefCount;
     sp<IBatteryStats> mBatteryStatService;
+    sp<DeathNotifier> mDeathNotifier;
+
+    sp<IBatteryStats> getBatteryService_l();
+
+    DISALLOW_EVIL_CONSTRUCTORS(BatteryNotifier);
 };
 
 ANDROID_SINGLETON_STATIC_INSTANCE(MediaCodec::BatteryNotifier)
@@ -64,54 +122,162 @@
 MediaCodec::BatteryNotifier::BatteryNotifier() :
     mVideoRefCount(0),
     mAudioRefCount(0) {
-    // get battery service
+}
+
+sp<IBatteryStats> MediaCodec::BatteryNotifier::getBatteryService_l() {
+    if (mBatteryStatService != NULL) {
+        return mBatteryStatService;
+    }
+    // get battery service from service manager
     const sp<IServiceManager> sm(defaultServiceManager());
     if (sm != NULL) {
         const String16 name("batterystats");
-        mBatteryStatService = interface_cast<IBatteryStats>(sm->getService(name));
+        mBatteryStatService =
+                interface_cast<IBatteryStats>(sm->getService(name));
         if (mBatteryStatService == NULL) {
             ALOGE("batterystats service unavailable!");
+            return NULL;
         }
+        mDeathNotifier = new DeathNotifier();
+        if (IInterface::asBinder(mBatteryStatService)->
+                linkToDeath(mDeathNotifier) != OK) {
+            mBatteryStatService.clear();
+            mDeathNotifier.clear();
+            return NULL;
+        }
+        // notify start now if media already started
+        if (mVideoRefCount > 0) {
+            mBatteryStatService->noteStartVideo(AID_MEDIA);
+        }
+        if (mAudioRefCount > 0) {
+            mBatteryStatService->noteStartAudio(AID_MEDIA);
+        }
+    }
+    return mBatteryStatService;
+}
+
+MediaCodec::BatteryNotifier::~BatteryNotifier() {
+    if (mDeathNotifier != NULL) {
+        IInterface::asBinder(mBatteryStatService)->
+                unlinkToDeath(mDeathNotifier);
     }
 }
 
 void MediaCodec::BatteryNotifier::noteStartVideo() {
-    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
-        mBatteryStatService->noteStartVideo(AID_MEDIA);
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    if (mVideoRefCount == 0 && batteryService != NULL) {
+        batteryService->noteStartVideo(AID_MEDIA);
     }
     mVideoRefCount++;
 }
 
 void MediaCodec::BatteryNotifier::noteStopVideo() {
+    Mutex::Autolock _l(mLock);
     if (mVideoRefCount == 0) {
         ALOGW("BatteryNotifier::noteStop(): video refcount is broken!");
         return;
     }
 
     mVideoRefCount--;
-    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
-        mBatteryStatService->noteStopVideo(AID_MEDIA);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    if (mVideoRefCount == 0 && batteryService != NULL) {
+        batteryService->noteStopVideo(AID_MEDIA);
     }
 }
 
 void MediaCodec::BatteryNotifier::noteStartAudio() {
-    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
-        mBatteryStatService->noteStartAudio(AID_MEDIA);
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    if (mAudioRefCount == 0 && batteryService != NULL) {
+        batteryService->noteStartAudio(AID_MEDIA);
     }
     mAudioRefCount++;
 }
 
 void MediaCodec::BatteryNotifier::noteStopAudio() {
+    Mutex::Autolock _l(mLock);
     if (mAudioRefCount == 0) {
         ALOGW("BatteryNotifier::noteStop(): audio refcount is broken!");
         return;
     }
 
     mAudioRefCount--;
-    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
-        mBatteryStatService->noteStopAudio(AID_MEDIA);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    if (mAudioRefCount == 0 && batteryService != NULL) {
+        batteryService->noteStopAudio(AID_MEDIA);
     }
 }
+
+void MediaCodec::BatteryNotifier::onBatteryStatServiceDied() {
+    Mutex::Autolock _l(mLock);
+    mBatteryStatService.clear();
+    mDeathNotifier.clear();
+    // Do not reset mVideoRefCount and mAudioRefCount here. The ref
+    // counting is independent of the battery service availability.
+    // The counts are still needed in case the battery service becomes
+    // available after playback has already started.
+}
+
+MediaCodec::ResourceManagerServiceProxy::ResourceManagerServiceProxy() {
+}
+
+MediaCodec::ResourceManagerServiceProxy::~ResourceManagerServiceProxy() {
+    if (mService != NULL) {
+        IInterface::asBinder(mService)->unlinkToDeath(this);
+    }
+}
+
+void MediaCodec::ResourceManagerServiceProxy::init() {
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.resource_manager"));
+    mService = interface_cast<IResourceManagerService>(binder);
+    if (mService == NULL) {
+        ALOGE("Failed to get ResourceManagerService");
+        return;
+    }
+    if (IInterface::asBinder(mService)->linkToDeath(this) != OK) {
+        mService.clear();
+        ALOGE("Failed to linkToDeath to ResourceManagerService.");
+        return;
+    }
+}
+
+void MediaCodec::ResourceManagerServiceProxy::binderDied(const wp<IBinder>& /*who*/) {
+    ALOGW("ResourceManagerService died.");
+    Mutex::Autolock _l(mLock);
+    mService.clear();
+}
+
+void MediaCodec::ResourceManagerServiceProxy::addResource(
+        int pid,
+        int64_t clientId,
+        const sp<IResourceManagerClient> client,
+        const Vector<MediaResource> &resources) {
+    Mutex::Autolock _l(mLock);
+    if (mService == NULL) {
+        return;
+    }
+    mService->addResource(pid, clientId, client, resources);
+}
+
+void MediaCodec::ResourceManagerServiceProxy::removeResource(int64_t clientId) {
+    Mutex::Autolock _l(mLock);
+    if (mService == NULL) {
+        return;
+    }
+    mService->removeResource(clientId);
+}
+
+bool MediaCodec::ResourceManagerServiceProxy::reclaimResource(
+        int callingPid, const Vector<MediaResource> &resources) {
+    Mutex::Autolock _l(mLock);
+    if (mService == NULL) {
+        return false;
+    }
+    return mService->reclaimResource(callingPid, resources);
+}
+
 // static
 sp<MediaCodec> MediaCodec::CreateByType(
         const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err) {
@@ -142,10 +308,14 @@
       mCodec(NULL),
       mReplyID(0),
       mFlags(0),
+      mResourceManagerClient(new ResourceManagerClient(this)),
+      mResourceManagerService(new ResourceManagerServiceProxy()),
       mStickyError(OK),
       mSoftRenderer(NULL),
       mBatteryStatNotified(false),
       mIsVideo(false),
+      mVideoWidth(0),
+      mVideoHeight(0),
       mDequeueInputTimeoutGeneration(0),
       mDequeueInputReplyID(0),
       mDequeueOutputTimeoutGeneration(0),
@@ -155,6 +325,7 @@
 
 MediaCodec::~MediaCodec() {
     CHECK_EQ(mState, UNINITIALIZED);
+    mResourceManagerService->removeResource(getId(mResourceManagerClient));
 }
 
 // static
@@ -181,6 +352,8 @@
 }
 
 status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
+    mResourceManagerService->init();
+
     // save init parameters for reset
     mInitName = name;
     mInitNameIsType = nameIsType;
@@ -200,12 +373,13 @@
         return NAME_NOT_FOUND;
     }
 
-    bool needDedicatedLooper = false;
+    bool secureCodec = false;
     if (nameIsType && !strncasecmp(name.c_str(), "video/", 6)) {
-        needDedicatedLooper = true;
+        mIsVideo = true;
     } else {
         AString tmp = name;
         if (tmp.endsWith(".secure")) {
+            secureCodec = true;
             tmp.erase(tmp.size() - 7, 7);
         }
         const sp<IMediaCodecList> mcl = MediaCodecList::getInstance();
@@ -216,14 +390,15 @@
             info->getSupportedMimes(&mimes);
             for (size_t i = 0; i < mimes.size(); i++) {
                 if (mimes[i].startsWith("video/")) {
-                    needDedicatedLooper = true;
+                    mIsVideo = true;
                     break;
                 }
             }
         }
     }
 
-    if (needDedicatedLooper) {
+    if (mIsVideo) {
+        // video codecs need a dedicated looper
         if (mCodecLooper == NULL) {
             mCodecLooper = new ALooper;
             mCodecLooper->setName("CodecLooper");
@@ -247,8 +422,25 @@
         msg->setInt32("encoder", encoder);
     }
 
-    sp<AMessage> response;
-    return PostAndAwaitResponse(msg, &response);
+    status_t err;
+    Vector<MediaResource> resources;
+    const char *type = secureCodec ? kResourceSecureCodec : kResourceNonSecureCodec;
+    resources.push_back(MediaResource(String8(type), 1));
+    for (int i = 0; i <= kMaxRetry; ++i) {
+        if (i > 0) {
+            // Don't try to reclaim resources on the first attempt.
+            if (!mResourceManagerService->reclaimResource(getCallingPid(), resources)) {
+                break;
+            }
+        }
+
+        sp<AMessage> response;
+        err = PostAndAwaitResponse(msg, &response);
+        if (!isResourceError(err)) {
+            break;
+        }
+    }
+    return err;
 }
 
 status_t MediaCodec::setCallback(const sp<AMessage> &callback) {
@@ -266,6 +458,11 @@
         uint32_t flags) {
     sp<AMessage> msg = new AMessage(kWhatConfigure, this);
 
+    if (mIsVideo) {
+        format->findInt32("width", &mVideoWidth);
+        format->findInt32("height", &mVideoHeight);
+    }
+
     msg->setMessage("format", format);
     msg->setInt32("flags", flags);
 
@@ -279,20 +476,41 @@
         msg->setPointer("crypto", crypto.get());
     }
 
-    sp<AMessage> response;
-    status_t err = PostAndAwaitResponse(msg, &response);
+    // save msg for reset
+    mConfigureMsg = msg;
 
-    if (err != OK && err != INVALID_OPERATION) {
-        // MediaCodec now set state to UNINITIALIZED upon any fatal error.
-        // To maintain backward-compatibility, do a reset() to put codec
-        // back into INITIALIZED state.
-        // But don't reset if the err is INVALID_OPERATION, which means
-        // the configure failure is due to wrong state.
+    status_t err;
+    Vector<MediaResource> resources;
+    const char *type = (mFlags & kFlagIsSecure) ?
+            kResourceSecureCodec : kResourceNonSecureCodec;
+    resources.push_back(MediaResource(String8(type), 1));
+    // Don't know the buffer size at this point, but it's fine to use 1 because
+    // the reclaimResource call doesn't consider the requester's buffer size for now.
+    resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+    for (int i = 0; i <= kMaxRetry; ++i) {
+        if (i > 0) {
+            // Don't try to reclaim resources on the first attempt.
+            if (!mResourceManagerService->reclaimResource(getCallingPid(), resources)) {
+                break;
+            }
+        }
 
-        ALOGE("configure failed with err 0x%08x, resetting...", err);
-        reset();
+        sp<AMessage> response;
+        err = PostAndAwaitResponse(msg, &response);
+        if (err != OK && err != INVALID_OPERATION) {
+            // MediaCodec now set state to UNINITIALIZED upon any fatal error.
+            // To maintain backward-compatibility, do a reset() to put codec
+            // back into INITIALIZED state.
+            // But don't reset if the err is INVALID_OPERATION, which means
+            // the configure failure is due to wrong state.
+
+            ALOGE("configure failed with err 0x%08x, resetting...", err);
+            reset();
+        }
+        if (!isResourceError(err)) {
+            break;
+        }
     }
-
     return err;
 }
 
@@ -316,11 +534,65 @@
     return err;
 }
 
+uint64_t MediaCodec::getGraphicBufferSize() {
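+    // Rough estimate: one YUV420 frame (width * height * 3/2 bytes) per allocated port buffer.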
+    if (!mIsVideo) {
+        return 0;
+    }
+
+    uint64_t size = 0;
+    size_t portNum = sizeof(mPortBuffers) / sizeof((mPortBuffers)[0]);
+    for (size_t i = 0; i < portNum; ++i) {
+        // TODO: this is just an estimation, we should get the real buffer size from ACodec.
+        size += mPortBuffers[i].size() * mVideoWidth * mVideoHeight * 3 / 2;
+    }
+    return size;
+}
+
+void MediaCodec::addResource(const char *type, uint64_t value) {
+    Vector<MediaResource> resources;
+    resources.push_back(MediaResource(String8(type), value));
+    mResourceManagerService->addResource(
+            getCallingPid(), getId(mResourceManagerClient), mResourceManagerClient, resources);
+}
+
 status_t MediaCodec::start() {
     sp<AMessage> msg = new AMessage(kWhatStart, this);
 
-    sp<AMessage> response;
-    return PostAndAwaitResponse(msg, &response);
+    status_t err;
+    Vector<MediaResource> resources;
+    const char *type = (mFlags & kFlagIsSecure) ?
+            kResourceSecureCodec : kResourceNonSecureCodec;
+    resources.push_back(MediaResource(String8(type), 1));
+    // Don't know the buffer size at this point, but it's fine to use 1 because
+    // the reclaimResource call doesn't consider the requester's buffer size for now.
+    resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+    for (int i = 0; i <= kMaxRetry; ++i) {
+        if (i > 0) {
+            // Don't try to reclaim resources on the first attempt.
+            if (!mResourceManagerService->reclaimResource(getCallingPid(), resources)) {
+                break;
+            }
+            // Recover codec from previous error before retry start.
+            err = reset();
+            if (err != OK) {
+                ALOGE("retrying start: failed to reset codec");
+                break;
+            }
+            sp<AMessage> response;
+            err = PostAndAwaitResponse(mConfigureMsg, &response);
+            if (err != OK) {
+                ALOGE("retrying start: failed to configure codec");
+                break;
+            }
+        }
+
+        sp<AMessage> response;
+        err = PostAndAwaitResponse(msg, &response);
+        if (!isResourceError(err)) {
+            break;
+        }
+    }
+    return err;
 }
 
 status_t MediaCodec::stop() {
@@ -544,6 +816,16 @@
     return OK;
 }
 
+status_t MediaCodec::getWidevineLegacyBuffers(Vector<sp<ABuffer> > *buffers) const {
+    sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
+    msg->setInt32("portIndex", kPortIndexInput);
+    msg->setPointer("buffers", buffers);
+    msg->setInt32("widevine", true);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
 status_t MediaCodec::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
     sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
     msg->setInt32("portIndex", kPortIndexInput);
@@ -884,11 +1166,15 @@
                         mFlags &= ~kFlagUsesSoftwareRenderer;
                     }
 
+                    String8 resourceType;
                     if (mComponentName.endsWith(".secure")) {
                         mFlags |= kFlagIsSecure;
+                        resourceType = String8(kResourceSecureCodec);
                     } else {
                         mFlags &= ~kFlagIsSecure;
+                        resourceType = String8(kResourceNonSecureCodec);
                     }
+                    addResource(resourceType, 1);
 
                     (new AMessage)->postReply(mReplyID);
                     break;
@@ -969,6 +1255,17 @@
 
                     size_t numBuffers = portDesc->countBuffers();
 
+                    size_t totalSize = 0;
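+                    // With a crypto object attached, input buffers need shared-memory backed
+                    // copies, so add up their capacities for a single MemoryDealer allocation.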
+                    for (size_t i = 0; i < numBuffers; ++i) {
+                        if (portIndex == kPortIndexInput && mCrypto != NULL) {
+                            totalSize += portDesc->bufferAt(i)->capacity();
+                        }
+                    }
+
+                    if (totalSize) {
+                        mDealer = new MemoryDealer(totalSize, "MediaCodec");
+                    }
+
                     for (size_t i = 0; i < numBuffers; ++i) {
                         BufferInfo info;
                         info.mBufferID = portDesc->bufferIDAt(i);
@@ -976,8 +1273,10 @@
                         info.mData = portDesc->bufferAt(i);
 
                         if (portIndex == kPortIndexInput && mCrypto != NULL) {
+                            sp<IMemory> mem = mDealer->allocate(info.mData->capacity());
                             info.mEncryptedData =
-                                new ABuffer(info.mData->capacity());
+                                new ABuffer(mem->pointer(), info.mData->capacity());
+                            info.mSharedEncryptedBuffer = mem;
                         }
 
                         buffers->push_back(info);
@@ -988,6 +1287,9 @@
                             // We're always allocating output buffers after
                             // allocating input buffers, so this is a good
                             // indication that now all buffers are allocated.
+                            if (mIsVideo) {
+                                addResource(kResourceGraphicMemory, getGraphicBufferSize());
+                            }
                             setState(STARTED);
                             (new AMessage)->postReply(mReplyID);
                         } else {
@@ -1167,6 +1469,8 @@
                     }
                     mFlags &= ~kFlagIsComponentAllocated;
 
+                    mResourceManagerService->removeResource(getId(mResourceManagerClient));
+
                     (new AMessage)->postReply(mReplyID);
                     break;
                 }
@@ -1587,8 +1891,12 @@
         {
             sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
+            // Unfortunately widevine legacy source requires knowing all of the
+            // codec input buffers, so we have to provide them even in async mode.
+            int32_t widevine = 0;
+            msg->findInt32("widevine", &widevine);
 
-            if (!isExecuting() || (mFlags & kFlagIsAsync)) {
+            if (!isExecuting() || ((mFlags & kFlagIsAsync) && !widevine)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             } else if (mFlags & kFlagStickyError) {
@@ -1953,7 +2261,8 @@
                 key,
                 iv,
                 mode,
-                info->mEncryptedData->base() + offset,
+                info->mSharedEncryptedBuffer,
+                offset,
                 subSamples,
                 numSubSamples,
                 info->mData->base(),
@@ -2263,12 +2572,6 @@
 
 void MediaCodec::updateBatteryStat() {
     if (mState == CONFIGURED && !mBatteryStatNotified) {
-        AString mime;
-        CHECK(mOutputFormat != NULL &&
-                mOutputFormat->findString("mime", &mime));
-
-        mIsVideo = mime.startsWithIgnoreCase("video/");
-
         BatteryNotifier& notifier(BatteryNotifier::getInstance());
 
         if (mIsVideo) {
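
The MediaCodec.cpp hunks above back each encrypted input buffer with a slice of a single MemoryDealer heap, so the ICrypto::decrypt() call can receive an IMemory plus an offset instead of a raw pointer. A minimal sketch of that allocation pattern, assuming the MemoryDealer, IMemory and ABuffer interfaces from this tree; the helper name and the Vector bookkeeping are illustrative only, not part of the patch:

    #include <binder/IMemory.h>
    #include <binder/MemoryDealer.h>
    #include <media/stagefright/foundation/ABuffer.h>
    #include <utils/Vector.h>

    using namespace android;

    // Sketch: carve one shared heap into per-buffer IMemory chunks so the
    // crypto service can address each input buffer as (heap, offset).
    static void allocateEncryptedBuffers(
            const Vector<size_t> &capacities,
            Vector<sp<ABuffer> > *buffers,
            Vector<sp<IMemory> > *memories) {
        size_t totalSize = 0;
        for (size_t i = 0; i < capacities.size(); ++i) {
            totalSize += capacities[i];
        }
        sp<MemoryDealer> dealer = new MemoryDealer(totalSize, "MediaCodec");
        for (size_t i = 0; i < capacities.size(); ++i) {
            sp<IMemory> mem = dealer->allocate(capacities[i]);
            if (mem == NULL) {
                break;  // heap exhausted; real code would report an error
            }
            // Wrap the shared memory in an ABuffer so existing code keeps
            // using base()/capacity() while the IMemory can travel over binder.
            buffers->push_back(new ABuffer(mem->pointer(), capacities[i]));
            memories->push_back(mem);
        }
    }
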
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index cf6e937..26798ae 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -18,6 +18,8 @@
 #define LOG_TAG "MediaCodecList"
 #include <utils/Log.h>
 
+#include "MediaCodecListOverrides.h"
+
 #include <binder/IServiceManager.h>
 
 #include <media/IMediaCodecList.h>
@@ -31,6 +33,7 @@
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
 
+#include <sys/stat.h>
 #include <utils/threads.h>
 
 #include <libexpat/expat.h>
@@ -41,21 +44,58 @@
 
 static MediaCodecList *gCodecList = NULL;
 
+static const char *kProfilingResults = "/data/misc/media/media_codecs_profiling_results.xml";
+
+static bool parseBoolean(const char *s) {
+    if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
+        return true;
+    }
+    char *end;
+    unsigned long res = strtoul(s, &end, 10);
+    return *s != '\0' && *end == '\0' && res > 0;
+}
+
 // static
 sp<IMediaCodecList> MediaCodecList::sCodecList;
 
 // static
 sp<IMediaCodecList> MediaCodecList::getLocalInstance() {
-    Mutex::Autolock autoLock(sInitMutex);
+    bool profilingNeeded = false;
+    KeyedVector<AString, CodecSettings> updates;
+    Vector<sp<MediaCodecInfo>> infos;
 
-    if (gCodecList == NULL) {
-        gCodecList = new MediaCodecList;
-        if (gCodecList->initCheck() == OK) {
-            sCodecList = gCodecList;
+    {
+        Mutex::Autolock autoLock(sInitMutex);
+
+        if (gCodecList == NULL) {
+            gCodecList = new MediaCodecList;
+            if (gCodecList->initCheck() == OK) {
+                sCodecList = gCodecList;
+
+                struct stat s;
+                if (stat(kProfilingResults, &s) == -1) {
+                    // profiling results don't exist yet
+                    profilingNeeded = true;
+                    for (size_t i = 0; i < gCodecList->countCodecs(); ++i) {
+                        infos.push_back(gCodecList->getCodecInfo(i));
+                    }
+                }
+            }
         }
     }
 
-    return sCodecList;
+    if (profilingNeeded) {
+        profileCodecs(infos, &updates);
+    }
+
+    {
+        Mutex::Autolock autoLock(sInitMutex);
+        if (updates.size() > 0) {
+            gCodecList->updateDetailsForMultipleCodecs(updates);
+        }
+
+        return sCodecList;
+    }
 }
 
 static Mutex sRemoteInitMutex;
@@ -94,11 +134,27 @@
 }
 
 MediaCodecList::MediaCodecList()
-    : mInitCheck(NO_INIT) {
+    : mInitCheck(NO_INIT),
+      mUpdate(false),
+      mGlobalSettings(new AMessage()) {
     parseTopLevelXMLFile("/etc/media_codecs.xml");
+    parseTopLevelXMLFile(kProfilingResults, true/* ignore_errors */);
 }
 
-void MediaCodecList::parseTopLevelXMLFile(const char *codecs_xml) {
+void MediaCodecList::updateDetailsForMultipleCodecs(
+        const KeyedVector<AString, CodecSettings>& updates) {
+    if (updates.size() == 0) {
+        return;
+    }
+
+    exportResultsToXML(kProfilingResults, updates);
+
+    for (size_t i = 0; i < updates.size(); ++i) {
+        applyCodecSettings(updates.keyAt(i), updates.valueAt(i), &mCodecInfos);
+    }
+}
+
+void MediaCodecList::parseTopLevelXMLFile(const char *codecs_xml, bool ignore_errors) {
     // get href_base
     char *href_base_end = strrchr(codecs_xml, '/');
     if (href_base_end != NULL) {
@@ -119,13 +175,16 @@
     mOMX.clear();
 
     if (mInitCheck != OK) {
+        if (ignore_errors) {
+            mInitCheck = OK;
+            return;
+        }
         mCodecInfos.clear();
         return;
     }
 
     for (size_t i = mCodecInfos.size(); i-- > 0;) {
         const MediaCodecInfo &info = *mCodecInfos.itemAt(i).get();
-
         if (info.mCaps.size() == 0) {
             // No types supported by this component???
             ALOGW("Component %s does not support any type of media?",
@@ -169,6 +228,16 @@
                     }
                     ALOGV("    levels=[%s]", nice.c_str());
                 }
+                {
+                    AString quirks;
+                    for (size_t ix = 0; ix < info.mQuirks.size(); ix++) {
+                        if (ix > 0) {
+                            quirks.append(", ");
+                        }
+                        quirks.append(info.mQuirks[ix]);
+                    }
+                    ALOGV("    quirks=[%s]", quirks.c_str());
+                }
             }
 #endif
         }
@@ -328,6 +397,16 @@
                 mCurrentSection = SECTION_DECODERS;
             } else if (!strcmp(name, "Encoders")) {
                 mCurrentSection = SECTION_ENCODERS;
+            } else if (!strcmp(name, "Settings")) {
+                mCurrentSection = SECTION_SETTINGS;
+            }
+            break;
+        }
+
+        case SECTION_SETTINGS:
+        {
+            if (!strcmp(name, "Setting")) {
+                mInitCheck = addSettingFromAttributes(attrs);
             }
             break;
         }
@@ -397,6 +476,14 @@
     }
 
     switch (mCurrentSection) {
+        case SECTION_SETTINGS:
+        {
+            if (!strcmp(name, "Settings")) {
+                mCurrentSection = SECTION_TOPLEVEL;
+            }
+            break;
+        }
+
         case SECTION_DECODERS:
         {
             if (!strcmp(name, "Decoders")) {
@@ -462,10 +549,81 @@
     --mDepth;
 }
 
+status_t MediaCodecList::addSettingFromAttributes(const char **attrs) {
+    const char *name = NULL;
+    const char *value = NULL;
+    const char *update = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "value")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            value = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL || value == NULL) {
+        return -EINVAL;
+    }
+
+    mUpdate = (update != NULL) && parseBoolean(update);
+    if (mUpdate != mGlobalSettings->contains(name)) {
+        return -EINVAL;
+    }
+
+    mGlobalSettings->setString(name, value);
+    return OK;
+}
+
+void MediaCodecList::setCurrentCodecInfo(bool encoder, const char *name, const char *type) {
+    for (size_t i = 0; i < mCodecInfos.size(); ++i) {
+        if (AString(name) == mCodecInfos[i]->getCodecName()) {
+            if (mCodecInfos[i]->getCapabilitiesFor(type) == NULL) {
+                ALOGW("Overrides with an unexpected mime %s", type);
+                // Create a new MediaCodecInfo (but don't add it to mCodecInfos) to hold the
+                // overrides we don't want.
+                mCurrentInfo = new MediaCodecInfo(name, encoder, type);
+            } else {
+                mCurrentInfo = mCodecInfos.editItemAt(i);
+                mCurrentInfo->updateMime(type);  // to set the current cap
+            }
+            return;
+        }
+    }
+    mCurrentInfo = new MediaCodecInfo(name, encoder, type);
+    // The next step involves trying to load the codec, which may
+    // fail.  Only list the codec if this succeeds.
+    // However, keep mCurrentInfo object around until parsing
+    // of full codec info is completed.
+    if (initializeCapabilities(type) == OK) {
+        mCodecInfos.push_back(mCurrentInfo);
+    }
+}
+
 status_t MediaCodecList::addMediaCodecFromAttributes(
         bool encoder, const char **attrs) {
     const char *name = NULL;
     const char *type = NULL;
+    const char *update = NULL;
 
     size_t i = 0;
     while (attrs[i] != NULL) {
@@ -481,6 +639,12 @@
             }
             type = attrs[i + 1];
             ++i;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
         } else {
             return -EINVAL;
         }
@@ -492,14 +656,39 @@
         return -EINVAL;
     }
 
-    mCurrentInfo = new MediaCodecInfo(name, encoder, type);
-    // The next step involves trying to load the codec, which may
-    // fail.  Only list the codec if this succeeds.
-    // However, keep mCurrentInfo object around until parsing
-    // of full codec info is completed.
-    if (initializeCapabilities(type) == OK) {
-        mCodecInfos.push_back(mCurrentInfo);
+    mUpdate = (update != NULL) && parseBoolean(update);
+    ssize_t index = -1;
+    for (size_t i = 0; i < mCodecInfos.size(); ++i) {
+        if (AString(name) == mCodecInfos[i]->getCodecName()) {
+            index = i;
+        }
     }
+    if (mUpdate != (index >= 0)) {
+        return -EINVAL;
+    }
+
+    if (index >= 0) {
+        // existing codec
+        mCurrentInfo = mCodecInfos.editItemAt(index);
+        if (type != NULL) {
+            // existing type
+            if (mCodecInfos[index]->getCapabilitiesFor(type) == NULL) {
+                return -EINVAL;
+            }
+            mCurrentInfo->updateMime(type);
+        }
+    } else {
+        // new codec
+        mCurrentInfo = new MediaCodecInfo(name, encoder, type);
+        // The next step involves trying to load the codec, which may
+        // fail.  Only list the codec if this succeeds.
+        // However, keep mCurrentInfo object around until parsing
+        // of full codec info is completed.
+        if (initializeCapabilities(type) == OK) {
+            mCodecInfos.push_back(mCurrentInfo);
+        }
+    }
+
     return OK;
 }
 
@@ -553,6 +742,7 @@
 
 status_t MediaCodecList::addTypeFromAttributes(const char **attrs) {
     const char *name = NULL;
+    const char *update = NULL;
 
     size_t i = 0;
     while (attrs[i] != NULL) {
@@ -562,6 +752,12 @@
             }
             name = attrs[i + 1];
             ++i;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
         } else {
             return -EINVAL;
         }
@@ -573,14 +769,25 @@
         return -EINVAL;
     }
 
-    status_t ret = mCurrentInfo->addMime(name);
+    bool isExistingType = (mCurrentInfo->getCapabilitiesFor(name) != NULL);
+    if (mUpdate != isExistingType) {
+        return -EINVAL;
+    }
+
+    status_t ret;
+    if (mUpdate) {
+        ret = mCurrentInfo->updateMime(name);
+    } else {
+        ret = mCurrentInfo->addMime(name);
+    }
+
     if (ret != OK) {
         return ret;
     }
 
     // The next step involves trying to load the codec, which may
     // fail.  Handle this gracefully (by not reporting such mime).
-    if (initializeCapabilities(name) != OK) {
+    if (!mUpdate && initializeCapabilities(name) != OK) {
         mCurrentInfo->removeMime(name);
     }
     return OK;
@@ -758,7 +965,8 @@
             return limitFoundMissingAttr(name, "ranges", found);
         } else if (msg->contains("scale")) {
             return limitFoundMissingAttr(name, "scale");
-        } else if ((name == "alignment" || name == "block-size") ^
+        } else if ((name == "alignment" || name == "block-size"
+                || name == "max-supported-instances") ^
                 (found = msg->findString("value", &value))) {
             return limitFoundMissingAttr(name, "value", found);
         }
@@ -780,15 +988,6 @@
     return OK;
 }
 
-static bool parseBoolean(const char *s) {
-    if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
-        return true;
-    }
-    char *end;
-    unsigned long res = strtoul(s, &end, 10);
-    return *s != '\0' && *end == '\0' && res > 0;
-}
-
 status_t MediaCodecList::addFeature(const char **attrs) {
     size_t i = 0;
     const char *name = NULL;
@@ -860,4 +1059,8 @@
     return mCodecInfos.size();
 }
 
+const sp<AMessage> MediaCodecList::getGlobalSettings() const {
+    return mGlobalSettings;
+}
+
 }  // namespace android
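
In the MediaCodecList.cpp changes above, the relocated parseBoolean() decides the "update" attribute, and the parser then insists that update="true" is present exactly when the Setting, MediaCodec or Type being parsed already exists (otherwise addSettingFromAttributes()/addMediaCodecFromAttributes() return -EINVAL). A standalone sketch of the boolean parsing with a few sample inputs; the function name here is an illustrative copy, not the symbol compiled into libstagefright:

    #include <cstdio>
    #include <cstdlib>
    #include <strings.h>   // strcasecmp

    // Standalone copy of the parseBoolean() logic: "true"/"yes"/"y" in any
    // case, or any positive decimal number, counts as true.
    static bool parseBooleanSketch(const char *s) {
        if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
            return true;
        }
        char *end;
        unsigned long res = strtoul(s, &end, 10);
        return *s != '\0' && *end == '\0' && res > 0;
    }

    int main() {
        const char *samples[] = { "true", "Y", "1", "0", "false", "2x" };
        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i) {
            printf("%-5s -> %s\n", samples[i],
                    parseBooleanSketch(samples[i]) ? "true" : "false");
        }
        return 0;  // prints: true, true, true, false, false, false
    }
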
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
new file mode 100644
index 0000000..3c54f34
--- /dev/null
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -0,0 +1,404 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecListOverrides"
+#include <utils/Log.h>
+
+#include "MediaCodecListOverrides.h"
+
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/IMediaCodecList.h>
+#include <media/MediaCodecInfo.h>
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodec.h>
+
+namespace android {
+
+// a limit to avoid allocating unreasonable number of codec instances in the measurement.
+// this should be in sync with the MAX_SUPPORTED_INSTANCES defined in MediaCodecInfo.java.
+static const int kMaxInstances = 32;
+
+// TODO: move MediaCodecInfo to C++. Until then, some temp methods to parse out info.
+static bool getMeasureSize(sp<MediaCodecInfo::Capabilities> caps, int32_t *width, int32_t *height) {
+    AString sizeRange;
+    if (!caps->getDetails()->findString("size-range", &sizeRange)) {
+        return false;
+    }
+    AString minSize;
+    AString maxSize;
+    if (!splitString(sizeRange, "-", &minSize, &maxSize)) {
+        return false;
+    }
+    AString sWidth;
+    AString sHeight;
+    if (!splitString(minSize, "x", &sWidth, &sHeight)) {
+        if (!splitString(minSize, "*", &sWidth, &sHeight)) {
+            return false;
+        }
+    }
+
+    *width = strtol(sWidth.c_str(), NULL, 10);
+    *height = strtol(sHeight.c_str(), NULL, 10);
+    return (*width > 0) && (*height > 0);
+}
+
+static void getMeasureBitrate(sp<MediaCodecInfo::Capabilities> caps, int32_t *bitrate) {
+    // Until we have a native MediaCodecInfo, we cannot get bitrates based on profile/level.
+    // We use 200000 as the default value for our measurement.
+    *bitrate = 200000;
+    AString bitrateRange;
+    if (!caps->getDetails()->findString("bitrate-range", &bitrateRange)) {
+        return;
+    }
+    AString minBitrate;
+    AString maxBitrate;
+    if (!splitString(bitrateRange, "-", &minBitrate, &maxBitrate)) {
+        return;
+    }
+
+    *bitrate = strtol(minBitrate.c_str(), NULL, 10);
+}
+
+static sp<AMessage> getMeasureFormat(
+        bool isEncoder, AString mime, sp<MediaCodecInfo::Capabilities> caps) {
+    sp<AMessage> format = new AMessage();
+    format->setString("mime", mime);
+
+    if (isEncoder) {
+        int32_t bitrate = 0;
+        getMeasureBitrate(caps, &bitrate);
+        format->setInt32("bitrate", bitrate);
+    }
+
+    if (mime.startsWith("video/")) {
+        int32_t width = 0;
+        int32_t height = 0;
+        if (!getMeasureSize(caps, &width, &height)) {
+            return NULL;
+        }
+        format->setInt32("width", width);
+        format->setInt32("height", height);
+
+        Vector<uint32_t> colorFormats;
+        caps->getSupportedColorFormats(&colorFormats);
+        if (colorFormats.size() == 0) {
+            return NULL;
+        }
+        format->setInt32("color-format", colorFormats[0]);
+
+        format->setFloat("frame-rate", 10.0);
+        format->setInt32("i-frame-interval", 10);
+    } else {
+        // TODO: profile hw audio
+        return NULL;
+    }
+
+    return format;
+}
+
+static size_t doProfileCodecs(
+        bool isEncoder, AString name, AString mime, sp<MediaCodecInfo::Capabilities> caps) {
+    sp<AMessage> format = getMeasureFormat(isEncoder, mime, caps);
+    if (format == NULL) {
+        return 0;
+    }
+    if (isEncoder) {
+        format->setInt32("encoder", 1);
+    }
+    ALOGV("doProfileCodecs %s %s %s %s",
+            name.c_str(), mime.c_str(), isEncoder ? "encoder" : "decoder",
+            format->debugString().c_str());
+
+    status_t err = OK;
+    Vector<sp<MediaCodec>> codecs;
+    while (err == OK && codecs.size() < kMaxInstances) {
+        sp<ALooper> looper = new ALooper;
+        looper->setName("MediaCodec_looper");
+        ALOGV("doProfileCodecs for codec #%zu", codecs.size());
+        ALOGV("doProfileCodecs start looper");
+        looper->start(
+                false /* runOnCallingThread */, false /* canCallJava */, ANDROID_PRIORITY_AUDIO);
+        ALOGV("doProfileCodecs CreateByComponentName");
+        sp<MediaCodec> codec = MediaCodec::CreateByComponentName(looper, name.c_str(), &err);
+        if (err != OK) {
+            ALOGV("Failed to create codec: %s", name.c_str());
+            break;
+        }
+        const sp<Surface> nativeWindow;
+        const sp<ICrypto> crypto;
+        uint32_t flags = 0;
+        ALOGV("doProfileCodecs configure");
+        err = codec->configure(format, nativeWindow, crypto, flags);
+        if (err != OK) {
+            ALOGV("Failed to configure codec: %s with mime: %s", name.c_str(), mime.c_str());
+            codec->release();
+            break;
+        }
+        ALOGV("doProfileCodecs start");
+        err = codec->start();
+        if (err != OK) {
+            ALOGV("Failed to start codec: %s with mime: %s", name.c_str(), mime.c_str());
+            codec->release();
+            break;
+        }
+        codecs.push_back(codec);
+    }
+
+    for (size_t i = 0; i < codecs.size(); ++i) {
+        ALOGV("doProfileCodecs release %s", name.c_str());
+        err = codecs[i]->release();
+        if (err != OK) {
+            ALOGE("Failed to release codec: %s with mime: %s", name.c_str(), mime.c_str());
+        }
+    }
+
+    return codecs.size();
+}
+
+static void printLongString(const char *buf, size_t size) {
+    AString print;
+    const char *start = buf;
+    size_t len;
+    size_t totalLen = size;
+    while (totalLen > 0) {
+        len = (totalLen > 500) ? 500 : totalLen;
+        print.setTo(start, len);
+        ALOGV("%s", print.c_str());
+        totalLen -= len;
+        start += len;
+    }
+}
+
+bool splitString(const AString &s, const AString &delimiter, AString *s1, AString *s2) {
+    ssize_t pos = s.find(delimiter.c_str());
+    if (pos < 0) {
+        return false;
+    }
+    *s1 = AString(s, 0, pos);
+    *s2 = AString(s, pos + 1, s.size() - pos - 1);
+    return true;
+}
+
+bool splitString(
+        const AString &s, const AString &delimiter, AString *s1, AString *s2, AString *s3) {
+    AString temp;
+    if (!splitString(s, delimiter, s1, &temp)) {
+        return false;
+    }
+    if (!splitString(temp, delimiter, s2, s3)) {
+        return false;
+    }
+    return true;
+}
+
+void profileCodecs(
+        const Vector<sp<MediaCodecInfo>> &infos,
+        KeyedVector<AString, CodecSettings> *results,
+        bool forceToMeasure) {
+    KeyedVector<AString, sp<MediaCodecInfo::Capabilities>> codecsNeedMeasure;
+    for (size_t i = 0; i < infos.size(); ++i) {
+        const sp<MediaCodecInfo> info = infos[i];
+        AString name = info->getCodecName();
+        if (name.startsWith("OMX.google.") ||
+                // TODO: reenable below codecs once fixed
+                name == "OMX.Intel.VideoDecoder.VP9.hybrid") {
+            continue;
+        }
+
+        Vector<AString> mimes;
+        info->getSupportedMimes(&mimes);
+        for (size_t i = 0; i < mimes.size(); ++i) {
+            const sp<MediaCodecInfo::Capabilities> &caps =
+                    info->getCapabilitiesFor(mimes[i].c_str());
+            if (!forceToMeasure && caps->getDetails()->contains("max-supported-instances")) {
+                continue;
+            }
+
+            size_t max = doProfileCodecs(info->isEncoder(), name, mimes[i], caps);
+            if (max > 0) {
+                CodecSettings settings;
+                char maxStr[32];
+                sprintf(maxStr, "%zu", max);
+                settings.add("max-supported-instances", maxStr);
+
+                AString key = name;
+                key.append(" ");
+                key.append(mimes[i]);
+                key.append(" ");
+                key.append(info->isEncoder() ? "encoder" : "decoder");
+                results->add(key, settings);
+            }
+        }
+    }
+}
+
+void applyCodecSettings(
+        const AString& codecInfo,
+        const CodecSettings &settings,
+        Vector<sp<MediaCodecInfo>> *infos) {
+    AString name;
+    AString mime;
+    AString type;
+    if (!splitString(codecInfo, " ", &name, &mime, &type)) {
+        return;
+    }
+
+    for (size_t i = 0; i < infos->size(); ++i) {
+        const sp<MediaCodecInfo> &info = infos->itemAt(i);
+        if (name != info->getCodecName()) {
+            continue;
+        }
+
+        Vector<AString> mimes;
+        info->getSupportedMimes(&mimes);
+        for (size_t j = 0; j < mimes.size(); ++j) {
+            if (mimes[j] != mime) {
+                continue;
+            }
+            const sp<MediaCodecInfo::Capabilities> &caps = info->getCapabilitiesFor(mime.c_str());
+            for (size_t k = 0; k < settings.size(); ++k) {
+                caps->getDetails()->setString(
+                        settings.keyAt(k).c_str(), settings.valueAt(k).c_str());
+            }
+        }
+    }
+}
+
+void exportResultsToXML(const char *fileName, const KeyedVector<AString, CodecSettings>& results) {
+#if LOG_NDEBUG == 0
+    ALOGE("measurement results");
+    for (size_t i = 0; i < results.size(); ++i) {
+        ALOGE("key %s", results.keyAt(i).c_str());
+        const CodecSettings &settings = results.valueAt(i);
+        for (size_t j = 0; j < settings.size(); ++j) {
+            ALOGE("name %s value %s", settings.keyAt(j).c_str(), settings.valueAt(j).c_str());
+        }
+    }
+#endif
+
+    AString overrides;
+    FILE *f = fopen(fileName, "rb");
+    if (f != NULL) {
+        fseek(f, 0, SEEK_END);
+        long size = ftell(f);
+        rewind(f);
+
+        char *buf = (char *)malloc(size);
+        if (fread(buf, size, 1, f) == 1) {
+            overrides.setTo(buf, size);
+#if LOG_NDEBUG == 0
+            ALOGV("Existing overrides:");
+            printLongString(buf, size);
+#endif
+        } else {
+            ALOGE("Failed to read %s", fileName);
+        }
+        fclose(f);
+        free(buf);
+    }
+
+    for (size_t i = 0; i < results.size(); ++i) {
+        AString name;
+        AString mime;
+        AString type;
+        if (!splitString(results.keyAt(i), " ", &name, &mime, &type)) {
+            continue;
+        }
+        name = AStringPrintf("\"%s\"", name.c_str());
+        mime = AStringPrintf("\"%s\"", mime.c_str());
+        ALOGV("name(%s) mime(%s) type(%s)", name.c_str(), mime.c_str(), type.c_str());
+        ssize_t posCodec = overrides.find(name.c_str());
+        size_t posInsert = 0;
+        if (posCodec < 0) {
+            AString encodersDecoders = (type == "encoder") ? "<Encoders>" : "<Decoders>";
+            AString encodersDecodersEnd = (type == "encoder") ? "</Encoders>" : "</Decoders>";
+            ssize_t posEncodersDecoders = overrides.find(encodersDecoders.c_str());
+            if (posEncodersDecoders < 0) {
+                AString mediaCodecs = "<MediaCodecs>";
+                ssize_t posMediaCodec = overrides.find(mediaCodecs.c_str());
+                if (posMediaCodec < 0) {
+                    posMediaCodec = overrides.size();
+                    overrides.insert("\n<MediaCodecs>\n</MediaCodecs>\n", posMediaCodec);
+                    posMediaCodec = overrides.find(mediaCodecs.c_str(), posMediaCodec);
+                }
+                posEncodersDecoders = posMediaCodec + mediaCodecs.size();
+                AString codecs = AStringPrintf(
+                        "\n    %s\n    %s", encodersDecoders.c_str(), encodersDecodersEnd.c_str());
+                overrides.insert(codecs.c_str(), posEncodersDecoders);
+                posEncodersDecoders = overrides.find(encodersDecoders.c_str(), posEncodersDecoders);
+            }
+            posCodec = posEncodersDecoders + encodersDecoders.size();
+            AString codec = AStringPrintf(
+                        "\n        <MediaCodec name=%s type=%s update=\"true\" >\n        </MediaCodec>",
+                        name.c_str(),
+                        mime.c_str());
+            overrides.insert(codec.c_str(), posCodec);
+            posCodec = overrides.find(name.c_str());
+        }
+
+        // insert to existing entry
+        ssize_t posMime = overrides.find(mime.c_str(), posCodec);
+        ssize_t posEnd = overrides.find(">", posCodec);
+        if (posEnd < 0) {
+            ALOGE("Format error in overrides file.");
+            return;
+        }
+        if (posMime < 0 || posMime > posEnd) {
+            // new mime for an existing component
+            AString codecEnd = "</MediaCodec>";
+            posInsert = overrides.find(codecEnd.c_str(), posCodec) + codecEnd.size();
+            AString codec = AStringPrintf(
+                    "\n        <MediaCodec name=%s type=%s update=\"true\" >\n        </MediaCodec>",
+                    name.c_str(),
+                    mime.c_str());
+            overrides.insert(codec.c_str(), posInsert);
+            posInsert = overrides.find(">", posInsert) + 1;
+        } else {
+            posInsert = posEnd + 1;
+        }
+
+        CodecSettings settings = results.valueAt(i);
+        for (size_t i = 0; i < settings.size(); ++i) {
+            // WARNING: we assume all the settings are "Limit". Currently we have only one type
+            // of setting in this case, which is "max-supported-instances".
+            AString strInsert = AStringPrintf(
+                    "\n            <Limit name=\"%s\" value=\"%s\" />",
+                    settings.keyAt(i).c_str(),
+                    settings.valueAt(i).c_str());
+            overrides.insert(strInsert, posInsert);
+        }
+    }
+
+#if LOG_NDEBUG == 0
+    ALOGV("New overrides:");
+    printLongString(overrides.c_str(), overrides.size());
+#endif
+
+    f = fopen(fileName, "wb");
+    if (f == NULL) {
+        ALOGE("Failed to open %s for writing.", fileName);
+        return;
+    }
+    if (fwrite(overrides.c_str(), 1, overrides.size(), f) != overrides.size()) {
+        ALOGE("Failed to write to %s.", fileName);
+    }
+    fclose(f);
+}
+
+}  // namespace android
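
profileCodecs() above keys each measurement as "<codec name> <mime> <encoder|decoder>", and applyCodecSettings()/exportResultsToXML() later take those keys apart with the splitString() helpers, which split on the first occurrence of the delimiter only. A standalone std::string sketch of that splitting behavior; splitOnce() and the sample key are illustrative, the real helpers operate on AString:

    #include <iostream>
    #include <string>

    // Standalone equivalent of the splitString() helpers: split on the first
    // occurrence of the delimiter only, so a three-way split peels off the
    // codec name first and then the mime.
    static bool splitOnce(const std::string &s, char delim,
                          std::string *a, std::string *b) {
        size_t pos = s.find(delim);
        if (pos == std::string::npos) {
            return false;
        }
        *a = s.substr(0, pos);
        *b = s.substr(pos + 1);
        return true;
    }

    int main() {
        // Key format produced by profileCodecs(): "<name> <mime> <kind>".
        std::string key = "OMX.qcom.video.decoder.avc video/avc decoder";
        std::string name, rest, mime, kind;
        if (splitOnce(key, ' ', &name, &rest) && splitOnce(rest, ' ', &mime, &kind)) {
            std::cout << name << " | " << mime << " | " << kind << "\n";
        }
        return 0;
    }
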
diff --git a/media/libstagefright/MediaCodecListOverrides.h b/media/libstagefright/MediaCodecListOverrides.h
new file mode 100644
index 0000000..f97ce63
--- /dev/null
+++ b/media/libstagefright/MediaCodecListOverrides.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODEC_LIST_OVERRIDES_H_
+
+#define MEDIA_CODEC_LIST_OVERRIDES_H_
+
+#include <media/MediaCodecInfo.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <utils/StrongPointer.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+class MediaCodecInfo;
+
+bool splitString(const AString &s, const AString &delimiter, AString *s1, AString *s2);
+
+bool splitString(
+        const AString &s, const AString &delimiter, AString *s1, AString *s2, AString *s3);
+
+void profileCodecs(
+        const Vector<sp<MediaCodecInfo>> &infos,
+        KeyedVector<AString, CodecSettings> *results,
+        bool forceToMeasure = false);  // forceToMeasure is mainly for testing
+
+void applyCodecSettings(
+        const AString& codecInfo,
+        const CodecSettings &settings,
+        Vector<sp<MediaCodecInfo>> *infos);
+
+void exportResultsToXML(const char *fileName, const KeyedVector<AString, CodecSettings>& results);
+
+}  // namespace android
+
+#endif  // MEDIA_CODEC_LIST_OVERRIDES_H_
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index b6fa810..6568d25 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -399,6 +399,9 @@
 
     ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
 
+    mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, mReflector);
+    mEncoder->setCallback(mEncoderActivityNotify);
+
     status_t err = mEncoder->configure(
                 mOutputFormat,
                 NULL /* nativeWindow */,
@@ -422,9 +425,6 @@
         }
     }
 
-    mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, mReflector);
-    mEncoder->setCallback(mEncoderActivityNotify);
-
     err = mEncoder->start();
 
     if (err != OK) {
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index bdd6d56..aba64d5 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -230,8 +230,13 @@
         return ERROR_MALFORMED;
     }
 
+    if (SIZE_MAX / sizeof(SampleToChunkEntry) <= mNumSampleToChunkOffsets)
+        return ERROR_OUT_OF_RANGE;
+
     mSampleToChunkEntries =
-        new SampleToChunkEntry[mNumSampleToChunkOffsets];
+        new (std::nothrow) SampleToChunkEntry[mNumSampleToChunkOffsets];
+    if (!mSampleToChunkEntries)
+        return ERROR_OUT_OF_RANGE;
 
     for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
         uint8_t buffer[12];
@@ -330,11 +335,13 @@
     }
 
     mTimeToSampleCount = U32_AT(&header[4]);
-    uint64_t allocSize = mTimeToSampleCount * 2 * sizeof(uint32_t);
+    uint64_t allocSize = mTimeToSampleCount * 2 * (uint64_t)sizeof(uint32_t);
     if (allocSize > SIZE_MAX) {
         return ERROR_OUT_OF_RANGE;
     }
-    mTimeToSample = new uint32_t[mTimeToSampleCount * 2];
+    mTimeToSample = new (std::nothrow) uint32_t[mTimeToSampleCount * 2];
+    if (!mTimeToSample)
+        return ERROR_OUT_OF_RANGE;
 
     size_t size = sizeof(uint32_t) * mTimeToSampleCount * 2;
     if (mDataSource->readAt(
@@ -376,12 +383,14 @@
     }
 
     mNumCompositionTimeDeltaEntries = numEntries;
-    uint64_t allocSize = numEntries * 2 * sizeof(uint32_t);
+    uint64_t allocSize = numEntries * 2 * (uint64_t)sizeof(uint32_t);
     if (allocSize > SIZE_MAX) {
         return ERROR_OUT_OF_RANGE;
     }
 
-    mCompositionTimeDeltaEntries = new uint32_t[2 * numEntries];
+    mCompositionTimeDeltaEntries = new (std::nothrow) uint32_t[2 * numEntries];
+    if (!mCompositionTimeDeltaEntries)
+        return ERROR_OUT_OF_RANGE;
 
     if (mDataSource->readAt(
                 data_offset + 8, mCompositionTimeDeltaEntries, numEntries * 8)
@@ -426,12 +435,15 @@
         ALOGV("Table of sync samples is empty or has only a single entry!");
     }
 
-    uint64_t allocSize = mNumSyncSamples * sizeof(uint32_t);
+    uint64_t allocSize = mNumSyncSamples * (uint64_t)sizeof(uint32_t);
     if (allocSize > SIZE_MAX) {
         return ERROR_OUT_OF_RANGE;
     }
 
-    mSyncSamples = new uint32_t[mNumSyncSamples];
+    mSyncSamples = new (std::nothrow) uint32_t[mNumSyncSamples];
+    if (!mSyncSamples)
+        return ERROR_OUT_OF_RANGE;
+
     size_t size = mNumSyncSamples * sizeof(uint32_t);
     if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples, size)
             != (ssize_t)size) {
@@ -499,7 +511,9 @@
         return;
     }
 
-    mSampleTimeEntries = new SampleTimeEntry[mNumSampleSizes];
+    mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
+    if (!mSampleTimeEntries)
+        return;
 
     uint32_t sampleIndex = 0;
     uint32_t sampleTime = 0;
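
The SampleTable.cpp hunks above all follow the same hardening pattern: widen the size computation to 64 bits before comparing against SIZE_MAX, then allocate with std::nothrow and fail gracefully on NULL instead of aborting on a huge count read from an untrusted file. A minimal standalone sketch of that pattern; the entry type and function name are placeholders:

    #include <cstddef>
    #include <cstdint>
    #include <new>

    struct SampleEntry { uint32_t a, b, c; };  // placeholder entry type

    // Refuse counts that would overflow a 32-bit size_t, and return NULL on
    // allocation failure instead of aborting.
    static SampleEntry *allocEntries(uint32_t declaredCount) {
        uint64_t allocSize = (uint64_t)declaredCount * sizeof(SampleEntry);
        if (allocSize > SIZE_MAX) {
            return NULL;  // only possible when size_t is 32 bits wide
        }
        return new (std::nothrow) SampleEntry[declaredCount];
    }
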
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 101fc8a..820b2fc 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -519,6 +519,12 @@
 
     mMetaData.add(METADATA_KEY_NUM_TRACKS, String8(tmp));
 
+    float captureFps;
+    if (meta->findFloat(kKeyCaptureFramerate, &captureFps)) {
+        sprintf(tmp, "%f", captureFps);
+        mMetaData.add(METADATA_KEY_CAPTURE_FRAMERATE, String8(tmp));
+    }
+
     bool hasAudio = false;
     bool hasVideo = false;
     int32_t videoWidth = -1;
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 8506e37..dfe8ad1 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -166,6 +166,16 @@
         msg->setInt32("max-input-size", maxInputSize);
     }
 
+    int32_t maxWidth;
+    if (meta->findInt32(kKeyMaxWidth, &maxWidth)) {
+        msg->setInt32("max-width", maxWidth);
+    }
+
+    int32_t maxHeight;
+    if (meta->findInt32(kKeyMaxHeight, &maxHeight)) {
+        msg->setInt32("max-height", maxHeight);
+    }
+
     int32_t rotationDegrees;
     if (meta->findInt32(kKeyRotation, &rotationDegrees)) {
         msg->setInt32("rotation-degrees", rotationDegrees);
@@ -568,6 +578,16 @@
         meta->setInt32(kKeyMaxInputSize, maxInputSize);
     }
 
+    int32_t maxWidth;
+    if (msg->findInt32("max-width", &maxWidth)) {
+        meta->setInt32(kKeyMaxWidth, maxWidth);
+    }
+
+    int32_t maxHeight;
+    if (msg->findInt32("max-height", &maxHeight)) {
+        meta->setInt32(kKeyMaxHeight, maxHeight);
+    }
+
     // reassemble the csd data into its original form
     sp<ABuffer> csd0;
     if (msg->findBuffer("csd-0", &csd0)) {
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 6e6a78a..a35909e 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -139,7 +139,7 @@
         uint32_t height = mImg->d_h;
         outInfo = *outQueue.begin();
         outHeader = outInfo->mHeader;
-        CHECK_EQ(mImg->fmt, IMG_FMT_I420);
+        CHECK_EQ(mImg->fmt, VPX_IMG_FMT_I420);
         handlePortSettingsChange(portWillReset, width, height);
         if (*portWillReset) {
             return true;
@@ -151,12 +151,12 @@
         outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
 
         uint8_t *dst = outHeader->pBuffer;
-        const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y];
-        const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U];
-        const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V];
-        size_t srcYStride = mImg->stride[PLANE_Y];
-        size_t srcUStride = mImg->stride[PLANE_U];
-        size_t srcVStride = mImg->stride[PLANE_V];
+        const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
+        const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
+        const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
+        size_t srcYStride = mImg->stride[VPX_PLANE_Y];
+        size_t srcUStride = mImg->stride[VPX_PLANE_U];
+        size_t srcVStride = mImg->stride[VPX_PLANE_V];
         copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
 
         mImg = NULL;
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index f7a4a0d..4886000 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -171,6 +171,8 @@
       mOrigBandwidthIndex(-1),
       mLastBandwidthBps(-1ll),
       mBandwidthEstimator(new BandwidthEstimator()),
+      mMaxWidth(720),
+      mMaxHeight(480),
       mStreamMask(0),
       mNewStreamMask(0),
       mSwapMask(0),
@@ -345,6 +347,9 @@
     if (stream == STREAMTYPE_AUDIO) {
         // set AAC input buffer size to 32K bytes (256kbps x 1sec)
         meta->setInt32(kKeyMaxInputSize, 32 * 1024);
+    } else if (stream == STREAMTYPE_VIDEO) {
+        meta->setInt32(kKeyMaxWidth, mMaxWidth);
+        meta->setInt32(kKeyMaxHeight, mMaxHeight);
     }
 
     return convertMetaDataToMessage(meta, format);
@@ -498,16 +503,15 @@
 
         case kWhatSeek:
         {
-            sp<AReplyToken> seekReplyID;
-            CHECK(msg->senderAwaitsResponse(&seekReplyID));
-            mSeekReplyID = seekReplyID;
+            if (mReconfigurationInProgress) {
+                msg->post(50000);
+                break;
+            }
+
+            CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
             mSeekReply = new AMessage;
 
-            status_t err = onSeek(msg);
-
-            if (err != OK) {
-                msg->post(50000);
-            }
+            onSeek(msg);
             break;
         }
 
@@ -848,6 +852,9 @@
     size_t initialBandwidth = 0;
     size_t initialBandwidthIndex = 0;
 
+    int32_t maxWidth = 0;
+    int32_t maxHeight = 0;
+
     if (mPlaylist->isVariantPlaylist()) {
         Vector<BandwidthItem> itemsWithVideo;
         for (size_t i = 0; i < mPlaylist->size(); ++i) {
@@ -861,6 +868,14 @@
 
             CHECK(meta->findInt32("bandwidth", (int32_t *)&item.mBandwidth));
 
+            int32_t width, height;
+            if (meta->findInt32("width", &width)) {
+                maxWidth = max(maxWidth, width);
+            }
+            if (meta->findInt32("height", &height)) {
+                maxHeight = max(maxHeight, height);
+            }
+
             mBandwidthItems.push(item);
             if (mPlaylist->hasType(i, "video")) {
                 itemsWithVideo.push(item);
@@ -894,6 +909,9 @@
         mBandwidthItems.push(item);
     }
 
+    mMaxWidth = maxWidth > 0 ? maxWidth : mMaxWidth;
+    mMaxHeight = maxHeight > 0 ? maxHeight : mMaxHeight;
+
     mPlaylist->pickRandomMediaItems();
     changeConfiguration(
             0ll /* timeUs */, initialBandwidthIndex, false /* pickTrack */);
@@ -1372,16 +1390,10 @@
     return audioTime < videoTime ? videoTime : audioTime;
 }
 
-status_t LiveSession::onSeek(const sp<AMessage> &msg) {
+void LiveSession::onSeek(const sp<AMessage> &msg) {
     int64_t timeUs;
     CHECK(msg->findInt64("timeUs", &timeUs));
-
-    if (!mReconfigurationInProgress) {
-        changeConfiguration(timeUs);
-        return OK;
-    } else {
-        return -EWOULDBLOCK;
-    }
+    changeConfiguration(timeUs);
 }
 
 status_t LiveSession::getDuration(int64_t *durationUs) const {
@@ -1462,6 +1474,10 @@
     if (bandwidthIndex >= 0) {
         mOrigBandwidthIndex = mCurBandwidthIndex;
         mCurBandwidthIndex = bandwidthIndex;
+        if (mOrigBandwidthIndex != mCurBandwidthIndex) {
+            ALOGI("#### Starting Bandwidth Switch: %zd => %zd",
+                    mOrigBandwidthIndex, mCurBandwidthIndex);
+        }
     }
     CHECK_LT(mCurBandwidthIndex, mBandwidthItems.size());
     const BandwidthItem &item = mBandwidthItems.itemAt(mCurBandwidthIndex);
@@ -1581,6 +1597,7 @@
 
     if (timeUs >= 0) {
         mLastSeekTimeUs = timeUs;
+        mLastDequeuedTimeUs = timeUs;
 
         for (size_t i = 0; i < mPacketSources.size(); i++) {
             mPacketSources.editValueAt(i)->clear();
@@ -1633,8 +1650,10 @@
             ALOGV("stream %zu changed: oldURI %s, newURI %s", i,
                     mStreams[i].mUri.c_str(), URIs[i].c_str());
             sp<AnotherPacketSource> source = mPacketSources.valueFor(indexToType(i));
-            source->queueDiscontinuity(
-                    ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+            if (source->getLatestDequeuedMeta() != NULL) {
+                source->queueDiscontinuity(
+                        ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+            }
         }
         // Determine which decoders to shutdown on the player side,
         // a decoder has to be shutdown if its streamtype was active
@@ -1694,10 +1713,6 @@
             // and resume audio.
             mSwapMask =  mNewStreamMask & mStreamMask & ~resumeMask;
             switching = (mSwapMask != 0);
-            if (!switching) {
-                ALOGV("#### Finishing Bandwidth Switch Early: %zd => %zd",
-                        mOrigBandwidthIndex, mCurBandwidthIndex);
-            }
         }
         mRealTimeBaseUs = ALooper::GetNowUs() - mLastDequeuedTimeUs;
     } else {
@@ -1850,7 +1865,11 @@
         mSwitchInProgress = true;
     } else {
         mStreamMask = mNewStreamMask;
-        mOrigBandwidthIndex = mCurBandwidthIndex;
+        if (mOrigBandwidthIndex != mCurBandwidthIndex) {
+            ALOGV("#### Finished Bandwidth Switch Early: %zd => %zd",
+                    mOrigBandwidthIndex, mCurBandwidthIndex);
+            mOrigBandwidthIndex = mCurBandwidthIndex;
+        }
     }
 
     ALOGV("onChangeConfiguration3: mSwitchInProgress %d, mStreamMask 0x%x",
@@ -1977,11 +1996,19 @@
 
     bool underflow, ready, down, up;
     if (checkBuffering(underflow, ready, down, up)) {
-        if (mInPreparationPhase && ready) {
-            postPrepared(OK);
+        if (mInPreparationPhase) {
+            // Allow down switch even if we're still preparing.
+            //
+            // Some streams have a high bandwidth index as default,
+            // when bandwidth is low, it takes a long time to buffer
+            // to ready mark, then it immediately pauses after start
+            // as we have to do a down switch. It's better experience
+            // to restart from a lower index, if we detect low bw.
+            if (!switchBandwidthIfNeeded(false /* up */, down) && ready) {
+                postPrepared(OK);
+            }
         }
 
-        // don't switch before we report prepared
         if (!mInPreparationPhase) {
             if (ready) {
                 stopBufferingIfNecessary();
@@ -1989,8 +2016,7 @@
                 startBufferingIfNecessary();
             }
             switchBandwidthIfNeeded(up, down);
-       }
-
+        }
     }
 
     schedulePollBuffering();
@@ -2082,7 +2108,8 @@
             if (mPacketSources[i]->isFinished(0 /* duration */)) {
                 percent = 100;
             } else {
-                percent = (int32_t)(100.0 * (mLastDequeuedTimeUs + bufferedDurationUs) / durationUs);
+                percent = (int32_t)(100.0 *
+                        (mLastDequeuedTimeUs + bufferedDurationUs) / durationUs);
             }
             if (minBufferPercent < 0 || percent < minBufferPercent) {
                 minBufferPercent = percent;
@@ -2165,10 +2192,14 @@
     notify->post();
 }
 
-void LiveSession::switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow) {
+/*
+ * returns true if a bandwidth switch is actually needed (and started),
+ * returns false otherwise
+ */
+bool LiveSession::switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow) {
     // no need to check bandwidth if we only have 1 bandwidth settings
     if (mSwitchInProgress || mBandwidthItems.size() < 2) {
-        return;
+        return false;
     }
 
     int32_t bandwidthBps;
@@ -2177,7 +2208,7 @@
         mLastBandwidthBps = bandwidthBps;
     } else {
         ALOGV("no bandwidth estimate.");
-        return;
+        return false;
     }
 
     int32_t curBandwidth = mBandwidthItems.itemAt(mCurBandwidthIndex).mBandwidth;
@@ -2196,16 +2227,16 @@
         // bandwidthIndex is < mCurBandwidthIndex, as getBandwidthIndex() only uses 70%
         // of measured bw. In that case we don't want to do anything, since we have
         // both enough buffer and enough bw.
-        if (bandwidthIndex == mCurBandwidthIndex
-                || (canSwitchUp && bandwidthIndex < mCurBandwidthIndex)
-                || (canSwithDown && bandwidthIndex > mCurBandwidthIndex)) {
-            return;
+        if ((canSwitchUp && bandwidthIndex > mCurBandwidthIndex)
+         || (canSwithDown && bandwidthIndex < mCurBandwidthIndex)) {
+            // if not yet prepared, just restart again with new bw index.
+            // this is faster and playback experience is cleaner.
+            changeConfiguration(
+                    mInPreparationPhase ? 0 : -1ll, bandwidthIndex);
+            return true;
         }
-
-        ALOGI("#### Starting Bandwidth Switch: %zd => %zd",
-                mCurBandwidthIndex, bandwidthIndex);
-        changeConfiguration(-1, bandwidthIndex, false);
     }
+    return false;
 }
 
 void LiveSession::postError(status_t err) {
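
With switchBandwidthIfNeeded() in LiveSession.cpp now reporting whether it actually started a switch, onPollBuffering() can hold back postPrepared() while a down-switch restarts buffering at a lower variant. A simplified standalone sketch of that decision flow; the struct, method bodies and printouts are illustrative placeholders, not the LiveSession API:

    #include <cstdio>

    // Simplified poll-buffering decision: during preparation, prefer
    // restarting at a lower bandwidth over declaring "prepared" on a
    // variant the connection cannot sustain.
    struct PollSketch {
        bool inPreparationPhase = true;

        bool switchBandwidthIfNeeded(bool /*up*/, bool down) {
            // Pretend a down-switch is needed whenever the buffer is low.
            if (down) {
                printf("restarting at a lower bandwidth index\n");
                return true;
            }
            return false;
        }

        void onBufferingPolled(bool ready, bool down, bool up) {
            if (inPreparationPhase) {
                if (!switchBandwidthIfNeeded(false /* up */, down) && ready) {
                    printf("postPrepared(OK)\n");
                    inPreparationPhase = false;
                }
                return;
            }
            switchBandwidthIfNeeded(up, down);
        }
    };

    int main() {
        PollSketch s;
        s.onBufferingPolled(false /* ready */, true /* down */, false /* up */);
        s.onBufferingPolled(true /* ready */, false /* down */, false /* up */);
        return 0;
    }
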
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index e4f1b97..ed74bc2 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -191,6 +191,8 @@
     sp<BandwidthEstimator> mBandwidthEstimator;
 
     sp<M3UParser> mPlaylist;
+    int32_t mMaxWidth;
+    int32_t mMaxHeight;
 
     sp<ALooper> mFetcherLooper;
     KeyedVector<AString, FetcherInfo> mFetcherInfos;
@@ -237,7 +239,7 @@
     sp<PlaylistFetcher> addFetcher(const char *uri);
 
     void onConnect(const sp<AMessage> &msg);
-    status_t onSeek(const sp<AMessage> &msg);
+    void onSeek(const sp<AMessage> &msg);
     void onFinishDisconnect2();
 
     // If given a non-zero block_size (default 0), it is used to cap the number of
@@ -292,7 +294,7 @@
     bool checkSwitchProgress(
             sp<AMessage> &msg, int64_t delayUs, bool *needResumeUntil);
 
-    void switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow);
+    bool switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow);
 
     void schedulePollBuffering();
     void cancelPollBuffering();
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 7bb7f2c..ef9145c 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -808,6 +808,29 @@
                 *meta = new AMessage;
             }
             (*meta)->setString(key.c_str(), codecs.c_str());
+        } else if (!strcasecmp("resolution", key.c_str())) {
+            const char *s = val.c_str();
+            char *end;
+            unsigned long width = strtoul(s, &end, 10);
+
+            if (end == s || *end != 'x') {
+                // malformed
+                continue;
+            }
+
+            s = end + 1;
+            unsigned long height = strtoul(s, &end, 10);
+
+            if (end == s || *end != '\0') {
+                // malformed
+                continue;
+            }
+
+            if (meta->get() == NULL) {
+                *meta = new AMessage;
+            }
+            (*meta)->setInt32("width", width);
+            (*meta)->setInt32("height", height);
         } else if (!strcasecmp("audio", key.c_str())
                 || !strcasecmp("video", key.c_str())
                 || !strcasecmp("subtitles", key.c_str())) {
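
The new RESOLUTION handling in M3UParser.cpp parses "WIDTHxHEIGHT" with two strtoul() calls and silently skips anything that does not match exactly; the resulting width and height feed the max-width/max-height hints set up in LiveSession.cpp above. A standalone sketch of the same parse; the function name is illustrative:

    #include <cstdio>
    #include <cstdlib>

    // Parse an HLS RESOLUTION attribute of the form "1280x720"; both numbers
    // must be present and nothing may follow the height.
    static bool parseResolution(const char *val, unsigned long *w, unsigned long *h) {
        const char *s = val;
        char *end;
        *w = strtoul(s, &end, 10);
        if (end == s || *end != 'x') {
            return false;  // malformed width or missing 'x'
        }
        s = end + 1;
        *h = strtoul(s, &end, 10);
        return end != s && *end == '\0';
    }

    int main() {
        unsigned long w, h;
        printf("%d\n", parseResolution("1280x720", &w, &h));  // 1
        printf("%d\n", parseResolution("1280x", &w, &h));     // 0
        printf("%d\n", parseResolution("720p", &w, &h));      // 0
        return 0;
    }
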
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 8c16251..3067c3d 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -104,11 +104,15 @@
     String8 mLastCommentName;
     String8 mLastCommentData;
 
+    KeyedVector<uint32_t, AString> mMetaKeyMap;
+
     status_t readMetaData();
     status_t parseChunk(off64_t *offset, int depth);
     status_t parseITunesMetaData(off64_t offset, size_t size);
     status_t parse3GPPMetaData(off64_t offset, size_t size, int depth);
     void parseID3v2MetaData(off64_t offset);
+    status_t parseQTMetaKey(off64_t data_offset, size_t data_size);
+    status_t parseQTMetaVal(int32_t keyId, off64_t data_offset, size_t data_size);
 
     status_t updateAudioTrackInfoFromESDS_MPEG4Audio(
             const void *esds_data, size_t esds_size);
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 0676a33..a4f8739 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -183,6 +183,11 @@
 
         mediaBuffer->meta_data()->setInt64(kKeyTime, timeUs);
 
+        int32_t isSync;
+        if (buffer->meta()->findInt32("isSync", &isSync)) {
+            mediaBuffer->meta_data()->setInt32(kKeyIsSyncFrame, isSync);
+        }
+
         *out = mediaBuffer;
         return OK;
     }
@@ -355,10 +360,15 @@
     int64_t time2 = -1;
     int64_t durationUs = 0;
 
-    List<sp<ABuffer> >::iterator it = mBuffers.begin();
-    while (it != mBuffers.end()) {
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); it++) {
         const sp<ABuffer> &buffer = *it;
 
+        int32_t discard;
+        if (buffer->meta()->findInt32("discard", &discard) && discard) {
+            continue;
+        }
+
         int64_t timeUs;
         if (buffer->meta()->findInt64("timeUs", &timeUs)) {
             if (time1 < 0 || timeUs < time1) {
@@ -373,8 +383,6 @@
             durationUs += time2 - time1;
             time1 = time2 = -1;
         }
-
-        ++it;
     }
 
     return durationUs + (time2 - time1);
@@ -393,11 +401,19 @@
         return getBufferedDurationUs_l(&finalResult);
     }
 
-    List<sp<ABuffer> >::iterator it = mBuffers.begin();
-    sp<ABuffer> buffer = *it;
+    sp<ABuffer> buffer;
+    int32_t discard;
+    int64_t startTimeUs = -1ll;
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); it++) {
+        buffer = *it;
+        if (buffer->meta()->findInt32("discard", &discard) && discard) {
+            continue;
+        }
+        buffer->meta()->findInt64("timeUs", &startTimeUs);
+        break;
+    }
 
-    int64_t startTimeUs;
-    buffer->meta()->findInt64("timeUs", &startTimeUs);
     if (startTimeUs < 0) {
         return 0;
     }
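
The AnotherPacketSource.cpp changes above make the buffered-duration bookkeeping skip any queued buffer whose meta carries a "discard" flag, so data marked to be dropped no longer inflates the reported start time or duration. A tiny standalone sketch of that filtering; the container and function name are illustrative:

    #include <cstdint>
    #include <list>
    #include <utility>

    // Queued buffers flagged "discard" must not contribute to the reported
    // start time; skip them the same way the patched loops above do.
    static int64_t firstRealTimeUs(
            const std::list<std::pair<int64_t, bool> > &buffers) {  // (timeUs, discard)
        std::list<std::pair<int64_t, bool> >::const_iterator it;
        for (it = buffers.begin(); it != buffers.end(); ++it) {
            if (it->second) {
                continue;  // marked for discard
            }
            return it->first;
        }
        return -1;  // nothing queued that will actually be dequeued
    }
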
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index b17985c..a279049 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -533,6 +533,7 @@
     int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
     CHECK_GE(timeUs, 0ll);
     accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
 
     memmove(
             mBuffer->data(),
@@ -582,6 +583,7 @@
     int64_t timeUs = fetchTimestamp(payloadSize + 4);
     CHECK_GE(timeUs, 0ll);
     accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
 
     int16_t *ptr = (int16_t *)accessUnit->data();
     for (size_t i = 0; i < payloadSize / sizeof(int16_t); ++i) {
@@ -693,6 +695,7 @@
     mBuffer->setRange(0, mBuffer->size() - offset);
 
     accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
 
     return accessUnit;
 }
@@ -743,6 +746,7 @@
     const uint8_t *nalStart;
     size_t nalSize;
     bool foundSlice = false;
+    bool foundIDR = false;
     while ((err = getNextNALUnit(&data, &size, &nalStart, &nalSize)) == OK) {
         if (nalSize == 0) continue;
 
@@ -750,6 +754,9 @@
         bool flush = false;
 
         if (nalType == 1 || nalType == 5) {
+            if (nalType == 5) {
+                foundIDR = true;
+            }
             if (foundSlice) {
                 ABitReader br(nalStart + 1, nalSize);
                 unsigned first_mb_in_slice = parseUE(&br);
@@ -838,6 +845,9 @@
             CHECK_GE(timeUs, 0ll);
 
             accessUnit->meta()->setInt64("timeUs", timeUs);
+            if (foundIDR) {
+                accessUnit->meta()->setInt32("isSync", 1);
+            }
 
             if (mFormat == NULL) {
                 mFormat = MakeAVCCodecSpecificData(accessUnit);
@@ -894,6 +904,7 @@
     CHECK_GE(timeUs, 0ll);
 
     accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
 
     if (mFormat == NULL) {
         mFormat = new MetaData;
@@ -970,6 +981,9 @@
     int pprevStartCode = -1;
     int prevStartCode = -1;
     int currentStartCode = -1;
+    bool gopFound = false;
+    bool isClosedGop = false;
+    bool brokenLink = false;
 
     size_t offset = 0;
     while (offset + 3 < size) {
@@ -1032,6 +1046,13 @@
             }
         }
 
+        if (mFormat != NULL && currentStartCode == 0xb8) {
+            // GOP layer
+            gopFound = true;
+            isClosedGop = (data[offset + 7] & 0x40) != 0;
+            brokenLink = (data[offset + 7] & 0x20) != 0;
+        }
+
         if (mFormat != NULL && currentStartCode == 0x00) {
             // Picture start
 
@@ -1053,6 +1074,9 @@
                 offset = 0;
 
                 accessUnit->meta()->setInt64("timeUs", timeUs);
+                if (gopFound && (!brokenLink || isClosedGop)) {
+                    accessUnit->meta()->setInt32("isSync", 1);
+                }
 
                 ALOGV("returning MPEG video access unit at time %" PRId64 " us",
                       timeUs);
@@ -1197,6 +1221,8 @@
             case SKIP_TO_VOP_START:
             {
                 if (chunkType == 0xb6) {
+                    int vopCodingType = (data[offset + 4] & 0xc0) >> 6;
+
                     offset += chunkSize;
 
                     sp<ABuffer> accessUnit = new ABuffer(offset);
@@ -1212,6 +1238,9 @@
                     offset = 0;
 
                     accessUnit->meta()->setInt64("timeUs", timeUs);
+                    if (vopCodingType == 0) {  // intra-coded VOP
+                        accessUnit->meta()->setInt32("isSync", 1);
+                    }
 
                     ALOGV("returning MPEG4 video access unit at time %" PRId64 " us",
                          timeUs);
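
The ESQueue.cpp hunks tag access units with an "isSync" flag: unconditionally for the audio and PCM queues, only when an IDR NAL unit (type 5) is present for AVC, only after a closed or unbroken GOP header for MPEG-2 video, and only for intra-coded VOPs for MPEG-4 video; AnotherPacketSource then forwards the flag as kKeyIsSyncFrame. A compact sketch of the AVC case, assuming the usual 5-bit NAL type in the first byte after the start code; the helper name is illustrative:

    #include <cstddef>
    #include <cstdint>

    // For AVC elementary streams, an access unit is a sync point when it
    // contains an IDR slice (NAL unit type 5).
    static bool containsIdrNal(const uint8_t *const *nalStarts,
                               const size_t *nalSizes, size_t count) {
        for (size_t i = 0; i < count; ++i) {
            if (nalSizes[i] == 0) {
                continue;
            }
            unsigned nalType = nalStarts[i][0] & 0x1f;
            if (nalType == 5) {
                return true;  // IDR slice => mark the access unit "isSync"
            }
        }
        return false;
    }
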
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index 1f43d6d..33cfd1d 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -85,12 +85,6 @@
         MediaBuffer **out, const ReadOptions *options) {
     *out = NULL;
 
-    int64_t seekTimeUs;
-    ReadOptions::SeekMode seekMode;
-    if (mSeekable && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
-        return ERROR_UNSUPPORTED;
-    }
-
     status_t finalResult;
     while (!mImpl->hasBufferAvailable(&finalResult)) {
         if (finalResult != OK) {
@@ -103,6 +97,17 @@
         }
     }
 
+    int64_t seekTimeUs;
+    ReadOptions::SeekMode seekMode;
+    if (mSeekable && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
+        // A seek was requested, but we don't actually support seeking and so can only "seek" to
+        // the current position
+        int64_t nextBufTimeUs;
+        if (mImpl->nextBufferTime(&nextBufTimeUs) != OK || seekTimeUs != nextBufTimeUs) {
+            return ERROR_UNSUPPORTED;
+        }
+    }
+
     return mImpl->read(out, options);
 }
 
diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp
index 9b6958a..3ab241a 100644
--- a/media/libstagefright/omx/SoftOMXPlugin.cpp
+++ b/media/libstagefright/omx/SoftOMXPlugin.cpp
@@ -85,7 +85,7 @@
         void *libHandle = dlopen(libName.c_str(), RTLD_NOW);
 
         if (libHandle == NULL) {
-            ALOGE("unable to dlopen %s", libName.c_str());
+            ALOGE("unable to dlopen %s: %s", libName.c_str(), dlerror());
 
             return OMX_ErrorComponentNotFound;
         }
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index 8d6ff5b..51e1c78 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -62,6 +62,33 @@
 
 include $(BUILD_NATIVE_TEST)
 
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := MediaCodecListOverrides_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+	MediaCodecListOverrides_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	libmedia \
+	libstagefright \
+	libstagefright_foundation \
+	libstagefright_omx \
+	libutils \
+	liblog
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/media/libstagefright \
+	frameworks/av/media/libstagefright/include \
+	frameworks/native/include/media/openmax \
+
+LOCAL_32_BIT_ONLY := true
+
+include $(BUILD_NATIVE_TEST)
+
 # Include subdirectory makefiles
 # ============================================================
 
diff --git a/media/libstagefright/tests/MediaCodecListOverrides_test.cpp b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
new file mode 100644
index 0000000..cacaa84
--- /dev/null
+++ b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecListOverrides_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include "MediaCodecListOverrides.h"
+
+#include <media/MediaCodecInfo.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodecList.h>
+
+namespace android {
+
+static const char kTestOverridesStr[] =
+"<MediaCodecs>\n"
+"    <Settings>\n"
+"        <Setting name=\"max-max-supported-instances\" value=\"8\" update=\"true\" />\n"
+"    </Settings>\n"
+"    <Encoders>\n"
+"        <MediaCodec name=\"OMX.qcom.video.encoder.mpeg4\" type=\"video/mp4v-es\" update=\"true\" >\n"
+"            <Quirk name=\"requires-allocate-on-input-ports\" />\n"
+"            <Limit name=\"bitrate\" range=\"1-20000000\" />\n"
+"            <Feature name=\"can-swap-width-height\" />\n"
+"        </MediaCodec>\n"
+"    </Encoders>\n"
+"    <Decoders>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.avc\" type=\"video/avc\" update=\"true\" >\n"
+"            <Quirk name=\"requires-allocate-on-input-ports\" />\n"
+"            <Limit name=\"size\" min=\"64x64\" max=\"1920x1088\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.mpeg2\" type=\"different_mime\" update=\"true\" >\n"
+"        </MediaCodec>\n"
+"    </Decoders>\n"
+"</MediaCodecs>\n";
+
+static const char kTestOverridesStrNew1[] =
+"<MediaCodecs>\n"
+"    <Settings>\n"
+"        <Setting name=\"max-max-supported-instances\" value=\"8\" update=\"true\" />\n"
+"    </Settings>\n"
+"    <Encoders>\n"
+"        <MediaCodec name=\"OMX.qcom.video.encoder.avc\" type=\"video/avc\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.encoder.mpeg4\" type=\"video/mp4v-es\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"            <Quirk name=\"requires-allocate-on-input-ports\" />\n"
+"            <Limit name=\"bitrate\" range=\"1-20000000\" />\n"
+"            <Feature name=\"can-swap-width-height\" />\n"
+"        </MediaCodec>\n"
+"    </Encoders>\n"
+"    <Decoders>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.mpeg4\" type=\"video/mp4v-es\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"3\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.h263\" type=\"video/3gpp\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.avc.secure\" type=\"video/avc\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"1\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.avc\" type=\"video/avc\" update=\"true\" >\n"
+"            <Quirk name=\"requires-allocate-on-input-ports\" />\n"
+"            <Limit name=\"size\" min=\"64x64\" max=\"1920x1088\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.mpeg2\" type=\"different_mime\" update=\"true\" >\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.mpeg2\" type=\"video/mpeg2\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"3\" />\n"
+"        </MediaCodec>\n"
+"    </Decoders>\n"
+"</MediaCodecs>\n";
+
+static const char kTestOverridesStrNew2[] =
+"\n"
+"<MediaCodecs>\n"
+"    <Encoders>\n"
+"        <MediaCodec name=\"OMX.qcom.video.encoder.mpeg4\" type=\"video/mp4v-es\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.encoder.avc\" type=\"video/avc\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"        </MediaCodec>\n"
+"    </Encoders>\n"
+"    <Decoders>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.mpeg4\" type=\"video/mp4v-es\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"3\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.mpeg2\" type=\"video/mpeg2\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"3\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.h263\" type=\"video/3gpp\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.avc.secure\" type=\"video/avc\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"1\" />\n"
+"        </MediaCodec>\n"
+"    </Decoders>\n"
+"</MediaCodecs>\n";
+
+class MediaCodecListOverridesTest : public ::testing::Test {
+public:
+    MediaCodecListOverridesTest() {}
+
+    void verifyOverrides(const KeyedVector<AString, CodecSettings> &overrides) {
+        EXPECT_EQ(3u, overrides.size());
+
+        EXPECT_TRUE(overrides.keyAt(0) == "OMX.qcom.video.decoder.avc video/avc decoder");
+        const CodecSettings &settings0 = overrides.valueAt(0);
+        EXPECT_EQ(1u, settings0.size());
+        EXPECT_TRUE(settings0.keyAt(0) == "max-supported-instances");
+        EXPECT_TRUE(settings0.valueAt(0) == "4");
+
+        EXPECT_TRUE(overrides.keyAt(1) == "OMX.qcom.video.encoder.avc video/avc encoder");
+        const CodecSettings &settings1 = overrides.valueAt(1);
+        EXPECT_EQ(1u, settings1.size());
+        EXPECT_TRUE(settings1.keyAt(0) == "max-supported-instances");
+        EXPECT_TRUE(settings1.valueAt(0) == "3");
+
+        EXPECT_TRUE(overrides.keyAt(2) == "global");
+        const CodecSettings &settings2 = overrides.valueAt(2);
+        EXPECT_EQ(3u, settings2.size());
+        EXPECT_TRUE(settings2.keyAt(0) == "max-max-supported-instances");
+        EXPECT_TRUE(settings2.valueAt(0) == "8");
+        EXPECT_TRUE(settings2.keyAt(1) == "supports-multiple-secure-codecs");
+        EXPECT_TRUE(settings2.valueAt(1) == "false");
+        EXPECT_TRUE(settings2.keyAt(2) == "supports-secure-with-non-secure-codec");
+        EXPECT_TRUE(settings2.valueAt(2) == "true");
+    }
+
+    void verifySetting(const sp<AMessage> &details, const char *name, const char *value) {
+        AString value1;
+        EXPECT_TRUE(details->findString(name, &value1));
+        EXPECT_TRUE(value1 == value);
+    }
+
+    void createTestInfos(Vector<sp<MediaCodecInfo>> *infos) {
+        const char *name = "OMX.qcom.video.decoder.avc";
+        const bool encoder = false;
+        const char *mime = "video/avc";
+        sp<MediaCodecInfo> info = new MediaCodecInfo(name, encoder, mime);
+        infos->push_back(info);
+        const sp<MediaCodecInfo::Capabilities> caps = info->getCapabilitiesFor(mime);
+        const sp<AMessage> details = caps->getDetails();
+        details->setString("cap1", "value1");
+        details->setString("max-max-supported-instances", "16");
+
+        info = new MediaCodecInfo("anothercodec", true, "anothermime");
+        infos->push_back(info);
+    }
+
+    void addMaxInstancesSetting(
+            const AString &key,
+            const AString &value,
+            KeyedVector<AString, CodecSettings> *results) {
+        CodecSettings settings;
+        settings.add("max-supported-instances", value);
+        results->add(key, settings);
+    }
+
+    void exportTestResultsToXML(const char *fileName) {
+        KeyedVector<AString, CodecSettings> r;
+        addMaxInstancesSetting("OMX.qcom.video.decoder.avc.secure video/avc decoder", "1", &r);
+        addMaxInstancesSetting("OMX.qcom.video.decoder.h263 video/3gpp decoder", "4", &r);
+        addMaxInstancesSetting("OMX.qcom.video.decoder.mpeg2 video/mpeg2 decoder", "3", &r);
+        addMaxInstancesSetting("OMX.qcom.video.decoder.mpeg4 video/mp4v-es decoder", "3", &r);
+        addMaxInstancesSetting("OMX.qcom.video.encoder.avc video/avc encoder", "4", &r);
+        addMaxInstancesSetting("OMX.qcom.video.encoder.mpeg4 video/mp4v-es encoder", "4", &r);
+
+        exportResultsToXML(fileName, r);
+    }
+};
+
+TEST_F(MediaCodecListOverridesTest, splitString) {
+    AString s = "abc123";
+    AString delimiter = " ";
+    AString s1;
+    AString s2;
+    EXPECT_FALSE(splitString(s, delimiter, &s1, &s2));
+    s = "abc 123";
+    EXPECT_TRUE(splitString(s, delimiter, &s1, &s2));
+    EXPECT_TRUE(s1 == "abc");
+    EXPECT_TRUE(s2 == "123");
+
+    s = "abc123xyz";
+    delimiter = ",";
+    AString s3;
+    EXPECT_FALSE(splitString(s, delimiter, &s1, &s2, &s3));
+    s = "abc,123xyz";
+    EXPECT_FALSE(splitString(s, delimiter, &s1, &s2, &s3));
+    s = "abc,123,xyz";
+    EXPECT_TRUE(splitString(s, delimiter, &s1, &s2, &s3));
+    EXPECT_TRUE(s1 == "abc");
+    EXPECT_TRUE(s2 == "123" );
+    EXPECT_TRUE(s3 == "xyz");
+}
+
+// TODO: the codec component never returns OMX_EventCmdComplete in unit test.
+TEST_F(MediaCodecListOverridesTest, DISABLED_profileCodecs) {
+    sp<IMediaCodecList> list = MediaCodecList::getInstance();
+    Vector<sp<MediaCodecInfo>> infos;
+    for (size_t i = 0; i < list->countCodecs(); ++i) {
+        infos.push_back(list->getCodecInfo(i));
+    }
+    KeyedVector<AString, CodecSettings> results;
+    profileCodecs(infos, &results, true /* forceToMeasure */);
+    EXPECT_LT(0u, results.size());
+    for (size_t i = 0; i < results.size(); ++i) {
+        AString key = results.keyAt(i);
+        CodecSettings settings = results.valueAt(i);
+        EXPECT_EQ(1u, settings.size());
+        EXPECT_TRUE(settings.keyAt(0) == "max-supported-instances");
+        AString valueS = settings.valueAt(0);
+        int32_t value = strtol(valueS.c_str(), NULL, 10);
+        EXPECT_LT(0, value);
+        ALOGV("profileCodecs results %s %s", key.c_str(), valueS.c_str());
+    }
+}
+
+TEST_F(MediaCodecListOverridesTest, applyCodecSettings) {
+    AString codecInfo = "OMX.qcom.video.decoder.avc video/avc decoder";
+    Vector<sp<MediaCodecInfo>> infos;
+    createTestInfos(&infos);
+    CodecSettings settings;
+    settings.add("max-supported-instances", "3");
+    settings.add("max-max-supported-instances", "8");
+    applyCodecSettings(codecInfo, settings, &infos);
+
+    EXPECT_EQ(2u, infos.size());
+    EXPECT_TRUE(AString(infos[0]->getCodecName()) == "OMX.qcom.video.decoder.avc");
+    const sp<AMessage> details = infos[0]->getCapabilitiesFor("video/avc")->getDetails();
+    verifySetting(details, "max-supported-instances", "3");
+    verifySetting(details, "max-max-supported-instances", "8");
+
+    EXPECT_TRUE(AString(infos[1]->getCodecName()) == "anothercodec");
+    EXPECT_EQ(0u, infos[1]->getCapabilitiesFor("anothermime")->getDetails()->countEntries());
+}
+
+TEST_F(MediaCodecListOverridesTest, exportResultsToExistingFile) {
+    const char *fileName = "/sdcard/mediacodec_list_overrides_test.xml";
+    remove(fileName);
+
+    FILE *f = fopen(fileName, "wb");
+    if (f == NULL) {
+        ALOGW("Failed to open %s for writing.", fileName);
+        return;
+    }
+    EXPECT_EQ(
+            strlen(kTestOverridesStr),
+            fwrite(kTestOverridesStr, 1, strlen(kTestOverridesStr), f));
+    fclose(f);
+
+    exportTestResultsToXML(fileName);
+
+    // verify
+    AString overrides;
+    f = fopen(fileName, "rb");
+    ASSERT_TRUE(f != NULL);
+    fseek(f, 0, SEEK_END);
+    long size = ftell(f);
+    rewind(f);
+
+    char *buf = (char *)malloc(size);
+    EXPECT_EQ(1, fread(buf, size, 1, f));
+    overrides.setTo(buf, size);
+    fclose(f);
+    free(buf);
+
+    EXPECT_TRUE(overrides == kTestOverridesStrNew1);
+
+    remove(fileName);
+}
+
+TEST_F(MediaCodecListOverridesTest, exportResultsToEmptyFile) {
+    const char *fileName = "/sdcard/mediacodec_list_overrides_test.xml";
+    remove(fileName);
+
+    exportTestResultsToXML(fileName);
+
+    // verify
+    AString overrides;
+    FILE *f = fopen(fileName, "rb");
+    ASSERT_TRUE(f != NULL);
+    fseek(f, 0, SEEK_END);
+    long size = ftell(f);
+    rewind(f);
+
+    char *buf = (char *)malloc(size);
+    EXPECT_EQ(1, fread(buf, size, 1, f));
+    overrides.setTo(buf, size);
+    fclose(f);
+    free(buf);
+
+    EXPECT_TRUE(overrides == kTestOverridesStrNew2);
+
+    remove(fileName);
+}
+
+} // namespace android
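
The splitString expectations in the test above imply first-match splitting: the two-output form needs a delimiter, and the three-output form needs two. A standalone sketch consistent with those expectations (not the MediaCodecListOverrides implementation itself):

    #include <string>

    // Sketch only: split on the first occurrence(s) of the delimiter.
    static bool splitOnce(const std::string &s, const std::string &delimiter,
                          std::string *s1, std::string *s2) {
        const std::string::size_type pos = s.find(delimiter);
        if (pos == std::string::npos) {
            return false;                        // "abc123" with " " -> false
        }
        *s1 = s.substr(0, pos);                  // "abc 123" -> "abc"
        *s2 = s.substr(pos + delimiter.size());  //             -> "123"
        return true;
    }

    static bool splitTwice(const std::string &s, const std::string &delimiter,
                           std::string *s1, std::string *s2, std::string *s3) {
        std::string rest;
        // "abc,123xyz" fails (only one ','); "abc,123,xyz" yields "abc"/"123"/"xyz".
        return splitOnce(s, delimiter, s1, &rest)
                && splitOnce(rest, delimiter, s2, s3);
    }
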
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 0e2e48c..ba47172 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -45,7 +45,8 @@
     frameworks/av/services/mediaresourcemanager \
     $(call include-path-for, audio-utils) \
     frameworks/av/services/soundtrigger \
-    frameworks/av/services/radio
+    frameworks/av/services/radio \
+    external/sonic
 
 LOCAL_MODULE:= mediaserver
 LOCAL_32_BIT_ONLY := true
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index 99572f8..06b3c6e 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -33,6 +33,7 @@
 #include "CameraService.h"
 #include "MediaLogService.h"
 #include "MediaPlayerService.h"
+#include "ResourceManagerService.h"
 #include "service/AudioPolicyService.h"
 #include "SoundTriggerHwService.h"
 #include "RadioService.h"
@@ -128,6 +129,7 @@
         ALOGI("ServiceManager: %p", sm.get());
         AudioFlinger::instantiate();
         MediaPlayerService::instantiate();
+        ResourceManagerService::instantiate();
         CameraService::instantiate();
         AudioPolicyService::instantiate();
         SoundTriggerHwService::instantiate();
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index fee2347..c359be5 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -44,12 +44,13 @@
     SpdifStreamOut.cpp          \
     Effects.cpp                 \
     AudioMixer.cpp.arm          \
-    PatchPanel.cpp
-
-LOCAL_SRC_FILES += StateQueue.cpp
+    BufferProviders.cpp         \
+    PatchPanel.cpp              \
+    StateQueue.cpp
 
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audiopolicy \
+    $(TOPDIR)external/sonic \
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
 
@@ -68,7 +69,8 @@
     libhardware_legacy \
     libeffects \
     libpowermanager \
-    libserviceutility
+    libserviceutility \
+    libsonic
 
 LOCAL_STATIC_LIBRARIES := \
     libscheduling_policy \
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index 09d86ea..3191598 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -44,7 +44,7 @@
     AudioStreamOut *outputStream = new AudioStreamOut(this, flags);
 
     // Try to open the HAL first using the current format.
-    ALOGV("AudioHwDevice::openOutputStream(), try "
+    ALOGV("openOutputStream(), try "
             " sampleRate %d, Format %#x, "
             "channelMask %#x",
             config->sample_rate,
@@ -59,7 +59,7 @@
         // FIXME Look at any modification to the config.
         // The HAL might modify the config to suggest a wrapped format.
         // Log this so we can see what the HALs are doing.
-        ALOGI("AudioHwDevice::openOutputStream(), HAL returned"
+        ALOGI("openOutputStream(), HAL returned"
             " sampleRate %d, Format %#x, "
             "channelMask %#x, status %d",
             config->sample_rate,
@@ -72,16 +72,19 @@
                 && ((flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0)
                 && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0);
 
-        // FIXME - Add isEncodingSupported() query to SPDIF wrapper then
-        // call it from here.
         if (wrapperNeeded) {
-            outputStream = new SpdifStreamOut(this, flags);
-            status = outputStream->open(handle, devices, &originalConfig, address);
-            if (status != NO_ERROR) {
-                ALOGE("ERROR - AudioHwDevice::openOutputStream(), SPDIF open returned %d",
-                    status);
-                delete outputStream;
-                outputStream = NULL;
+            if (SPDIFEncoder::isFormatSupported(originalConfig.format)) {
+                outputStream = new SpdifStreamOut(this, flags, originalConfig.format);
+                status = outputStream->open(handle, devices, &originalConfig, address);
+                if (status != NO_ERROR) {
+                    ALOGE("ERROR - openOutputStream(), SPDIF open returned %d",
+                        status);
+                    delete outputStream;
+                    outputStream = NULL;
+                }
+            } else {
+                ALOGE("ERROR - openOutputStream(), SPDIFEncoder does not support format 0x%08x",
+                    originalConfig.format);
             }
         }
     }
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index dddca02..c2c791f 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -38,9 +38,7 @@
 #include <audio_utils/format.h>
 #include <common_time/local_clock.h>
 #include <common_time/cc_helper.h>
-
-#include <media/EffectsFactoryApi.h>
-#include <audio_effects/effect_downmix.h>
+#include <media/AudioResamplerPublic.h>
 
 #include "AudioMixerOps.h"
 #include "AudioMixer.h"
@@ -91,323 +89,6 @@
     return a < b ? a : b;
 }
 
-AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
-        size_t outputFrameSize, size_t bufferFrameCount) :
-        mInputFrameSize(inputFrameSize),
-        mOutputFrameSize(outputFrameSize),
-        mLocalBufferFrameCount(bufferFrameCount),
-        mLocalBufferData(NULL),
-        mConsumed(0)
-{
-    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
-            inputFrameSize, outputFrameSize, bufferFrameCount);
-    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
-            "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
-            inputFrameSize, outputFrameSize);
-    if (mLocalBufferFrameCount) {
-        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
-    }
-    mBuffer.frameCount = 0;
-}
-
-AudioMixer::CopyBufferProvider::~CopyBufferProvider()
-{
-    ALOGV("~CopyBufferProvider(%p)", this);
-    if (mBuffer.frameCount != 0) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-    }
-    free(mLocalBufferData);
-}
-
-status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts)
-{
-    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
-    //        this, pBuffer, pBuffer->frameCount, pts);
-    if (mLocalBufferFrameCount == 0) {
-        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
-        if (res == OK) {
-            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
-        }
-        return res;
-    }
-    if (mBuffer.frameCount == 0) {
-        mBuffer.frameCount = pBuffer->frameCount;
-        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
-        // At one time an upstream buffer provider had
-        // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
-        //
-        // By API spec, if res != OK, then mBuffer.frameCount == 0.
-        // but there may be improper implementations.
-        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
-        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
-            pBuffer->raw = NULL;
-            pBuffer->frameCount = 0;
-            return res;
-        }
-        mConsumed = 0;
-    }
-    ALOG_ASSERT(mConsumed < mBuffer.frameCount);
-    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
-    count = min(count, pBuffer->frameCount);
-    pBuffer->raw = mLocalBufferData;
-    pBuffer->frameCount = count;
-    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
-            pBuffer->frameCount);
-    return OK;
-}
-
-void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
-{
-    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
-    //        this, pBuffer, pBuffer->frameCount);
-    if (mLocalBufferFrameCount == 0) {
-        mTrackBufferProvider->releaseBuffer(pBuffer);
-        return;
-    }
-    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
-    mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
-    if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-        ALOG_ASSERT(mBuffer.frameCount == 0);
-    }
-    pBuffer->raw = NULL;
-    pBuffer->frameCount = 0;
-}
-
-void AudioMixer::CopyBufferProvider::reset()
-{
-    if (mBuffer.frameCount != 0) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-    }
-    mConsumed = 0;
-}
-
-AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider(
-        audio_channel_mask_t inputChannelMask,
-        audio_channel_mask_t outputChannelMask, audio_format_t format,
-        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
-        CopyBufferProvider(
-            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
-            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
-            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
-{
-    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
-            this, inputChannelMask, outputChannelMask, format,
-            sampleRate, sessionId);
-    if (!sIsMultichannelCapable
-            || EffectCreate(&sDwnmFxDesc.uuid,
-                    sessionId,
-                    SESSION_ID_INVALID_AND_IGNORED,
-                    &mDownmixHandle) != 0) {
-         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
-         mDownmixHandle = NULL;
-         return;
-     }
-     // channel input configuration will be overridden per-track
-     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
-     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
-     mDownmixConfig.inputCfg.format = format;
-     mDownmixConfig.outputCfg.format = format;
-     mDownmixConfig.inputCfg.samplingRate = sampleRate;
-     mDownmixConfig.outputCfg.samplingRate = sampleRate;
-     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
-     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-     // input and output buffer provider, and frame count will not be used as the downmix effect
-     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
-     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
-             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
-     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
-
-     int cmdStatus;
-     uint32_t replySize = sizeof(int);
-
-     // Configure downmixer
-     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
-             &mDownmixConfig /*pCmdData*/,
-             &replySize, &cmdStatus /*pReplyData*/);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-
-     // Enable downmixer
-     replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
-             &replySize, &cmdStatus /*pReplyData*/);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-
-     // Set downmix type
-     // parameter size rounded for padding on 32bit boundary
-     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
-     const int downmixParamSize =
-             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
-     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
-     param->psize = sizeof(downmix_params_t);
-     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
-     memcpy(param->data, &downmixParam, param->psize);
-     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
-     param->vsize = sizeof(downmix_type_t);
-     memcpy(param->data + psizePadded, &downmixType, param->vsize);
-     replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
-             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
-     free(param);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
-}
-
-AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
-{
-    ALOGV("~DownmixerBufferProvider (%p)", this);
-    EffectRelease(mDownmixHandle);
-    mDownmixHandle = NULL;
-}
-
-void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    mDownmixConfig.inputCfg.buffer.frameCount = frames;
-    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
-    mDownmixConfig.outputCfg.buffer.frameCount = frames;
-    mDownmixConfig.outputCfg.buffer.raw = dst;
-    // may be in-place if src == dst.
-    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
-            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
-    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
-}
-
-/* call once in a pthread_once handler. */
-/*static*/ status_t AudioMixer::DownmixerBufferProvider::init()
-{
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return NO_INIT;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
-            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
-                sIsMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
-    return NO_INIT;
-}
-
-/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false;
-/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc;
-
-AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
-        audio_channel_mask_t outputChannelMask, audio_format_t format,
-        size_t bufferFrameCount) :
-        CopyBufferProvider(
-                audio_bytes_per_sample(format)
-                    * audio_channel_count_from_out_mask(inputChannelMask),
-                audio_bytes_per_sample(format)
-                    * audio_channel_count_from_out_mask(outputChannelMask),
-                bufferFrameCount),
-        mFormat(format),
-        mSampleSize(audio_bytes_per_sample(format)),
-        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
-        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
-{
-    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
-            this, format, inputChannelMask, outputChannelMask,
-            mInputChannels, mOutputChannels);
-
-    const audio_channel_representation_t inputRepresentation =
-            audio_channel_mask_get_representation(inputChannelMask);
-    const audio_channel_representation_t outputRepresentation =
-            audio_channel_mask_get_representation(outputChannelMask);
-    const uint32_t inputBits = audio_channel_mask_get_bits(inputChannelMask);
-    const uint32_t outputBits = audio_channel_mask_get_bits(outputChannelMask);
-
-    switch (inputRepresentation) {
-    case AUDIO_CHANNEL_REPRESENTATION_POSITION:
-        switch (outputRepresentation) {
-        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
-            memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
-                    outputBits, inputBits);
-            return;
-        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
-            // TODO: output channel index mask not currently allowed
-            // fall through
-        default:
-            break;
-        }
-        break;
-    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
-        switch (outputRepresentation) {
-        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
-            memcpy_by_index_array_initialization_src_index(mIdxAry, ARRAY_SIZE(mIdxAry),
-                    outputBits, inputBits);
-            return;
-        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
-            // TODO: output channel index mask not currently allowed
-            // fall through
-        default:
-            break;
-        }
-        break;
-    default:
-        break;
-    }
-    LOG_ALWAYS_FATAL("invalid channel mask conversion from %#x to %#x",
-            inputChannelMask, outputChannelMask);
-}
-
-void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    memcpy_by_index_array(dst, mOutputChannels,
-            src, mInputChannels, mIdxAry, mSampleSize, frames);
-}
-
-AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
-        audio_format_t inputFormat, audio_format_t outputFormat,
-        size_t bufferFrameCount) :
-        CopyBufferProvider(
-            channels * audio_bytes_per_sample(inputFormat),
-            channels * audio_bytes_per_sample(outputFormat),
-            bufferFrameCount),
-        mChannels(channels),
-        mInputFormat(inputFormat),
-        mOutputFormat(outputFormat)
-{
-    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
-}
-
-void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels);
-}
-
 // ----------------------------------------------------------------------------
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
@@ -442,6 +123,7 @@
         t->resampler = NULL;
         t->downmixerBufferProvider = NULL;
         t->mReformatBufferProvider = NULL;
+        t->mTimestretchBufferProvider = NULL;
         t++;
     }
 
@@ -454,6 +136,7 @@
         delete t->resampler;
         delete t->downmixerBufferProvider;
         delete t->mReformatBufferProvider;
+        delete t->mTimestretchBufferProvider;
         t++;
     }
     delete [] mState.outputTemp;
@@ -532,6 +215,7 @@
         t->mReformatBufferProvider = NULL;
         t->downmixerBufferProvider = NULL;
         t->mPostDownmixReformatBufferProvider = NULL;
+        t->mTimestretchBufferProvider = NULL;
         t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
         t->mFormat = format;
         t->mMixerInFormat = selectMixerInFormat(format);
@@ -539,6 +223,8 @@
         t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
                 AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
         t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
+        t->mSpeed = AUDIO_TIMESTRETCH_SPEED_NORMAL;
+        t->mPitch = AUDIO_TIMESTRETCH_PITCH_NORMAL;
         // Check the downmixing (or upmixing) requirements.
         status_t status = t->prepareForDownmix();
         if (status != OK) {
@@ -731,6 +417,10 @@
         mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider);
         bufferProvider = mPostDownmixReformatBufferProvider;
     }
+    if (mTimestretchBufferProvider) {
+        mTimestretchBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = mTimestretchBufferProvider;
+    }
 }
 
 void AudioMixer::deleteTrackName(int name)
@@ -751,7 +441,9 @@
     mState.tracks[name].unprepareForDownmix();
     // delete the reformatter
     mState.tracks[name].unprepareForReformat();
-
+    // delete the timestretch provider
+    delete track.mTimestretchBufferProvider;
+    track.mTimestretchBufferProvider = NULL;
     mTrackNames &= ~(1<<name);
 }
 
@@ -973,6 +665,26 @@
             }
         }
         break;
+        case TIMESTRETCH:
+            switch (param) {
+            case PLAYBACK_RATE: {
+                const float speed = reinterpret_cast<float*>(value)[0];
+                const float pitch = reinterpret_cast<float*>(value)[1];
+                ALOG_ASSERT(AUDIO_TIMESTRETCH_SPEED_MIN <= speed
+                        && speed <= AUDIO_TIMESTRETCH_SPEED_MAX,
+                        "bad speed %f", speed);
+                ALOG_ASSERT(AUDIO_TIMESTRETCH_PITCH_MIN <= pitch
+                        && pitch <= AUDIO_TIMESTRETCH_PITCH_MAX,
+                        "bad pitch %f", pitch);
+                if (track.setPlaybackRate(speed, pitch)) {
+                    ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, %f %f", speed, pitch);
+                    // invalidateState(1 << name);
+                }
+                } break;
+            default:
+                LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
+            }
+            break;
 
     default:
         LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
@@ -1018,6 +730,28 @@
     return false;
 }
 
+bool AudioMixer::track_t::setPlaybackRate(float speed, float pitch)
+{
+    if (speed == mSpeed && pitch == mPitch) {
+        return false;
+    }
+    mSpeed = speed;
+    mPitch = pitch;
+    if (mTimestretchBufferProvider == NULL) {
+        // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+        // but if none exists, it is the channel count (1 for mono).
+        const int timestretchChannelCount = downmixerBufferProvider != NULL
+                ? mMixerChannelCount : channelCount;
+        mTimestretchBufferProvider = new TimestretchBufferProvider(timestretchChannelCount,
+                mMixerInFormat, sampleRate, speed, pitch);
+        reconfigureBufferProviders();
+    } else {
+        reinterpret_cast<TimestretchBufferProvider*>(mTimestretchBufferProvider)
+                ->setPlaybackRate(speed, pitch);
+    }
+    return true;
+}
+
 /* Checks to see if the volume ramp has completed and clears the increment
  * variables appropriately.
  *
@@ -1096,6 +830,8 @@
         mState.tracks[name].downmixerBufferProvider->reset();
     } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) {
         mState.tracks[name].mPostDownmixReformatBufferProvider->reset();
+    } else if (mState.tracks[name].mTimestretchBufferProvider != NULL) {
+        mState.tracks[name].mTimestretchBufferProvider->reset();
     }
 
     mState.tracks[name].mInputBufferProvider = bufferProvider;
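
The new TIMESTRETCH target expects 'value' to point at two floats, speed then pitch, exactly as the reinterpret_cast above reads them back. A hedged usage sketch, assuming a live AudioMixer and a track name previously returned by getTrackName():

    #include "AudioMixer.h"   // frameworks/av/services/audioflinger

    // Sketch only: configure per-track timestretch through the new target.
    static void setTrackPlaybackRate(android::AudioMixer *mixer, int trackName,
                                     float speed, float pitch) {
        // value[0] = speed, value[1] = pitch; both must stay within the
        // AUDIO_TIMESTRETCH_* bounds asserted in setParameter().
        float playbackRate[2] = { speed, pitch };
        mixer->setParameter(trackName, android::AudioMixer::TIMESTRETCH,
                            android::AudioMixer::PLAYBACK_RATE, playbackRate);
    }
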
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 381036b..e27a0d1 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -29,6 +29,7 @@
 #include <utils/threads.h>
 
 #include "AudioResampler.h"
+#include "BufferProviders.h"
 
 // FIXME This is actually unity gain, which might not be max in future, expressed in U.12
 #define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
@@ -72,6 +73,7 @@
         RESAMPLE        = 0x3001,
         RAMP_VOLUME     = 0x3002, // ramp to new volume
         VOLUME          = 0x3003, // don't ramp
+        TIMESTRETCH     = 0x3004,
 
         // set Parameter names
         // for target TRACK
@@ -99,6 +101,9 @@
         VOLUME0         = 0x4200,
         VOLUME1         = 0x4201,
         AUXLEVEL        = 0x4210,
+        // for target TIMESTRETCH
+        PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
+                                  // parameter 'value' is a pointer to the new playback rate.
     };
 
 
@@ -159,7 +164,6 @@
 
     struct state_t;
     struct track_t;
-    class CopyBufferProvider;
 
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
                            int32_t* aux);
@@ -214,6 +218,9 @@
 
         /* Buffer providers are constructed to translate the track input data as needed.
          *
+         * TODO: perhaps make a single PlaybackConverterProvider class to move
+         * all pre-mixer track buffer conversions outside the AudioMixer class.
+         *
          * 1) mInputBufferProvider: The AudioTrack buffer provider.
          * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
          *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
@@ -223,13 +230,14 @@
          *    the number of channels required by the mixer sink.
          * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
          *    the downmixer requirements to the mixer engine input requirements.
+         * 5) mTimestretchBufferProvider: Adds timestretching for playback rate
          */
         AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
-        CopyBufferProvider*      mReformatBufferProvider; // provider wrapper for reformatting.
-        CopyBufferProvider*      downmixerBufferProvider; // wrapper for channel conversion.
-        CopyBufferProvider*      mPostDownmixReformatBufferProvider;
+        PassthruBufferProvider*  mReformatBufferProvider; // provider wrapper for reformatting.
+        PassthruBufferProvider*  downmixerBufferProvider; // wrapper for channel conversion.
+        PassthruBufferProvider*  mPostDownmixReformatBufferProvider;
+        PassthruBufferProvider*  mTimestretchBufferProvider;
 
-        // 16-byte boundary
         int32_t     sessionId;
 
         audio_format_t mMixerFormat;     // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
@@ -251,6 +259,9 @@
         audio_channel_mask_t mMixerChannelMask;
         uint32_t             mMixerChannelCount;
 
+        float          mSpeed;
+        float          mPitch;
+
         bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
         bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
         bool        doesResample() const { return resampler != NULL; }
@@ -263,6 +274,7 @@
         void        unprepareForDownmix();
         status_t    prepareForReformat();
         void        unprepareForReformat();
+        bool        setPlaybackRate(float speed, float pitch);
         void        reconfigureBufferProviders();
     };
 
@@ -282,112 +294,6 @@
         track_t         tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
     };
 
-    // Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
-    // and ReformatBufferProvider.
-    // It handles a private buffer for use in converting format or channel masks from the
-    // input data to a form acceptable by the mixer.
-    // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
-    // processing pipeline.
-    class CopyBufferProvider : public AudioBufferProvider {
-    public:
-        // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
-        // If bufferFrameCount is 0, no private buffer is created and in-place modification of
-        // the upstream buffer provider's buffers is performed by copyFrames().
-        CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
-                size_t bufferFrameCount);
-        virtual ~CopyBufferProvider();
-
-        // Overrides AudioBufferProvider methods
-        virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
-        virtual void releaseBuffer(Buffer* buffer);
-
-        // Other public methods
-
-        // call this to release the buffer to the upstream provider.
-        // treat it as an audio discontinuity for future samples.
-        virtual void reset();
-
-        // this function should be supplied by the derived class.  It converts
-        // #frames in the *src pointer to the *dst pointer.  It is public because
-        // some providers will allow this to work on arbitrary buffers outside
-        // of the internal buffers.
-        virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
-
-        // set the upstream buffer provider. Consider calling "reset" before this function.
-        void setBufferProvider(AudioBufferProvider *p) {
-            mTrackBufferProvider = p;
-        }
-
-    protected:
-        AudioBufferProvider* mTrackBufferProvider;
-        const size_t         mInputFrameSize;
-        const size_t         mOutputFrameSize;
-    private:
-        AudioBufferProvider::Buffer mBuffer;
-        const size_t         mLocalBufferFrameCount;
-        void*                mLocalBufferData;
-        size_t               mConsumed;
-    };
-
-    // DownmixerBufferProvider wraps a track AudioBufferProvider to provide
-    // position dependent downmixing by an Audio Effect.
-    class DownmixerBufferProvider : public CopyBufferProvider {
-    public:
-        DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
-                audio_channel_mask_t outputChannelMask, audio_format_t format,
-                uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
-        virtual ~DownmixerBufferProvider();
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-        bool isValid() const { return mDownmixHandle != NULL; }
-
-        static status_t init();
-        static bool isMultichannelCapable() { return sIsMultichannelCapable; }
-
-    protected:
-        effect_handle_t    mDownmixHandle;
-        effect_config_t    mDownmixConfig;
-
-        // effect descriptor for the downmixer used by the mixer
-        static effect_descriptor_t sDwnmFxDesc;
-        // indicates whether a downmix effect has been found and is usable by this mixer
-        static bool                sIsMultichannelCapable;
-        // FIXME: should we allow effects outside of the framework?
-        // We need to here. A special ioId that must be <= -2 so it does not map to a session.
-        static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
-    };
-
-    // RemixBufferProvider wraps a track AudioBufferProvider to perform an
-    // upmix or downmix to the proper channel count and mask.
-    class RemixBufferProvider : public CopyBufferProvider {
-    public:
-        RemixBufferProvider(audio_channel_mask_t inputChannelMask,
-                audio_channel_mask_t outputChannelMask, audio_format_t format,
-                size_t bufferFrameCount);
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-
-    protected:
-        const audio_format_t mFormat;
-        const size_t         mSampleSize;
-        const size_t         mInputChannels;
-        const size_t         mOutputChannels;
-        int8_t               mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices
-    };
-
-    // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data
-    // to an acceptable mixer input format type.
-    class ReformatBufferProvider : public CopyBufferProvider {
-    public:
-        ReformatBufferProvider(int32_t channels,
-                audio_format_t inputFormat, audio_format_t outputFormat,
-                size_t bufferFrameCount);
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-
-    protected:
-        const int32_t        mChannels;
-        const audio_format_t mInputFormat;
-        const audio_format_t mOutputFormat;
-    };
-
     // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
     uint32_t        mTrackNames;
 
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
new file mode 100644
index 0000000..dcae5e7
--- /dev/null
+++ b/services/audioflinger/BufferProviders.cpp
@@ -0,0 +1,540 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BufferProvider"
+//#define LOG_NDEBUG 0
+
+#include <audio_effects/effect_downmix.h>
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/EffectsFactoryApi.h>
+
+#include <utils/Log.h>
+
+#include "Configuration.h"
+#include "BufferProviders.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+template <typename T>
+static inline T min(const T& a, const T& b)
+{
+    return a < b ? a : b;
+}
+
+CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
+        size_t outputFrameSize, size_t bufferFrameCount) :
+        mInputFrameSize(inputFrameSize),
+        mOutputFrameSize(outputFrameSize),
+        mLocalBufferFrameCount(bufferFrameCount),
+        mLocalBufferData(NULL),
+        mConsumed(0)
+{
+    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
+            inputFrameSize, outputFrameSize, bufferFrameCount);
+    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
+            "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
+            inputFrameSize, outputFrameSize);
+    if (mLocalBufferFrameCount) {
+        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
+    }
+    mBuffer.frameCount = 0;
+}
+
+CopyBufferProvider::~CopyBufferProvider()
+{
+    ALOGV("~CopyBufferProvider(%p)", this);
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    free(mLocalBufferData);
+}
+
+status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
+        int64_t pts)
+{
+    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+    //        this, pBuffer, pBuffer->frameCount, pts);
+    if (mLocalBufferFrameCount == 0) {
+        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+        if (res == OK) {
+            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
+        }
+        return res;
+    }
+    if (mBuffer.frameCount == 0) {
+        mBuffer.frameCount = pBuffer->frameCount;
+        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+        // At one time an upstream buffer provider had
+        // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
+        //
+        // By API spec, if res != OK, then mBuffer.frameCount == 0.
+        // but there may be improper implementations.
+        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
+            pBuffer->raw = NULL;
+            pBuffer->frameCount = 0;
+            return res;
+        }
+        mConsumed = 0;
+    }
+    ALOG_ASSERT(mConsumed < mBuffer.frameCount);
+    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
+    count = min(count, pBuffer->frameCount);
+    pBuffer->raw = mLocalBufferData;
+    pBuffer->frameCount = count;
+    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
+            pBuffer->frameCount);
+    return OK;
+}
+
+void CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
+    //        this, pBuffer, pBuffer->frameCount);
+    if (mLocalBufferFrameCount == 0) {
+        mTrackBufferProvider->releaseBuffer(pBuffer);
+        return;
+    }
+    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
+    mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
+    if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+        ALOG_ASSERT(mBuffer.frameCount == 0);
+    }
+    pBuffer->raw = NULL;
+    pBuffer->frameCount = 0;
+}
+
+void CopyBufferProvider::reset()
+{
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    mConsumed = 0;
+}
+
+DownmixerBufferProvider::DownmixerBufferProvider(
+        audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
+        CopyBufferProvider(
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
+            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
+{
+    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
+            this, inputChannelMask, outputChannelMask, format,
+            sampleRate, sessionId);
+    if (!sIsMultichannelCapable
+            || EffectCreate(&sDwnmFxDesc.uuid,
+                    sessionId,
+                    SESSION_ID_INVALID_AND_IGNORED,
+                    &mDownmixHandle) != 0) {
+         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
+         mDownmixHandle = NULL;
+         return;
+     }
+     // channel input configuration will be overridden per-track
+     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
+     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
+     mDownmixConfig.inputCfg.format = format;
+     mDownmixConfig.outputCfg.format = format;
+     mDownmixConfig.inputCfg.samplingRate = sampleRate;
+     mDownmixConfig.outputCfg.samplingRate = sampleRate;
+     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+     // input and output buffer provider, and frame count will not be used as the downmix effect
+     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
+     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
+             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
+     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
+
+     int cmdStatus;
+     uint32_t replySize = sizeof(int);
+
+     // Configure downmixer
+     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
+             &mDownmixConfig /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Enable downmixer
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Set downmix type
+     // parameter size rounded for padding on 32bit boundary
+     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
+     const int downmixParamSize =
+             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
+     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
+     param->psize = sizeof(downmix_params_t);
+     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
+     memcpy(param->data, &downmixParam, param->psize);
+     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
+     param->vsize = sizeof(downmix_type_t);
+     memcpy(param->data + psizePadded, &downmixType, param->vsize);
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
+             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
+     free(param);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
+}
+
+DownmixerBufferProvider::~DownmixerBufferProvider()
+{
+    ALOGV("~DownmixerBufferProvider (%p)", this);
+    EffectRelease(mDownmixHandle);
+    mDownmixHandle = NULL;
+}
+
+void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    mDownmixConfig.inputCfg.buffer.frameCount = frames;
+    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
+    mDownmixConfig.outputCfg.buffer.frameCount = frames;
+    mDownmixConfig.outputCfg.buffer.raw = dst;
+    // may be in-place if src == dst.
+    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
+            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
+    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
+}
+
+/* call once in a pthread_once handler. */
+/*static*/ status_t DownmixerBufferProvider::init()
+{
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("AudioMixer() error %d querying number of effects", ret);
+        return NO_INIT;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    return NO_INIT;
+}
+
+/*static*/ bool DownmixerBufferProvider::sIsMultichannelCapable = false;
+/*static*/ effect_descriptor_t DownmixerBufferProvider::sDwnmFxDesc;
+
+RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(inputChannelMask),
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(outputChannelMask),
+                bufferFrameCount),
+        mFormat(format),
+        mSampleSize(audio_bytes_per_sample(format)),
+        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
+        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
+{
+    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
+            this, format, inputChannelMask, outputChannelMask,
+            mInputChannels, mOutputChannels);
+
+    const audio_channel_representation_t inputRepresentation =
+            audio_channel_mask_get_representation(inputChannelMask);
+    const audio_channel_representation_t outputRepresentation =
+            audio_channel_mask_get_representation(outputChannelMask);
+    const uint32_t inputBits = audio_channel_mask_get_bits(inputChannelMask);
+    const uint32_t outputBits = audio_channel_mask_get_bits(outputChannelMask);
+
+    switch (inputRepresentation) {
+    case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+        switch (outputRepresentation) {
+        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+            memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
+                    outputBits, inputBits);
+            return;
+        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+            // TODO: output channel index mask not currently allowed
+            // fall through
+        default:
+            break;
+        }
+        break;
+    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+        switch (outputRepresentation) {
+        case AUDIO_CHANNEL_REPRESENTATION_POSITION:
+            memcpy_by_index_array_initialization_src_index(mIdxAry, ARRAY_SIZE(mIdxAry),
+                    outputBits, inputBits);
+            return;
+        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+            // TODO: output channel index mask not currently allowed
+            // fall through
+        default:
+            break;
+        }
+        break;
+    default:
+        break;
+    }
+    LOG_ALWAYS_FATAL("invalid channel mask conversion from %#x to %#x",
+            inputChannelMask, outputChannelMask);
+}
+
+void RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_index_array(dst, mOutputChannels,
+            src, mInputChannels, mIdxAry, mSampleSize, frames);
+}
+
+ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
+        audio_format_t inputFormat, audio_format_t outputFormat,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                channelCount * audio_bytes_per_sample(inputFormat),
+                channelCount * audio_bytes_per_sample(outputFormat),
+                bufferFrameCount),
+        mChannelCount(channelCount),
+        mInputFormat(inputFormat),
+        mOutputFormat(outputFormat)
+{
+    ALOGV("ReformatBufferProvider(%p)(%u, %#x, %#x)",
+            this, channelCount, inputFormat, outputFormat);
+}
+
+void ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannelCount);
+}
+
+TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount,
+        audio_format_t format, uint32_t sampleRate, float speed, float pitch) :
+        mChannelCount(channelCount),
+        mFormat(format),
+        mSampleRate(sampleRate),
+        mFrameSize(channelCount * audio_bytes_per_sample(format)),
+        mSpeed(speed),
+        mPitch(pitch),
+        mLocalBufferFrameCount(0),
+        mLocalBufferData(NULL),
+        mRemaining(0),
+        mSonicStream(sonicCreateStream(sampleRate, mChannelCount))
+{
+    ALOGV("TimestretchBufferProvider(%p)(%u, %#x, %u %f %f)",
+            this, channelCount, format, sampleRate, speed, pitch);
+    mBuffer.frameCount = 0;
+
+    LOG_ALWAYS_FATAL_IF(mSonicStream == NULL,
+            "TimestretchBufferProvider can't allocate Sonic stream");
+    sonicSetSpeed(mSonicStream, speed);
+}
+
+TimestretchBufferProvider::~TimestretchBufferProvider()
+{
+    ALOGV("~TimestretchBufferProvider(%p)", this);
+    sonicDestroyStream(mSonicStream);
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    free(mLocalBufferData);
+}
+
+status_t TimestretchBufferProvider::getNextBuffer(
+        AudioBufferProvider::Buffer *pBuffer, int64_t pts)
+{
+    ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+            this, pBuffer, pBuffer->frameCount, pts);
+
+    // BYPASS
+    //return mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+
+    // check if previously processed data is sufficient.
+    if (pBuffer->frameCount <= mRemaining) {
+        ALOGV("previous sufficient");
+        pBuffer->raw = mLocalBufferData;
+        return OK;
+    }
+
+    // do we need to resize our buffer?
+    if (pBuffer->frameCount > mLocalBufferFrameCount) {
+        void *newmem;
+        if (posix_memalign(&newmem, 32, pBuffer->frameCount * mFrameSize) == OK) {
+            if (mRemaining != 0) {
+                memcpy(newmem, mLocalBufferData, mRemaining * mFrameSize);
+            }
+            free(mLocalBufferData);
+            mLocalBufferData = newmem;
+            mLocalBufferFrameCount = pBuffer->frameCount;
+        }
+    }
+
+    // need to fetch more data
+    const size_t outputDesired = pBuffer->frameCount - mRemaining;
+    mBuffer.frameCount = mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL
+            ? outputDesired : outputDesired * mSpeed + 1;
+
+    status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+
+    ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+    if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
+        ALOGD("buffer error");
+        if (mRemaining == 0) {
+            pBuffer->raw = NULL;
+            pBuffer->frameCount = 0;
+            return res;
+        } else { // return partial count
+            pBuffer->raw = mLocalBufferData;
+            pBuffer->frameCount = mRemaining;
+            return OK;
+        }
+    }
+
+    // time-stretch the data
+    size_t dstAvailable = min(mLocalBufferFrameCount - mRemaining, outputDesired);
+    size_t srcAvailable = mBuffer.frameCount;
+    processFrames((uint8_t*)mLocalBufferData + mRemaining * mFrameSize, &dstAvailable,
+            mBuffer.raw, &srcAvailable);
+
+    // release all data consumed
+    mBuffer.frameCount = srcAvailable;
+    mTrackBufferProvider->releaseBuffer(&mBuffer);
+
+    // update buffer vars with the actual data processed and return with buffer
+    mRemaining += dstAvailable;
+
+    pBuffer->raw = mLocalBufferData;
+    pBuffer->frameCount = mRemaining;
+
+    return OK;
+}
+
+void TimestretchBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    ALOGV("TimestretchBufferProvider(%p)::releaseBuffer(%p (%zu))",
+       this, pBuffer, pBuffer->frameCount);
+
+    // BYPASS
+    //return mTrackBufferProvider->releaseBuffer(pBuffer);
+
+    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
+    if (pBuffer->frameCount < mRemaining) {
+        memcpy(mLocalBufferData,
+                (uint8_t*)mLocalBufferData + pBuffer->frameCount * mFrameSize,
+                (mRemaining - pBuffer->frameCount) * mFrameSize);
+        mRemaining -= pBuffer->frameCount;
+    } else if (pBuffer->frameCount == mRemaining) {
+        mRemaining = 0;
+    } else {
+        LOG_ALWAYS_FATAL("Releasing more frames(%zu) than available(%zu)",
+                pBuffer->frameCount, mRemaining);
+    }
+
+    pBuffer->raw = NULL;
+    pBuffer->frameCount = 0;
+}
+
+void TimestretchBufferProvider::reset()
+{
+    mRemaining = 0;
+}
+
+status_t TimestretchBufferProvider::setPlaybackRate(float speed, float pitch)
+{
+    mSpeed = speed;
+    mPitch = pitch;
+
+    sonicSetSpeed(mSonicStream, speed);
+    //TODO: pitch is ignored for now
+    return OK;
+}
+
+void TimestretchBufferProvider::processFrames(void *dstBuffer, size_t *dstFrames,
+        const void *srcBuffer, size_t *srcFrames)
+{
+    ALOGV("processFrames(%zu %zu)  remaining(%zu)", *dstFrames, *srcFrames, mRemaining);
+    // Note dstFrames is the required number of frames.
+
+    // Ensure consumption from src is as expected.
+    const size_t targetSrc = *dstFrames * mSpeed;
+    if (*srcFrames < targetSrc) { // limit dst frames to that possible
+        *dstFrames = *srcFrames / mSpeed;
+    } else if (*srcFrames > targetSrc + 1) {
+        *srcFrames = targetSrc + 1;
+    }
+
+    switch (mFormat) {
+    case AUDIO_FORMAT_PCM_FLOAT:
+        if (sonicWriteFloatToStream(mSonicStream, (float*)srcBuffer, *srcFrames) != 1) {
+            ALOGE("sonicWriteFloatToStream cannot realloc");
+            *srcFrames = 0; // cannot consume all of srcBuffer
+        }
+        *dstFrames = sonicReadFloatFromStream(mSonicStream, (float*)dstBuffer, *dstFrames);
+        break;
+    case AUDIO_FORMAT_PCM_16_BIT:
+        if (sonicWriteShortToStream(mSonicStream, (short*)srcBuffer, *srcFrames) != 1) {
+            ALOGE("sonicWriteShortToStream cannot realloc");
+            *srcFrames = 0; // cannot consume all of srcBuffer
+        }
+        *dstFrames = sonicReadShortFromStream(mSonicStream, (short*)dstBuffer, *dstFrames);
+        break;
+    default:
+        // could also be caught on construction
+        LOG_ALWAYS_FATAL("invalid format %#x for TimestretchBufferProvider", mFormat);
+    }
+}
+
+// ----------------------------------------------------------------------------
+} // namespace android
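
For illustration, the source/destination clamping that TimestretchBufferProvider::processFrames() applies before feeding the Sonic stream can be read in isolation. The sketch below is a minimal standalone restatement of that logic (the helper name clampFrameCounts is invented for this note and is not part of the patch): at speed s, producing dstFrames of output consumes roughly dstFrames * s input frames, so either the output request is shrunk to what the available source can supply, or the source is trimmed to targetSrc + 1 frames.

#include <cstddef>

// Minimal sketch of the clamping performed in TimestretchBufferProvider::processFrames().
// clampFrameCounts is a hypothetical name used only for this illustration.
static void clampFrameCounts(size_t *dstFrames, size_t *srcFrames, float speed) {
    const size_t targetSrc = *dstFrames * speed;  // source frames implied by the output request
    if (*srcFrames < targetSrc) {
        *dstFrames = *srcFrames / speed;          // not enough source: shrink the output request
    } else if (*srcFrames > targetSrc + 1) {
        *srcFrames = targetSrc + 1;               // plenty of source: consume only what is needed
    }
}
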
diff --git a/services/audioflinger/BufferProviders.h b/services/audioflinger/BufferProviders.h
new file mode 100644
index 0000000..42030c0
--- /dev/null
+++ b/services/audioflinger/BufferProviders.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BUFFER_PROVIDERS_H
+#define ANDROID_BUFFER_PROVIDERS_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <hardware/audio_effect.h>
+#include <media/AudioBufferProvider.h>
+#include <system/audio.h>
+#include <sonic.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class PassthruBufferProvider : public AudioBufferProvider {
+public:
+    PassthruBufferProvider() : mTrackBufferProvider(NULL) { }
+
+    virtual ~PassthruBufferProvider() { }
+
+    // call this to release the buffer to the upstream provider.
+    // treat it as an audio discontinuity for future samples.
+    virtual void reset() { }
+
+    // set the upstream buffer provider. Consider calling "reset" before this function.
+    virtual void setBufferProvider(AudioBufferProvider *p) {
+        mTrackBufferProvider = p;
+    }
+
+protected:
+    AudioBufferProvider *mTrackBufferProvider;
+};
+
+// Base AudioBufferProvider class used for DownmixerBufferProvider, RemixBufferProvider,
+// and ReformatBufferProvider.
+// It handles a private buffer for use in converting format or channel masks from the
+// input data to a form acceptable by the mixer.
+// TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
+// processing pipeline.
+class CopyBufferProvider : public PassthruBufferProvider {
+public:
+    // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
+    // If bufferFrameCount is 0, no private buffer is created and in-place modification of
+    // the upstream buffer provider's buffers is performed by copyFrames().
+    CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
+            size_t bufferFrameCount);
+    virtual ~CopyBufferProvider();
+
+    // Overrides AudioBufferProvider methods
+    virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
+    virtual void releaseBuffer(Buffer *buffer);
+
+    // Overrides PassthruBufferProvider
+    virtual void reset();
+
+    // this function should be supplied by the derived class.  It converts
+    // #frames in the *src pointer to the *dst pointer.  It is public because
+    // some providers will allow this to work on arbitrary buffers outside
+    // of the internal buffers.
+    virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
+
+protected:
+    const size_t         mInputFrameSize;
+    const size_t         mOutputFrameSize;
+private:
+    AudioBufferProvider::Buffer mBuffer;
+    const size_t         mLocalBufferFrameCount;
+    void                *mLocalBufferData;
+    size_t               mConsumed;
+};
+
+// DownmixerBufferProvider derives from CopyBufferProvider to provide
+// position dependent downmixing by an Audio Effect.
+class DownmixerBufferProvider : public CopyBufferProvider {
+public:
+    DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
+            audio_channel_mask_t outputChannelMask, audio_format_t format,
+            uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
+    virtual ~DownmixerBufferProvider();
+    //Overrides
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    bool isValid() const { return mDownmixHandle != NULL; }
+    static status_t init();
+    static bool isMultichannelCapable() { return sIsMultichannelCapable; }
+
+protected:
+    effect_handle_t    mDownmixHandle;
+    effect_config_t    mDownmixConfig;
+
+    // effect descriptor for the downmixer used by the mixer
+    static effect_descriptor_t sDwnmFxDesc;
+    // indicates whether a downmix effect has been found and is usable by this mixer
+    static bool                sIsMultichannelCapable;
+    // FIXME: should we allow effects outside of the framework?
+    // We need to here: a special ioId that must be <= -2 is used so it does not map to a session.
+    static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
+};
+
+// RemixBufferProvider derives from CopyBufferProvider to perform an
+// upmix or downmix to the proper channel count and mask.
+class RemixBufferProvider : public CopyBufferProvider {
+public:
+    RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+            audio_channel_mask_t outputChannelMask, audio_format_t format,
+            size_t bufferFrameCount);
+    //Overrides
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+    const audio_format_t mFormat;
+    const size_t         mSampleSize;
+    const size_t         mInputChannels;
+    const size_t         mOutputChannels;
+    int8_t               mIdxAry[sizeof(uint32_t) * 8]; // 32 bits => channel indices
+};
+
+// ReformatBufferProvider derives from CopyBufferProvider to convert the input data
+// to an acceptable mixer input format type.
+class ReformatBufferProvider : public CopyBufferProvider {
+public:
+    ReformatBufferProvider(int32_t channelCount,
+            audio_format_t inputFormat, audio_format_t outputFormat,
+            size_t bufferFrameCount);
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+    const uint32_t       mChannelCount;
+    const audio_format_t mInputFormat;
+    const audio_format_t mOutputFormat;
+};
+
+// TimestretchBufferProvider derives from PassthruBufferProvider for time stretching
+class TimestretchBufferProvider : public PassthruBufferProvider {
+public:
+    TimestretchBufferProvider(int32_t channelCount,
+            audio_format_t format, uint32_t sampleRate, float speed, float pitch);
+    virtual ~TimestretchBufferProvider();
+
+    // Overrides AudioBufferProvider methods
+    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
+    virtual void releaseBuffer(Buffer* buffer);
+
+    // Overrides PassthruBufferProvider
+    virtual void reset();
+
+    virtual status_t setPlaybackRate(float speed, float pitch);
+
+    // processes frames
+    // dstBuffer is where to place the data
+    // dstFrames [in/out] is the desired frames (return with actual placed in buffer)
+    // srcBuffer is the source data
+    // srcFrames [in/out] is the available source frames (return with consumed)
+    virtual void processFrames(void *dstBuffer, size_t *dstFrames,
+            const void *srcBuffer, size_t *srcFrames);
+
+protected:
+    const uint32_t       mChannelCount;
+    const audio_format_t mFormat;
+    const uint32_t       mSampleRate; // const for now (TODO change this)
+    const size_t         mFrameSize;
+    float                mSpeed;
+    float                mPitch;
+
+private:
+    AudioBufferProvider::Buffer mBuffer;
+    size_t               mLocalBufferFrameCount;
+    void                *mLocalBufferData;
+    size_t               mRemaining;
+    sonicStream          mSonicStream;
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_BUFFER_PROVIDERS_H
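
The providers declared above are designed to be stacked: a CopyBufferProvider subclass is given its upstream source with setBufferProvider(), and the consumer then drives the outermost provider through the usual getNextBuffer()/releaseBuffer() cycle. Below is a minimal sketch of that calling pattern, assuming a pre-existing upstream AudioBufferProvider named track and arbitrarily chosen formats and frame counts (all of these are hypothetical, not taken from the patch).

#include <media/AudioBufferProvider.h>
#include "BufferProviders.h"

void exampleChain(android::AudioBufferProvider *track) {
    using namespace android;
    // Convert the track's 16-bit PCM to float, buffering up to 512 frames internally.
    ReformatBufferProvider reformat(2 /*channelCount*/,
            AUDIO_FORMAT_PCM_16_BIT, AUDIO_FORMAT_PCM_FLOAT, 512 /*bufferFrameCount*/);
    reformat.setBufferProvider(track);  // upstream source of raw track data

    AudioBufferProvider::Buffer buffer;
    buffer.frameCount = 256;            // ask for 256 converted frames
    if (reformat.getNextBuffer(&buffer, AudioBufferProvider::kInvalidPTS) == OK) {
        // buffer.raw now points at float frames produced by copyFrames()
        reformat.releaseBuffer(&buffer);
    }
}
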
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
index fae19a1..8246fef 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -50,13 +50,6 @@
     return ok;
 }
 
-bool captureFmTunerAllowed() {
-    static const String16 sCaptureFmTunerAllowed("android.permission.ACCESS_FM_RADIO");
-    bool ok = checkCallingPermission(sCaptureFmTunerAllowed);
-    if (!ok) ALOGE("android.permission.ACCESS_FM_RADIO");
-    return ok;
-}
-
 bool settingsAllowed() {
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS");
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
index ce18a90..df6f6f4 100644
--- a/services/audioflinger/ServiceUtilities.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -23,7 +23,6 @@
 bool recordingAllowed();
 bool captureAudioOutputAllowed();
 bool captureHotwordAllowed();
-bool captureFmTunerAllowed();
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
 bool dumpAllowed();
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
index d23588e..45b541a 100644
--- a/services/audioflinger/SpdifStreamOut.cpp
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -32,10 +32,12 @@
  * If the AudioFlinger is processing encoded data and the HAL expects
  * PCM then we need to wrap the data in an SPDIF wrapper.
  */
-SpdifStreamOut::SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags)
+SpdifStreamOut::SpdifStreamOut(AudioHwDevice *dev,
+            audio_output_flags_t flags,
+            audio_format_t format)
         : AudioStreamOut(dev,flags)
         , mRateMultiplier(1)
-        , mSpdifEncoder(this)
+        , mSpdifEncoder(this, format)
         , mRenderPositionHal(0)
         , mPreviousHalPosition32(0)
 {
@@ -49,15 +51,15 @@
 {
     struct audio_config customConfig = *config;
 
-    customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
-    customConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
-
     // Some data bursts run at a higher sample rate.
+    // TODO Move this into the audio_utils as a static method.
     switch(config->format) {
         case AUDIO_FORMAT_E_AC3:
             mRateMultiplier = 4;
             break;
         case AUDIO_FORMAT_AC3:
+        case AUDIO_FORMAT_DTS:
+        case AUDIO_FORMAT_DTS_HD:
             mRateMultiplier = 1;
             break;
         default:
@@ -67,6 +69,9 @@
     }
     customConfig.sample_rate = config->sample_rate * mRateMultiplier;
 
+    customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    customConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
     // Always print this because otherwise it could be very confusing if the
     // HAL and AudioFlinger are using different formats.
     // Print before open() because HAL may modify customConfig.
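
As a worked example of the reordering above (illustrative numbers, not from the patch): for an E-AC3 stream delivered at 48000 Hz, mRateMultiplier is 4, so customConfig.sample_rate becomes 48000 * 4 = 192000 before the format and channel mask are forced to 16-bit stereo PCM; the HAL is therefore opened as a 192 kHz stereo PCM stream carrying the wrapped data bursts, while AC-3, DTS and DTS-HD keep the multiplier at 1 and open at the original rate.
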
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h
index cb82ac7..d81c064 100644
--- a/services/audioflinger/SpdifStreamOut.h
+++ b/services/audioflinger/SpdifStreamOut.h
@@ -38,7 +38,8 @@
 class SpdifStreamOut : public AudioStreamOut {
 public:
 
-    SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags);
+    SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags,
+            audio_format_t format);
 
     virtual ~SpdifStreamOut() { }
 
@@ -77,8 +78,9 @@
     class MySPDIFEncoder : public SPDIFEncoder
     {
     public:
-        MySPDIFEncoder(SpdifStreamOut *spdifStreamOut)
-          : mSpdifStreamOut(spdifStreamOut)
+        MySPDIFEncoder(SpdifStreamOut *spdifStreamOut, audio_format_t format)
+          :  SPDIFEncoder(format)
+          , mSpdifStreamOut(spdifStreamOut)
         {
         }
 
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 1a20fae..b30fd20 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1608,13 +1608,19 @@
     // If you change this calculation, also review the start threshold which is related.
     if (!(*flags & IAudioFlinger::TRACK_FAST)
             && audio_is_linear_pcm(format) && sharedBuffer == 0) {
+        // this must match AudioTrack.cpp calculateMinFrameCount().
+        // TODO: Move to a common library
         uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
         uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
         if (minBufCount < 2) {
             minBufCount = 2;
         }
+        // For normal mixing tracks, if speed is greater than normal (1.0f), AudioTrack
+        // or the client should compute and pass in a larger buffer request.
         size_t minFrameCount =
-                minBufCount * sourceFramesNeeded(sampleRate, mNormalFrameCount, mSampleRate);
+                minBufCount * sourceFramesNeededWithTimestretch(
+                        sampleRate, mNormalFrameCount,
+                        mSampleRate, AUDIO_TIMESTRETCH_SPEED_NORMAL /*speed*/);
         if (frameCount < minFrameCount) { // including frameCount == 0
             frameCount = minFrameCount;
         }
@@ -3592,21 +3598,17 @@
         // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
         // during last round
         size_t desiredFrames;
-        uint32_t sr = track->sampleRate();
-        if (sr == mSampleRate) {
-            desiredFrames = mNormalFrameCount;
-        } else {
-            desiredFrames = sourceFramesNeeded(sr, mNormalFrameCount, mSampleRate);
-            // add frames already consumed but not yet released by the resampler
-            // because mAudioTrackServerProxy->framesReady() will include these frames
-            desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
-#if 0
-            // the minimum track buffer size is normally twice the number of frames necessary
-            // to fill one buffer and the resampler should not leave more than one buffer worth
-            // of unreleased frames after each pass, but just in case...
-            ALOG_ASSERT(desiredFrames <= cblk->frameCount_);
-#endif
-        }
+        const uint32_t sampleRate = track->mAudioTrackServerProxy->getSampleRate();
+        float speed, pitch;
+        track->mAudioTrackServerProxy->getPlaybackRate(&speed, &pitch);
+
+        desiredFrames = sourceFramesNeededWithTimestretch(
+                sampleRate, mNormalFrameCount, mSampleRate, speed);
+        // TODO: ONLY USED FOR LEGACY RESAMPLERS, remove when they are removed.
+        // add frames already consumed but not yet released by the resampler
+        // because mAudioTrackServerProxy->framesReady() will include these frames
+        desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
+
         uint32_t minFrames = 1;
         if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
                 (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
@@ -3769,6 +3771,17 @@
                 AudioMixer::RESAMPLE,
                 AudioMixer::SAMPLE_RATE,
                 (void *)(uintptr_t)reqSampleRate);
+
+            // set the playback rate as a float array {speed, pitch}
+            float playbackRate[2];
+            track->mAudioTrackServerProxy->getPlaybackRate(
+                    &playbackRate[0] /*speed*/, &playbackRate[1] /*pitch*/);
+            mAudioMixer->setParameter(
+                name,
+                AudioMixer::TIMESTRETCH,
+                AudioMixer::PLAYBACK_RATE,
+                playbackRate);
+
             /*
              * Select the appropriate output buffer for the track.
              *
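
To make the buffer sizing above concrete (illustrative numbers only): with a mixer running at mSampleRate = 48000 and mNormalFrameCount = 960 (a 20 ms buffer), a reported HAL latency of 80 ms gives minBufCount = 80 / 20 = 4; for a 44100 Hz track at normal speed, sourceFramesNeededWithTimestretch() needs roughly 960 * 44100 / 48000 = 882 source frames per mix (plus a frame or so of rounding margin), so frameCount is raised to at least about 4 * 882 = 3528 frames. Tracks playing faster than normal are expected to request a proportionally larger buffer themselves, as the comment in the hunk notes.
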
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 1566b1f..da2d634 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -903,9 +903,14 @@
             mPreviousTimestampValid = false;
             return INVALID_OPERATION;
         }
+        // FIXME Not accurate under dynamic changes of sample rate and speed.
+        // Do not use track's mSampleRate as it is not current for mixer tracks.
+        uint32_t sampleRate = mAudioTrackServerProxy->getSampleRate();
+        float speed, pitch;
+        mAudioTrackServerProxy->getPlaybackRate(&speed, &pitch);
         uint32_t unpresentedFrames =
-                ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
-                playbackThread->mSampleRate;
+                ((double) playbackThread->mLatchQ.mUnpresentedFrames * sampleRate * speed)
+                / playbackThread->mSampleRate;
         // FIXME Since we're using a raw pointer as the key, it is theoretically possible
         //       for a brand new track to share the same address as a recently destroyed
         //       track, and thus for us to get the frames released of the wrong track.
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index 8604ef5..536eb93 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -39,11 +39,13 @@
 LOCAL_SRC_FILES:= \
 	test-mixer.cpp \
 	../AudioMixer.cpp.arm \
+	../BufferProviders.cpp
 
 LOCAL_C_INCLUDES := \
 	$(call include-path-for, audio-effects) \
 	$(call include-path-for, audio-utils) \
-	frameworks/av/services/audioflinger
+	frameworks/av/services/audioflinger \
+	external/sonic
 
 LOCAL_STATIC_LIBRARIES := \
 	libsndfile
@@ -57,7 +59,8 @@
 	libdl \
 	libcutils \
 	libutils \
-	liblog
+	liblog \
+	libsonic
 
 LOCAL_MODULE:= test-mixer
 
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index dea1b8a..1c2c27e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -62,8 +62,12 @@
     // searches for an exact match
     status_t checkExactChannelMask(audio_channel_mask_t channelMask) const;
     // searches for a compatible match, currently implemented for input channel masks only
-    status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask) const;
-    status_t checkFormat(audio_format_t format) const;
+    status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
+            audio_channel_mask_t *updatedChannelMask) const;
+
+    status_t checkExactFormat(audio_format_t format) const;
+    // searches for a compatible match, currently implemented for input formats only
+    status_t checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat) const;
     status_t checkGain(const struct audio_gain_config *gainConfig, int index) const;
 
     uint32_t pickSamplingRate() const;
@@ -71,6 +75,11 @@
     audio_format_t pickFormat() const;
 
     static const audio_format_t sPcmFormatCompareTable[];
+    static int compareFormatsGoodToBad(
+            const audio_format_t *format1, const audio_format_t *format2) {
+            // compareFormats sorts from bad to good, so we reverse it here
+        return compareFormats(*format2, *format1);
+    }
     static int compareFormats(audio_format_t format1, audio_format_t format2);
 
     audio_module_handle_t getModuleHandle() const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index 022257e..ab6fcc1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -45,7 +45,9 @@
                              uint32_t samplingRate,
                              uint32_t *updatedSamplingRate,
                              audio_format_t format,
+                             audio_format_t *updatedFormat,
                              audio_channel_mask_t channelMask,
+                             audio_channel_mask_t *updatedChannelMask,
                              uint32_t flags) const;
 
     void dump(int fd);
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index e8191dd..f3978ec 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -16,7 +16,7 @@
 
 #define LOG_TAG "APM::AudioPort"
 //#define LOG_NDEBUG 0
-
+#include <media/AudioResamplerPublic.h>
 #include "AudioPort.h"
 #include "HwModule.h"
 #include "AudioGain.h"
@@ -216,6 +216,7 @@
         }
         str = strtok(NULL, "|");
     }
+    mFormats.sort(compareFormatsGoodToBad);
 }
 
 void AudioPort::loadInChannels(char *name)
@@ -358,6 +359,9 @@
         uint32_t *updatedSamplingRate) const
 {
     if (mSamplingRates.isEmpty()) {
+        if (updatedSamplingRate != NULL) {
+            *updatedSamplingRate = samplingRate;
+        }
         return NO_ERROR;
     }
 
@@ -387,16 +391,11 @@
             }
         }
     }
-    // This uses hard-coded knowledge about AudioFlinger resampling ratios.
-    // TODO Move these assumptions out.
-    static const uint32_t kMaxDownSampleRatio = 6;  // beyond this aliasing occurs
-    static const uint32_t kMaxUpSampleRatio = 256;  // beyond this sample rate inaccuracies occur
-                                                    // due to approximation by an int32_t of the
-                                                    // phase increments
+
     // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
     if (minAbove >= 0) {
         candidate = mSamplingRates[minAbove];
-        if (candidate / kMaxDownSampleRatio <= samplingRate) {
+        if (candidate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
             if (updatedSamplingRate != NULL) {
                 *updatedSamplingRate = candidate;
             }
@@ -406,7 +405,7 @@
     // But if we have to up-sample from a lower sampling rate, that's OK.
     if (maxBelow >= 0) {
         candidate = mSamplingRates[maxBelow];
-        if (candidate * kMaxUpSampleRatio >= samplingRate) {
+        if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
             if (updatedSamplingRate != NULL) {
                 *updatedSamplingRate = candidate;
             }
@@ -431,10 +430,13 @@
     return BAD_VALUE;
 }
 
-status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask)
-        const
+status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask,
+        audio_channel_mask_t *updatedChannelMask) const
 {
     if (mChannelMasks.isEmpty()) {
+        if (updatedChannelMask != NULL) {
+            *updatedChannelMask = channelMask;
+        }
         return NO_ERROR;
     }
 
@@ -443,6 +445,9 @@
         // FIXME Does not handle multi-channel automatic conversions yet
         audio_channel_mask_t supported = mChannelMasks[i];
         if (supported == channelMask) {
+            if (updatedChannelMask != NULL) {
+                *updatedChannelMask = channelMask;
+            }
             return NO_ERROR;
         }
         if (isRecordThread) {
@@ -452,6 +457,9 @@
                     && channelMask == AUDIO_CHANNEL_IN_MONO) ||
                 (supported == AUDIO_CHANNEL_IN_MONO && (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
                     || channelMask == AUDIO_CHANNEL_IN_STEREO))) {
+                if (updatedChannelMask != NULL) {
+                    *updatedChannelMask = supported;
+                }
                 return NO_ERROR;
             }
         }
@@ -459,7 +467,7 @@
     return BAD_VALUE;
 }
 
-status_t AudioPort::checkFormat(audio_format_t format) const
+status_t AudioPort::checkExactFormat(audio_format_t format) const
 {
     if (mFormats.isEmpty()) {
         return NO_ERROR;
@@ -473,6 +481,33 @@
     return BAD_VALUE;
 }
 
+status_t AudioPort::checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat)
+        const
+{
+    if (mFormats.isEmpty()) {
+        if (updatedFormat != NULL) {
+            *updatedFormat = format;
+        }
+        return NO_ERROR;
+    }
+
+    const bool checkInexact = // when port is input and format is linear pcm
+            mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK
+            && audio_is_linear_pcm(format);
+
+    for (size_t i = 0; i < mFormats.size(); ++i) {
+        if (mFormats[i] == format ||
+                (checkInexact && audio_is_linear_pcm(mFormats[i]))) {
+            // for inexact checks we take the first linear pcm format since
+            // mFormats is sorted from best PCM format to worst PCM format.
+            if (updatedFormat != NULL) {
+                *updatedFormat = mFormats[i];
+            }
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
 
 uint32_t AudioPort::pickSamplingRate() const
 {
@@ -756,7 +791,7 @@
         mChannelMask = config->channel_mask;
     }
     if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        status = audioport->checkFormat(config->format);
+        status = audioport->checkExactFormat(config->format);
         if (status != NO_ERROR) {
             goto exit;
         }
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index de6539c..7b6d51d 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -40,7 +40,9 @@
                                     uint32_t samplingRate,
                                     uint32_t *updatedSamplingRate,
                                     audio_format_t format,
+                                    audio_format_t *updatedFormat,
                                     audio_channel_mask_t channelMask,
+                                    audio_channel_mask_t *updatedChannelMask,
                                     uint32_t flags) const
 {
     const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE;
@@ -71,7 +73,14 @@
          return false;
     }
 
-    if (!audio_is_valid_format(format) || checkFormat(format) != NO_ERROR) {
+    if (!audio_is_valid_format(format)) {
+        return false;
+    }
+    if (isPlaybackThread && checkExactFormat(format) != NO_ERROR) {
+        return false;
+    }
+    audio_format_t myUpdatedFormat = format;
+    if (isRecordThread && checkCompatibleFormat(format, &myUpdatedFormat) != NO_ERROR) {
         return false;
     }
 
@@ -79,8 +88,9 @@
             checkExactChannelMask(channelMask) != NO_ERROR)) {
         return false;
     }
+    audio_channel_mask_t myUpdatedChannelMask = channelMask;
     if (isRecordThread && (!audio_is_input_channel(channelMask) ||
-            checkCompatibleChannelMask(channelMask) != NO_ERROR)) {
+            checkCompatibleChannelMask(channelMask, &myUpdatedChannelMask) != NO_ERROR)) {
         return false;
     }
 
@@ -99,6 +109,12 @@
     if (updatedSamplingRate != NULL) {
         *updatedSamplingRate = myUpdatedSamplingRate;
     }
+    if (updatedFormat != NULL) {
+        *updatedFormat = myUpdatedFormat;
+    }
+    if (updatedChannelMask != NULL) {
+        *updatedChannelMask = myUpdatedChannelMask;
+    }
     return true;
 }
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 35e80f7..ba9f996 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -585,8 +585,10 @@
         }
         for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
             sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
-            bool found = profile->isCompatibleProfile(device, String8(""), samplingRate,
-                    NULL /*updatedSamplingRate*/, format, channelMask,
+            bool found = profile->isCompatibleProfile(device, String8(""),
+                    samplingRate, NULL /*updatedSamplingRate*/,
+                    format, NULL /*updatedFormat*/,
+                    channelMask, NULL /*updatedChannelMask*/,
                     flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ?
                         AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_DIRECT);
             if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) {
@@ -1303,20 +1305,25 @@
         }
     }
 
-    sp<IOProfile> profile = getInputProfile(device, address,
-                                            samplingRate, format, channelMask,
-                                            flags);
-    if (profile == 0) {
-        //retry without flags
-        audio_input_flags_t log_flags = flags;
-        flags = AUDIO_INPUT_FLAG_NONE;
+    // find a compatible input profile (not necessarily identical in parameters)
+    sp<IOProfile> profile;
+    // samplingRate, format and channelMask may be updated by getInputProfile; flags may be cleared on retry
+    uint32_t profileSamplingRate = samplingRate;
+    audio_format_t profileFormat = format;
+    audio_channel_mask_t profileChannelMask = channelMask;
+    audio_input_flags_t profileFlags = flags;
+    for (;;) {
         profile = getInputProfile(device, address,
-                                  samplingRate, format, channelMask,
-                                  flags);
-        if (profile == 0) {
+                                  profileSamplingRate, profileFormat, profileChannelMask,
+                                  profileFlags);
+        if (profile != 0) {
+            break; // success
+        } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
+            profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
+        } else { // fail
             ALOGW("getInputForAttr() could not find profile for device 0x%X, samplingRate %u,"
                     "format %#x, channelMask 0x%X, flags %#x",
-                    device, samplingRate, format, channelMask, log_flags);
+                    device, samplingRate, format, channelMask, flags);
             return BAD_VALUE;
         }
     }
@@ -1327,9 +1334,9 @@
     }
 
     audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-    config.sample_rate = samplingRate;
-    config.channel_mask = channelMask;
-    config.format = format;
+    config.sample_rate = profileSamplingRate;
+    config.channel_mask = profileChannelMask;
+    config.format = profileFormat;
 
     status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
                                                    input,
@@ -1337,14 +1344,15 @@
                                                    &device,
                                                    address,
                                                    halInputSource,
-                                                   flags);
+                                                   profileFlags);
 
     // only accept input with the exact requested set of parameters
     if (status != NO_ERROR || *input == AUDIO_IO_HANDLE_NONE ||
-        (samplingRate != config.sample_rate) ||
-        (format != config.format) ||
-        (channelMask != config.channel_mask)) {
-        ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d, channelMask %x",
+        (profileSamplingRate != config.sample_rate) ||
+        (profileFormat != config.format) ||
+        (profileChannelMask != config.channel_mask)) {
+        ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d,"
+                " channelMask %x",
                 samplingRate, format, channelMask);
         if (*input != AUDIO_IO_HANDLE_NONE) {
             mpClientInterface->closeInput(*input);
@@ -1356,9 +1364,9 @@
     inputDesc->mInputSource = inputSource;
     inputDesc->mRefCount = 0;
     inputDesc->mOpenRefCount = 1;
-    inputDesc->mSamplingRate = samplingRate;
-    inputDesc->mFormat = format;
-    inputDesc->mChannelMask = channelMask;
+    inputDesc->mSamplingRate = profileSamplingRate;
+    inputDesc->mFormat = profileFormat;
+    inputDesc->mChannelMask = profileChannelMask;
     inputDesc->mDevice = device;
     inputDesc->mSessions.add(session);
     inputDesc->mIsSoundTrigger = isSoundTrigger;
@@ -2122,9 +2130,12 @@
                                                            patch->sources[0].sample_rate,
                                                            NULL,  // updatedSamplingRate
                                                            patch->sources[0].format,
+                                                           NULL,  // updatedFormat
                                                            patch->sources[0].channel_mask,
+                                                           NULL,  // updatedChannelMask
                                                            AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
-                ALOGV("createAudioPatch() profile not supported for device %08x", devDesc->type());
+                ALOGV("createAudioPatch() profile not supported for device %08x",
+                        devDesc->type());
                 return INVALID_OPERATION;
             }
             devices.add(devDesc);
@@ -2176,7 +2187,9 @@
                                                           patch->sinks[0].sample_rate,
                                                           NULL, /*updatedSampleRate*/
                                                           patch->sinks[0].format,
+                                                          NULL, /*updatedFormat*/
                                                           patch->sinks[0].channel_mask,
+                                                          NULL, /*updatedChannelMask*/
                                                           // FIXME for the parameter type,
                                                           // and the NONE
                                                           (audio_output_flags_t)
@@ -4201,12 +4214,15 @@
 sp<IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
                                                   String8 address,
                                                   uint32_t& samplingRate,
-                                                  audio_format_t format,
-                                                  audio_channel_mask_t channelMask,
+                                                  audio_format_t& format,
+                                                  audio_channel_mask_t& channelMask,
                                                   audio_input_flags_t flags)
 {
     // Choose an input profile based on the requested capture parameters: select the first available
     // profile supporting all requested parameters.
+    //
+    // TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
+    // the best matching profile, not the first one.
 
     for (size_t i = 0; i < mHwModules.size(); i++)
     {
@@ -4219,7 +4235,11 @@
             // profile->log();
             if (profile->isCompatibleProfile(device, address, samplingRate,
                                              &samplingRate /*updatedSamplingRate*/,
-                                             format, channelMask, (audio_output_flags_t) flags)) {
+                                             format,
+                                             &format /*updatedFormat*/,
+                                             channelMask,
+                                             &channelMask /*updatedChannelMask*/,
+                                             (audio_output_flags_t) flags)) {
 
                 return profile;
             }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 11fd5ff..fe6b986 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -470,12 +470,12 @@
         audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
                                        audio_output_flags_t flags,
                                        audio_format_t format);
-        // samplingRate parameter is an in/out and so may be modified
+        // samplingRate, format, channelMask are in/out and so may be modified
         sp<IOProfile> getInputProfile(audio_devices_t device,
                                       String8 address,
                                       uint32_t& samplingRate,
-                                      audio_format_t format,
-                                      audio_channel_mask_t channelMask,
+                                      audio_format_t& format,
+                                      audio_channel_mask_t& channelMask,
                                       audio_input_flags_t flags);
         sp<IOProfile> getProfileForDirectOutput(audio_devices_t device,
                                                        uint32_t samplingRate,
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index a763151..9510727 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -262,8 +262,7 @@
         return BAD_VALUE;
     }
 
-    if (((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) ||
-        ((attr->source == AUDIO_SOURCE_FM_TUNER) && !captureFmTunerAllowed())) {
+    if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
         return BAD_VALUE;
     }
     sp<AudioPolicyEffects>audioPolicyEffects;
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
index 372a9fa..e4ca5dc 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
@@ -255,8 +255,7 @@
         inputSource = AUDIO_SOURCE_MIC;
     }
 
-    if (((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) ||
-        ((inputSource == AUDIO_SOURCE_FM_TUNER) && !captureFmTunerAllowed())) {
+    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
         return BAD_VALUE;
     }
 
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index e9c96c6..414d563 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -122,7 +122,7 @@
 // should be ok for now.
 static CameraService *gCameraService;
 
-CameraService::CameraService() : mEventLog(DEFAULT_EVICTION_LOG_LENGTH),
+CameraService::CameraService() : mEventLog(DEFAULT_EVENT_LOG_LENGTH),
         mLastUserId(DEFAULT_LAST_USER_ID), mSoundRef(0), mModule(0), mFlashlight(0) {
     ALOGI("CameraService started (pid=%d)", getpid());
     gCameraService = this;
@@ -242,6 +242,8 @@
     }
 
     if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
+        logDeviceRemoved(id, String8::format("Device status changed from %d to %d", oldStatus,
+                newStatus));
         sp<BasicClient> clientToDisconnect;
         {
             // Don't do this in updateStatus to avoid deadlock over mServiceLock
@@ -274,6 +276,10 @@
         }
 
     } else {
+        if (oldStatus == ICameraServiceListener::Status::STATUS_NOT_PRESENT) {
+            logDeviceAdded(id, String8::format("Device status changed from %d to %d", oldStatus,
+                    newStatus));
+        }
         updateStatus(static_cast<ICameraServiceListener::Status>(newStatus), id);
     }
 
@@ -765,8 +771,8 @@
     } else {
         // We only trust our own process to forward client UIDs
         if (callingPid != getpid()) {
-            ALOGE("CameraService::connect X (PID %d) rejected (don't trust clientUid)",
-                    callingPid);
+            ALOGE("CameraService::connect X (PID %d) rejected (don't trust clientUid %d)",
+                    callingPid, clientUid);
             return PERMISSION_DENIED;
         }
     }
@@ -796,10 +802,12 @@
         return -EACCES;
     }
 
-    // Only allow clients who are being used by the current foreground device user.
-    if (mLastUserId != clientUserId && mLastUserId != DEFAULT_LAST_USER_ID) {
-        ALOGE("CameraService::connect X (PID %d) rejected (cannot connect from non-foreground "
-                "device user)", callingPid);
+    // Only allow clients who are being used by the current foreground device user, unless calling
+    // from our own process.
+    if (callingPid != getpid() &&
+            (mLastUserId != clientUserId && mLastUserId != DEFAULT_LAST_USER_ID)) {
+        ALOGE("CameraService::connect X (PID %d) rejected (cannot connect from previous "
+                "device user %d, current device user %d)", callingPid, clientUserId, mLastUserId);
         return PERMISSION_DENIED;
     }
 
@@ -858,7 +866,7 @@
         std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial) {
 
     status_t ret = NO_ERROR;
-    std::vector<sp<BasicClient>> evictedClients;
+    std::vector<DescriptorPtr> evictedClients;
     DescriptorPtr clientDescriptor;
     {
         if (effectiveApiLevel == API_1) {
@@ -934,7 +942,7 @@
                     mActiveClientManager.getIncompatibleClients(clientDescriptor);
 
             String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
-                    "(PID %d, priority %d)", curTime.string(),
+                    "(PID %d, priority %d) due to eviction policy", curTime.string(),
                     cameraId.string(), packageName.string(), clientPid,
                     getCameraPriorityFromProcState(priorities[priorities.size() - 1]));
 
@@ -946,6 +954,7 @@
             }
 
             // Log the client's attempt
+            Mutex::Autolock l(mLogLock);
             mEventLog.add(msg);
 
             return -EBUSY;
@@ -965,14 +974,12 @@
 
             ALOGE("CameraService::connect evicting conflicting client for camera ID %s",
                     i->getKey().string());
-            evictedClients.push_back(clientSp);
-
-            String8 curTime = getFormattedCurrentTime();
+            evictedClients.push_back(i);
 
             // Log the clients evicted
-            mEventLog.add(String8::format("%s : EVICT device %s client for package %s (PID %"
-                    PRId32 ", priority %" PRId32 ")\n   - Evicted by device %s client for "
-                    "package %s (PID %d, priority %" PRId32 ")", curTime.string(),
+            logEvent(String8::format("EVICT device %s client held by package %s (PID"
+                    " %" PRId32 ", priority %" PRId32 ")\n   - Evicted by device %s client for"
+                    " package %s (PID %d, priority %" PRId32 ")",
                     i->getKey().string(), String8{clientSp->getPackageName()}.string(),
                     i->getOwnerId(), i->getPriority(), cameraId.string(),
                     packageName.string(), clientPid,
@@ -994,12 +1001,31 @@
     // Destroy evicted clients
     for (auto& i : evictedClients) {
         // Disconnect is blocking, and should only have returned when HAL has cleaned up
-        i->disconnect(); // Clients will remove themselves from the active client list here
+        i->getValue()->disconnect(); // Clients will remove themselves from the active client list
     }
-    evictedClients.clear();
 
     IPCThreadState::self()->restoreCallingIdentity(token);
 
+    for (const auto& i : evictedClients) {
+        ALOGV("%s: Waiting for disconnect to complete for client for device %s (PID %" PRId32 ")",
+                __FUNCTION__, i->getKey().string(), i->getOwnerId());
+        ret = mActiveClientManager.waitUntilRemoved(i, DEFAULT_DISCONNECT_TIMEOUT_NS);
+        if (ret == TIMED_OUT) {
+            ALOGE("%s: Timed out waiting for client for device %s to disconnect, "
+                    "current clients:\n%s", __FUNCTION__, i->getKey().string(),
+                    mActiveClientManager.toString().string());
+            return -EBUSY;
+        }
+        if (ret != NO_ERROR) {
+            ALOGE("%s: Received error waiting for client for device %s to disconnect: %s (%d), "
+                    "current clients:\n%s", __FUNCTION__, i->getKey().string(), strerror(-ret),
+                    ret, mActiveClientManager.toString().string());
+            return ret;
+        }
+    }
+
+    evictedClients.clear();
+
     // Once clients have been disconnected, relock
     mServiceLock.lock();
 
@@ -1027,6 +1053,8 @@
             clientPackageName, clientUid, API_1, false, false, /*out*/client);
 
     if(ret != NO_ERROR) {
+        logRejected(id, getCallingPid(), String8(clientPackageName),
+                String8::format("%s (%d)", strerror(-ret), ret));
         return ret;
     }
 
@@ -1042,6 +1070,7 @@
         /*out*/
         sp<ICamera>& device) {
 
+    String8 id = String8::format("%d", cameraId);
     int apiVersion = mModule->getModuleApiVersion();
     if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
             apiVersion < CAMERA_MODULE_API_VERSION_2_3) {
@@ -1053,16 +1082,19 @@
          */
         ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!",
                 __FUNCTION__, apiVersion);
+        logRejected(id, getCallingPid(), String8(clientPackageName),
+                String8("HAL module version doesn't support legacy HAL connections"));
         return INVALID_OPERATION;
     }
 
     status_t ret = NO_ERROR;
-    String8 id = String8::format("%d", cameraId);
     sp<Client> client = nullptr;
     ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion, clientPackageName,
             clientUid, API_1, true, false, /*out*/client);
 
     if(ret != NO_ERROR) {
+        logRejected(id, getCallingPid(), String8(clientPackageName),
+                String8::format("%s (%d)", strerror(-ret), ret));
         return ret;
     }
 
@@ -1086,6 +1118,8 @@
             /*out*/client);
 
     if(ret != NO_ERROR) {
+        logRejected(id, getCallingPid(), String8(clientPackageName),
+                String8::format("%s (%d)", strerror(-ret), ret));
         return ret;
     }
 
@@ -1426,6 +1460,8 @@
         newUserId = DEFAULT_LAST_USER_ID;
     }
 
+    logUserSwitch(mLastUserId, newUserId);
+
     mLastUserId = newUserId;
 
     // Current user has switched, evict all current clients.
@@ -1444,12 +1480,12 @@
 
         ALOGE("Evicting conflicting client for camera ID %s due to user change",
                 i->getKey().string());
+
         // Log the clients evicted
-        mEventLog.add(String8::format("%s : EVICT device %s client for package %s (PID %"
+        logEvent(String8::format("EVICT device %s client held by package %s (PID %"
                 PRId32 ", priority %" PRId32 ")\n   - Evicted due to user switch.",
-                curTime.string(), i->getKey().string(),
-                String8{clientSp->getPackageName()}.string(), i->getOwnerId(),
-                i->getPriority()));
+                i->getKey().string(), String8{clientSp->getPackageName()}.string(),
+                i->getOwnerId(), i->getPriority()));
 
     }
 
@@ -1470,22 +1506,52 @@
     mServiceLock.lock();
 }
 
-void CameraService::logDisconnected(const String8& cameraId, int clientPid,
-        const String8& clientPackage) {
-
+void CameraService::logEvent(const char* event) {
     String8 curTime = getFormattedCurrentTime();
-    // Log the clients evicted
-    mEventLog.add(String8::format("%s : DISCONNECT device %s client for package %s (PID %d)",
-            curTime.string(), cameraId.string(), clientPackage.string(), clientPid));
+    Mutex::Autolock l(mLogLock);
+    mEventLog.add(String8::format("%s : %s", curTime.string(), event));
 }
 
-void CameraService::logConnected(const String8& cameraId, int clientPid,
-        const String8& clientPackage) {
-
-    String8 curTime = getFormattedCurrentTime();
+void CameraService::logDisconnected(const char* cameraId, int clientPid,
+        const char* clientPackage) {
     // Log the clients evicted
-    mEventLog.add(String8::format("%s : CONNECT device %s client for package %s (PID %d)",
-            curTime.string(), cameraId.string(), clientPackage.string(), clientPid));
+    logEvent(String8::format("DISCONNECT device %s client for package %s (PID %d)", cameraId,
+            clientPackage, clientPid));
+}
+
+void CameraService::logConnected(const char* cameraId, int clientPid,
+        const char* clientPackage) {
+    // Log the client connected
+    logEvent(String8::format("CONNECT device %s client for package %s (PID %d)", cameraId,
+            clientPackage, clientPid));
+}
+
+void CameraService::logRejected(const char* cameraId, int clientPid,
+        const char* clientPackage, const char* reason) {
+    // Log the client rejected
+    logEvent(String8::format("REJECT device %s client for package %s (PID %d), reason: (%s)",
+            cameraId, clientPackage, clientPid, reason));
+}
+
+void CameraService::logUserSwitch(int oldUserId, int newUserId) {
+    // Log the new and old users
+    logEvent(String8::format("USER_SWITCH from old user: %d, to new user: %d", oldUserId,
+            newUserId));
+}
+
+void CameraService::logDeviceRemoved(const char* cameraId, const char* reason) {
+    // Log the device removal
+    logEvent(String8::format("REMOVE device %s, reason: (%s)", cameraId, reason));
+}
+
+void CameraService::logDeviceAdded(const char* cameraId, const char* reason) {
+    // Log the device addition
+    logEvent(String8::format("ADD device %s, reason: (%s)", cameraId, reason));
+}
+
+void CameraService::logClientDied(int clientPid, const char* reason) {
+    // Log the client death
+    logEvent(String8::format("DIED client(s) with PID %d, reason: (%s)", clientPid, reason));
 }
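
For context, here is a minimal standalone sketch of the timestamped, bounded event-log pattern these helpers rely on. It uses std::deque and std::mutex as stand-ins for the RingBuffer<String8> and Mutex members; all names are illustrative and not part of the patch. A single entry point timestamps the message, takes one lock, and evicts the oldest entry once the buffer reaches capacity, so the dump path can print a bounded, consistent history.

#include <cstddef>
#include <ctime>
#include <deque>
#include <mutex>
#include <string>

// Illustrative stand-in for the mEventLog/mLogLock pair used above.
class EventLog {
  public:
    explicit EventLog(size_t capacity) : mCapacity(capacity) {}

    // Rough equivalent of logEvent(): timestamp, lock, append, and drop the
    // oldest entry once the ring reaches its fixed capacity.
    void add(const std::string& event) {
        std::lock_guard<std::mutex> lock(mLock);
        if (mEntries.size() == mCapacity) {
            mEntries.pop_front();
        }
        mEntries.push_back(timestamp() + " : " + event);
    }

    // Rough equivalent of the dump() loop: copy under the same lock so a
    // reader never observes a half-written history.
    std::deque<std::string> snapshot() const {
        std::lock_guard<std::mutex> lock(mLock);
        return mEntries;
    }

  private:
    static std::string timestamp() {
        std::time_t t = std::time(nullptr);
        char buf[32];
        std::strftime(buf, sizeof(buf), "%m-%d %H:%M:%S", std::localtime(&t));
        return buf;
    }

    const size_t mCapacity;
    mutable std::mutex mLock;
    std::deque<std::string> mEntries;
};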
 
 status_t CameraService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
@@ -1911,7 +1977,7 @@
 }
 
 status_t CameraService::dump(int fd, const Vector<String16>& args) {
-    String8 result;
+    String8 result("Dump of the Camera Service:\n");
     if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
         result.appendFormat("Permission Denial: "
                 "can't dump CameraService from pid=%d, uid=%d\n",
@@ -1957,12 +2023,15 @@
 
         result = String8("Prior client events (most recent at top):\n");
 
-        for (const auto& msg : mEventLog) {
-            result.appendFormat("%s\n", msg.string());
-        }
+        {
+            Mutex::Autolock l(mLogLock);
+            for (const auto& msg : mEventLog) {
+                result.appendFormat("%s\n", msg.string());
+            }
 
-        if (mEventLog.size() == DEFAULT_EVICTION_LOG_LENGTH) {
-            result.append("...\n");
+            if (mEventLog.size() == DEFAULT_EVENT_LOG_LENGTH) {
+                result.append("...\n");
+            }
         }
 
         write(fd, result.string(), result.size());
@@ -2094,10 +2163,12 @@
 /*virtual*/void CameraService::binderDied(const wp<IBinder> &who) {
 
     /**
-      * While tempting to promote the wp<IBinder> into a sp,
-      * it's actually not supported by the binder driver
+      * While tempting to promote the wp<IBinder> into a sp, it's actually not supported by the
+      * binder driver
       */
 
+    logClientDied(getCallingPid(), String8("Binder died unexpectedly"));
+
     // check torch client
     handleTorchClientBinderDied(who);
 
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index ca1c504..91c7d59 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -90,8 +90,11 @@
     // 3 second busy timeout when other clients are connecting
     static const nsecs_t DEFAULT_CONNECT_TIMEOUT_NS = 3000000000;
 
+    // 1 second busy timeout when other clients are disconnecting
+    static const nsecs_t DEFAULT_DISCONNECT_TIMEOUT_NS = 1000000000;
+
     // Default number of messages to store in eviction log
-    static const size_t DEFAULT_EVICTION_LOG_LENGTH = 50;
+    static const size_t DEFAULT_EVENT_LOG_LENGTH = 100;
 
     enum {
         // Default last user id
@@ -492,6 +495,7 @@
 
     // Circular buffer for storing event logging for dumps
     RingBuffer<String8> mEventLog;
+    Mutex mLogLock;
 
     // UID of last user.
     int mLastUserId;
@@ -546,14 +550,45 @@
     void doUserSwitch(int newUserId);
 
     /**
-     * Add a event log message that a client has been disconnected.
+     * Add an event log message.
      */
-    void logDisconnected(const String8& cameraId, int clientPid, const String8& clientPackage);
+    void logEvent(const char* event);
 
     /**
-     * Add a event log message that a client has been connected.
+     * Add an event log message that a client has been disconnected.
      */
-    void logConnected(const String8& cameraId, int clientPid, const String8& clientPackage);
+    void logDisconnected(const char* cameraId, int clientPid, const char* clientPackage);
+
+    /**
+     * Add an event log message that a client has been connected.
+     */
+    void logConnected(const char* cameraId, int clientPid, const char* clientPackage);
+
+    /**
+     * Add an event log message that a client's connect attempt has been rejected.
+     */
+    void logRejected(const char* cameraId, int clientPid, const char* clientPackage,
+            const char* reason);
+
+    /**
+     * Add an event log message that the current device user has been switched.
+     */
+    void logUserSwitch(int oldUserId, int newUserId);
+
+    /**
+     * Add an event log message that a device has been removed by the HAL
+     */
+    void logDeviceRemoved(const char* cameraId, const char* reason);
+
+    /**
+     * Add an event log message that a device has been added by the HAL
+     */
+    void logDeviceAdded(const char* cameraId, const char* reason);
+
+    /**
+     * Add an event log message that a client has unexpectedly died.
+     */
+    void logClientDied(int clientPid, const char* reason);
 
     int                 mNumberOfCameras;
 
@@ -714,9 +749,10 @@
     String8 clientName8(clientPackageName);
     int clientPid = getCallingPid();
 
-    ALOGI("CameraService::connect call E (PID %d \"%s\", camera ID %s) for HAL version %d and "
+    ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
             "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
-            halVersion, static_cast<int>(effectiveApiLevel));
+            (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
+            static_cast<int>(effectiveApiLevel));
 
     sp<CLIENT> client = nullptr;
     {
@@ -734,7 +770,15 @@
         if((ret = validateConnectLocked(cameraId, /*inout*/clientUid)) != NO_ERROR) {
             return ret;
         }
-        mLastUserId = multiuser_get_user_id(clientUid);
+        int userId = multiuser_get_user_id(clientUid);
+
+        if (userId != mLastUserId && clientPid != getpid() ) {
+            // If no previous user ID had been set, set to the user of the caller.
+            logUserSwitch(mLastUserId, userId);
+            LOG_ALWAYS_FATAL_IF(mLastUserId != DEFAULT_LAST_USER_ID,
+                    "Invalid state: Should never update user ID here unless was default");
+            mLastUserId = userId;
+        }
 
         // Check the shim parameters after acquiring lock, if they have already been updated and
         // we were doing a shim update, return immediately
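
The user-ID guard added above deserves a note: a non-service caller may only initialize mLastUserId while it still holds its default value; any later change must go through the explicit user-switch path, and the LOG_ALWAYS_FATAL_IF documents that invariant. A tiny sketch of the same rule with assumed names (DEFAULT_LAST_USER_ID's real value comes from the enum in CameraService.h; -1 here is only a stand-in):

#include <cassert>

constexpr int kDefaultLastUserId = -1;  // stand-in for DEFAULT_LAST_USER_ID

// Returns the user id to record for this connection attempt. A non-service
// caller may only initialize the value, never silently switch it; switching
// users is handled by the dedicated user-switch/eviction path.
static int adoptUserIdForConnect(int lastUserId, int callerUserId,
                                 bool callerIsService) {
    if (callerUserId == lastUserId || callerIsService) {
        return lastUserId;  // nothing to record
    }
    assert(lastUserId == kDefaultLastUserId &&
           "user id may only be initialized here, not switched");
    return callerUserId;
}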
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 8587e0e..0016174 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -65,6 +65,7 @@
                                    int servicePid) :
     Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
                 cameraId, cameraFacing, clientPid, clientUid, servicePid),
+    mInputStream(),
     mRequestIdCounter(0) {
 
     ATRACE_CALL();
@@ -127,6 +128,7 @@
     List<const CameraMetadata> metadataRequestList;
     int32_t requestId = mRequestIdCounter;
     uint32_t loopCounter = 0;
+    bool isReprocess = false;
 
     for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) {
         sp<CaptureRequest> request = *it;
@@ -134,6 +136,18 @@
             ALOGE("%s: Camera %d: Sent null request.",
                     __FUNCTION__, mCameraId);
             return BAD_VALUE;
+        } else if (it == requests.begin()) {
+            isReprocess = request->mIsReprocess;
+            if (isReprocess && !mInputStream.configured) {
+                ALOGE("%s: Camera %d: no input stream is configured.", __FUNCTION__, mCameraId);
+                return BAD_VALUE;
+            } else if (isReprocess && streaming) {
+                ALOGE("%s: Camera %d: streaming reprocess requests not supported.", __FUNCTION__, mCameraId);
+                return BAD_VALUE;
+            }
+        } else if (isReprocess != request->mIsReprocess) {
+            ALOGE("%s: Camera %d: Sent regular and reprocess requests.", __FUNCTION__, mCameraId);
+            return BAD_VALUE;
         }
 
         CameraMetadata metadata(request->mMetadata);
@@ -182,6 +196,10 @@
         metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
                         outputStreamIds.size());
 
+        if (isReprocess) {
+            metadata.update(ANDROID_REQUEST_INPUT_STREAMS, &mInputStream.id, 1);
+        }
+
         metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
         loopCounter++; // loopCounter starts from 1
         ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)",
@@ -260,8 +278,8 @@
 }
 
 status_t CameraDeviceClient::endConfigure() {
-    ALOGV("%s: ending configure (%zu streams)",
-            __FUNCTION__, mStreamMap.size());
+    ALOGV("%s: ending configure (%d input stream, %zu output streams)",
+            __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
 
     status_t res;
     if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
@@ -284,19 +302,25 @@
 
     if (!mDevice.get()) return DEAD_OBJECT;
 
-    // Guard against trying to delete non-created streams
+    bool isInput = false;
     ssize_t index = NAME_NOT_FOUND;
-    for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
-            index = i;
-            break;
-        }
-    }
 
-    if (index == NAME_NOT_FOUND) {
-        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
-              "created yet", __FUNCTION__, mCameraId, streamId);
-        return BAD_VALUE;
+    if (mInputStream.configured && mInputStream.id == streamId) {
+        isInput = true;
+    } else {
+        // Guard against trying to delete non-created streams
+        for (size_t i = 0; i < mStreamMap.size(); ++i) {
+            if (streamId == mStreamMap.valueAt(i)) {
+                index = i;
+                break;
+            }
+        }
+
+        if (index == NAME_NOT_FOUND) {
+            ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
+                  "created yet", __FUNCTION__, mCameraId, streamId);
+            return BAD_VALUE;
+        }
     }
 
     // Also returns BAD_VALUE if stream ID was not valid
@@ -307,8 +331,11 @@
               " already checked and the stream ID (%d) should be valid.",
               __FUNCTION__, mCameraId, streamId);
     } else if (res == OK) {
-        mStreamMap.removeItemsAt(index);
-
+        if (isInput) {
+            mInputStream.configured = false;
+        } else {
+            mStreamMap.removeItemsAt(index);
+        }
     }
 
     return res;
@@ -450,6 +477,58 @@
 }
 
 
+status_t CameraDeviceClient::createInputStream(int width, int height,
+        int format) {
+
+    ATRACE_CALL();
+    ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
+
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    if (mInputStream.configured) {
+        ALOGE("%s: Camera %d: Already has an input stream "
+                "configured (ID %d)", __FUNCTION__, mCameraId,
+                mInputStream.id);
+        return ALREADY_EXISTS;
+    }
+
+    int streamId = -1;
+    res = mDevice->createInputStream(width, height, format, &streamId);
+    if (res == OK) {
+        mInputStream.configured = true;
+        mInputStream.width = width;
+        mInputStream.height = height;
+        mInputStream.format = format;
+        mInputStream.id = streamId;
+
+        ALOGV("%s: Camera %d: Successfully created a new input stream ID %d",
+              __FUNCTION__, mCameraId, streamId);
+
+        return streamId;
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClient::getInputBufferProducer(
+        /*out*/sp<IGraphicBufferProducer> *producer) {
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    if (producer == NULL) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    return mDevice->getInputBufferProducer(producer);
+}
+
 bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
         int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
         /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
@@ -602,13 +681,19 @@
 
     result.append("  State:\n");
     result.appendFormat("    Request ID counter: %d\n", mRequestIdCounter);
+    if (mInputStream.configured) {
+        result.appendFormat("    Current input stream ID: %d\n",
+                    mInputStream.id);
+    } else {
+        result.append("    No input stream configured.\n");
+    }
     if (!mStreamMap.isEmpty()) {
-        result.append("    Current stream IDs:\n");
+        result.append("    Current output stream IDs:\n");
         for (size_t i = 0; i < mStreamMap.size(); i++) {
             result.appendFormat("      Stream %d\n", mStreamMap.valueAt(i));
         }
     } else {
-        result.append("    No streams configured.\n");
+        result.append("    No output streams configured.\n");
     }
     write(fd, result.string(), result.size());
     // TODO: print dynamic/request section from most recent requests
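
A condensed sketch of the burst-validation rule submitRequestList now enforces, written as a hypothetical free function over a simplified request type (not the patch's own code): the first request fixes whether the burst is reprocess or regular, a reprocess burst requires a configured input stream and cannot be streaming, and any later request that disagrees rejects the whole burst.

#include <vector>

struct RequestInfo {  // simplified stand-in for CaptureRequest
    bool isReprocess;
};

// 'inputConfigured' mirrors mInputStream.configured; 'streaming' mirrors the
// repeating-request flag passed to submitRequestList.
static bool validateBurst(const std::vector<RequestInfo>& requests,
                          bool inputConfigured, bool streaming) {
    if (requests.empty()) return false;

    const bool isReprocess = requests.front().isReprocess;
    if (isReprocess && !inputConfigured) return false;  // no input stream configured
    if (isReprocess && streaming) return false;         // streaming reprocess unsupported

    for (const RequestInfo& r : requests) {
        if (r.isReprocess != isReprocess) return false; // mixed regular/reprocess burst
    }
    return true;
}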
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index a3dbb90..f2d8899 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -86,6 +86,13 @@
 
     virtual status_t      createStream(const OutputConfiguration &outputConfiguration);
 
+    // Create an input stream of width, height, and format.
+    virtual status_t      createInputStream(int width, int height, int format);
+
+    // Get the buffer producer of the input stream
+    virtual status_t      getInputBufferProducer(
+                                /*out*/sp<IGraphicBufferProducer> *producer);
+
     // Create a request object from a template.
     virtual status_t      createDefaultRequest(int templateId,
                                                /*out*/
@@ -161,10 +168,18 @@
             android_dataspace dataSpace, const CameraMetadata& info,
             /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
 
-    // IGraphicsBufferProducer binder -> Stream ID
+    // IGraphicsBufferProducer binder -> Stream ID for output streams
     KeyedVector<sp<IBinder>, int> mStreamMap;
 
-    // Stream ID
+    struct InputStreamConfiguration {
+        bool configured;
+        int32_t width;
+        int32_t height;
+        int32_t format;
+        int32_t id;
+    } mInputStream;
+
+    // Request ID
     Vector<int> mStreamingRequestList;
 
     int32_t mRequestIdCounter;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index fe55b9e..6ece359 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -30,6 +30,7 @@
 #include "camera/CameraMetadata.h"
 #include "camera/CaptureResult.h"
 #include "common/CameraModule.h"
+#include "gui/IGraphicBufferProducer.h"
 
 namespace android {
 
@@ -110,6 +111,14 @@
             android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id) = 0;
 
     /**
+     * Create an input stream of width, height, and format.
+     *
+     * Return value is the stream ID if non-negative and an error if negative.
+     */
+    virtual status_t createInputStream(uint32_t width, uint32_t height,
+            int32_t format, /*out*/ int32_t *id) = 0;
+
+    /**
      * Create an input reprocess stream that uses buffers from an existing
      * output stream.
      */
@@ -150,6 +159,10 @@
      */
     virtual status_t configureStreams() = 0;
 
+    // get the buffer producer of the input stream
+    virtual status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer) = 0;
+
     /**
      * Create a metadata buffer with fields that the HAL device believes are
      * best for the given use case
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index 878986b..3c5ea9d 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -1581,4 +1581,18 @@
     return OK;
 }
 
+// camera 2 devices don't support reprocessing
+status_t Camera2Device::createInputStream(
+    uint32_t width, uint32_t height, int format, int *id) {
+    ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
+// camera 2 devices don't support reprocessing
+status_t Camera2Device::getInputBufferProducer(
+        sp<IGraphicBufferProducer> *producer) {
+    ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
 }; // namespace android
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 9b32fa6..9972606 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -59,6 +59,8 @@
     virtual status_t createStream(sp<ANativeWindow> consumer,
             uint32_t width, uint32_t height, int format,
             android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id);
+    virtual status_t createInputStream(
+            uint32_t width, uint32_t height, int format, int *id);
     virtual status_t createReprocessStreamFromStream(int outputId, int *id);
     virtual status_t getStreamInfo(int id,
             uint32_t *width, uint32_t *height, uint32_t *format);
@@ -67,6 +69,8 @@
     virtual status_t deleteReprocessStream(int id);
     // No-op on HAL2 devices
     virtual status_t configureStreams();
+    virtual status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer);
     virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
     virtual status_t waitUntilDrained();
     virtual status_t setNotifyCallback(NotificationListener *listener);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 8236788..dc752a6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -62,6 +62,7 @@
         mUsePartialResult(false),
         mNumPartialResults(1),
         mNextResultFrameNumber(0),
+        mNextReprocessResultFrameNumber(0),
         mNextShutterFrameNumber(0),
         mListener(NULL)
 {
@@ -201,6 +202,17 @@
         }
     }
 
+    camera_metadata_entry configs =
+            mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+    for (uint32_t i = 0; i < configs.count; i += 4) {
+        if (configs.data.i32[i] == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+                configs.data.i32[i + 3] ==
+                ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT) {
+            mSupportedOpaqueInputSizes.add(Size(configs.data.i32[i + 1],
+                    configs.data.i32[i + 2]));
+        }
+    }
+
     return OK;
 }
 
@@ -1019,6 +1031,20 @@
     return configureStreamsLocked();
 }
 
+status_t Camera3Device::getInputBufferProducer(
+        sp<IGraphicBufferProducer> *producer) {
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    if (producer == NULL) {
+        return BAD_VALUE;
+    } else if (mInputStream == NULL) {
+        return INVALID_OPERATION;
+    }
+
+    return mInputStream->getInputBufferProducer(producer);
+}
+
 status_t Camera3Device::createDefaultRequest(int templateId,
         CameraMetadata *request) {
     ATRACE_CALL();
@@ -1423,6 +1449,17 @@
     return newRequest;
 }
 
+bool Camera3Device::isOpaqueInputSizeSupported(uint32_t width, uint32_t height) {
+    for (uint32_t i = 0; i < mSupportedOpaqueInputSizes.size(); i++) {
+        Size size = mSupportedOpaqueInputSizes[i];
+        if (size.width == width && size.height == height) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
 status_t Camera3Device::configureStreamsLocked() {
     ATRACE_CALL();
     status_t res;
@@ -1947,20 +1984,31 @@
 void Camera3Device::sendCaptureResult(CameraMetadata &pendingMetadata,
         CaptureResultExtras &resultExtras,
         CameraMetadata &collectedPartialResult,
-        uint32_t frameNumber) {
+        uint32_t frameNumber,
+        bool reprocess) {
     if (pendingMetadata.isEmpty())
         return;
 
     Mutex::Autolock l(mOutputLock);
 
     // TODO: need to track errors for tighter bounds on expected frame number
-    if (frameNumber < mNextResultFrameNumber) {
-        SET_ERR("Out-of-order capture result metadata submitted! "
+    if (reprocess) {
+        if (frameNumber < mNextReprocessResultFrameNumber) {
+            SET_ERR("Out-of-order reprocess capture result metadata submitted! "
                 "(got frame number %d, expecting %d)",
-                frameNumber, mNextResultFrameNumber);
-        return;
+                frameNumber, mNextReprocessResultFrameNumber);
+            return;
+        }
+        mNextReprocessResultFrameNumber = frameNumber + 1;
+    } else {
+        if (frameNumber < mNextResultFrameNumber) {
+            SET_ERR("Out-of-order capture result metadata submitted! "
+                    "(got frame number %d, expecting %d)",
+                    frameNumber, mNextResultFrameNumber);
+            return;
+        }
+        mNextResultFrameNumber = frameNumber + 1;
     }
-    mNextResultFrameNumber = frameNumber + 1;
 
     CaptureResult captureResult;
     captureResult.mResultExtras = resultExtras;
@@ -2170,7 +2218,7 @@
                 CameraMetadata metadata;
                 metadata = result->result;
                 sendCaptureResult(metadata, request.resultExtras,
-                    collectedPartialResult, frameNumber);
+                    collectedPartialResult, frameNumber, hasInputBufferInRequest);
             }
         }
 
@@ -2332,7 +2380,8 @@
 
             // send pending result and buffers
             sendCaptureResult(r.pendingMetadata, r.resultExtras,
-                r.partialResult.collectedResult, msg.frame_number);
+                r.partialResult.collectedResult, msg.frame_number,
+                r.hasInputBuffer);
             returnOutputBuffers(r.pendingOutputBuffers.array(),
                 r.pendingOutputBuffers.size(), r.shutterTimestamp);
             r.pendingOutputBuffers.clear();
@@ -2669,7 +2718,6 @@
     // Fill in buffers
 
     if (nextRequest->mInputStream != NULL) {
-        request.input_buffer = &inputBuffer;
         res = nextRequest->mInputStream->getInputBuffer(&inputBuffer);
         if (res != OK) {
             // Can't get input buffer from gralloc queue - this could be due to
@@ -2686,6 +2734,7 @@
             cleanUpFailedRequest(request, nextRequest, outputBuffers);
             return true;
         }
+        request.input_buffer = &inputBuffer;
         totalNumBuffers += 1;
     } else {
         request.input_buffer = NULL;
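
The mSupportedOpaqueInputSizes population above walks ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, which is a flat int32 array of (format, width, height, direction) tuples. Here is a standalone sketch of that stride-4 scan; the two constants are stand-ins whose real definitions live in the graphics and camera-metadata headers.

#include <cstdint>
#include <utility>
#include <vector>

constexpr int32_t kFormatImplementationDefined = 0x22;  // HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED
constexpr int32_t kConfigurationInput = 1;              // ..._STREAM_CONFIGURATIONS_INPUT

// Each entry is 4 ints: format, width, height, direction. Keep the sizes of
// entries that are implementation-defined *input* configurations.
static std::vector<std::pair<int32_t, int32_t>> collectOpaqueInputSizes(
        const std::vector<int32_t>& configs) {
    std::vector<std::pair<int32_t, int32_t>> sizes;
    for (size_t i = 0; i + 3 < configs.size(); i += 4) {
        if (configs[i] == kFormatImplementationDefined &&
                configs[i + 3] == kConfigurationInput) {
            sizes.emplace_back(configs[i + 1], configs[i + 2]);  // (width, height)
        }
    }
    return sizes;
}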
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index a77548d..b08ba81 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -116,6 +116,8 @@
     virtual status_t deleteReprocessStream(int id);
 
     virtual status_t configureStreams();
+    virtual status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer);
 
     virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
 
@@ -179,6 +181,14 @@
 
     uint32_t                   mDeviceVersion;
 
+    struct Size {
+        uint32_t width;
+        uint32_t height;
+        Size(uint32_t w = 0, uint32_t h = 0) : width(w), height(h){}
+    };
+    // Supported sizes for the opaque (implementation-defined format) input stream.
+    Vector<Size>               mSupportedOpaqueInputSizes;
+
     enum Status {
         STATUS_ERROR,
         STATUS_UNINITIALIZED,
@@ -324,11 +334,11 @@
      */
     bool               tryLockSpinRightRound(Mutex& lock);
 
-    struct Size {
-        int width;
-        int height;
-        Size(int w, int h) : width(w), height(h){}
-    };
+    /**
+     * Helper function to determine if an input size for implementation defined
+     * format is supported.
+     */
+    bool isOpaqueInputSizeSupported(uint32_t width, uint32_t height);
 
     /**
      * Helper function to get the largest Jpeg resolution (in area)
@@ -639,8 +649,10 @@
     Mutex                  mOutputLock;
 
     /**** Scope for mOutputLock ****/
-
+    // The lowest frame number expected for the next non-reprocess result
     uint32_t               mNextResultFrameNumber;
+    // The lowest frame number expected for the next reprocess result
+    uint32_t               mNextReprocessResultFrameNumber;
     uint32_t               mNextShutterFrameNumber;
     List<CaptureResult>   mResultQueue;
     Condition              mResultSignal;
@@ -669,7 +681,8 @@
     // partial results, and the frame number to the result queue.
     void sendCaptureResult(CameraMetadata &pendingMetadata,
             CaptureResultExtras &resultExtras,
-            CameraMetadata &collectedPartialResult, uint32_t frameNumber);
+            CameraMetadata &collectedPartialResult, uint32_t frameNumber,
+            bool reprocess);
 
     /**** Scope for mInFlightLock ****/
 
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 6bf671e..fa97e57 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -65,8 +65,8 @@
     assert(mConsumer != 0);
 
     BufferItem bufferItem;
-    res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false);
 
+    res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false);
     if (res != OK) {
         ALOGE("%s: Stream %d: Can't acquire next output buffer: %s (%d)",
                 __FUNCTION__, mId, strerror(-res), res);
@@ -162,6 +162,21 @@
     return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false);
 }
 
+status_t Camera3InputStream::getInputBufferProducerLocked(
+            sp<IGraphicBufferProducer> *producer) {
+    ATRACE_CALL();
+
+    if (producer == NULL) {
+        return BAD_VALUE;
+    } else if (mProducer == NULL) {
+        ALOGE("%s: No input stream is configured", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
+    *producer = mProducer;
+    return OK;
+}
+
 status_t Camera3InputStream::disconnectLocked() {
 
     status_t res;
@@ -212,10 +227,17 @@
         res = producer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
         if (res != OK || minUndequeuedBuffers < 0) {
             ALOGE("%s: Stream %d: Could not query min undequeued buffers (error %d, bufCount %d)",
-                  __FUNCTION__, mId, res, minUndequeuedBuffers);
+                    __FUNCTION__, mId, res, minUndequeuedBuffers);
             return res;
         }
         size_t minBufs = static_cast<size_t>(minUndequeuedBuffers);
+
+        if (camera3_stream::max_buffers == 0) {
+            ALOGE("%s: %d: HAL sets max_buffers to 0. Must be at least 1.",
+                    __FUNCTION__, __LINE__);
+            return INVALID_OPERATION;
+        }
+
         /*
          * We promise never to 'acquire' more than camera3_stream::max_buffers
          * at any one time.
@@ -232,6 +254,8 @@
         mConsumer = new BufferItemConsumer(consumer, camera3_stream::usage,
                                            mTotalBufferCount);
         mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
+
+        mProducer = producer;
     }
 
     res = mConsumer->setDefaultBufferSize(camera3_stream::width,
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index fd17f4f..7ba36c9 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -49,6 +49,7 @@
   private:
 
     sp<BufferItemConsumer> mConsumer;
+    sp<IGraphicBufferProducer> mProducer;
     Vector<BufferItem> mBuffersInFlight;
 
     /**
@@ -68,6 +69,8 @@
     virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer);
     virtual status_t returnInputBufferLocked(
             const camera3_stream_buffer &buffer);
+    virtual status_t getInputBufferProducerLocked(
+            sp<IGraphicBufferProducer> *producer);
     virtual status_t disconnectLocked();
 
     virtual status_t configureQueueLocked();
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 0c739e9..8d9b360 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -396,8 +396,22 @@
     int32_t u = 0;
     res = mConsumer->query(mConsumer.get(),
             NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
-    *usage = u;
 
+    // If an opaque output stream's endpoint is ImageReader, add
+    // GRALLOC_USAGE_HW_CAMERA_ZSL to the usage so HAL knows it will be used
+    // for the ZSL use case.
+    // Assume it's for ImageReader if the consumer usage doesn't have any of these bits set:
+    //     1. GRALLOC_USAGE_HW_TEXTURE
+    //     2. GRALLOC_USAGE_HW_RENDER
+    //     3. GRALLOC_USAGE_HW_COMPOSER
+    //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
+    if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER |
+            GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
+        u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
+    }
+
+    *usage = u;
     return res;
 }
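
Here is a compact restatement of the consumer-usage heuristic described in the comment block above, as a hypothetical standalone helper rather than the member function itself; the usage and format constants come from <hardware/gralloc.h> and <system/graphics.h> in the Android tree.

#include <hardware/gralloc.h>
#include <system/graphics.h>

// If an opaque (implementation-defined) output stream's consumer sets none of
// the HW texture/render/composer/video-encoder bits, assume the endpoint is an
// ImageReader and OR in the ZSL usage bit so the HAL treats it as a ZSL output.
static uint32_t addZslUsageForImageReader(int format, uint32_t consumerUsage) {
    const uint32_t kHwConsumerMask =
            GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER |
            GRALLOC_USAGE_HW_COMPOSER | GRALLOC_USAGE_HW_VIDEO_ENCODER;
    if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
            (consumerUsage & kHwConsumerMask) == 0) {
        consumerUsage |= GRALLOC_USAGE_HW_CAMERA_ZSL;
    }
    return consumerUsage;
}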
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 4acbce3..d3c5cc3 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -346,6 +346,13 @@
     return res;
 }
 
+status_t Camera3Stream::getInputBufferProducer(sp<IGraphicBufferProducer> *producer) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    return getInputBufferProducerLocked(producer);
+}
+
 void Camera3Stream::fireBufferListenersLocked(
         const camera3_stream_buffer& /*buffer*/, bool acquired, bool output) {
     List<wp<Camera3StreamBufferListener> >::iterator it, end;
@@ -505,6 +512,10 @@
     ALOGE("%s: This type of stream does not support input", __FUNCTION__);
     return INVALID_OPERATION;
 }
+status_t Camera3Stream::getInputBufferProducerLocked(sp<IGraphicBufferProducer> *producer) {
+    ALOGE("%s: This type of stream does not support input", __FUNCTION__);
+    return INVALID_OPERATION;
+}
 
 void Camera3Stream::addBufferListener(
         wp<Camera3StreamBufferListener> listener) {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index aba27fe..e89361e 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -205,6 +205,10 @@
      */
     status_t         returnInputBuffer(const camera3_stream_buffer &buffer);
 
+    // get the buffer producer of the input buffer queue.
+    // Only applies to input streams.
+    status_t         getInputBufferProducer(sp<IGraphicBufferProducer> *producer);
+
     /**
      * Whether any of the stream's buffers are currently in use by the HAL,
      * including buffers that have been returned but not yet had their
@@ -285,6 +289,9 @@
     virtual status_t returnInputBufferLocked(
             const camera3_stream_buffer &buffer);
     virtual bool     hasOutstandingBuffersLocked() const = 0;
+    // Get the buffer producer of the input buffer queue. Only applies to input streams.
+    virtual status_t getInputBufferProducerLocked(sp<IGraphicBufferProducer> *producer);
+
     // Can return -ENOTCONN when we are already disconnected (not an error)
     virtual status_t disconnectLocked() = 0;
 
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index da989cd..ea90dd9 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -128,6 +128,13 @@
     virtual status_t returnInputBuffer(const camera3_stream_buffer &buffer) = 0;
 
     /**
+     * Get the buffer producer of the input buffer queue.
+     *
+     * This method only applies to input streams.
+     */
+    virtual status_t getInputBufferProducer(sp<IGraphicBufferProducer> *producer) = 0;
+
+    /**
      * Whether any of the stream's buffers are currently in use by the HAL,
      * including buffers that have been returned but not yet had their
      * release fence signaled.
diff --git a/services/camera/libcameraservice/utils/ClientManager.h b/services/camera/libcameraservice/utils/ClientManager.h
index ad5486d..aa40a2d 100644
--- a/services/camera/libcameraservice/utils/ClientManager.h
+++ b/services/camera/libcameraservice/utils/ClientManager.h
@@ -17,7 +17,9 @@
 #ifndef ANDROID_SERVICE_UTILS_EVICTION_POLICY_MANAGER_H
 #define ANDROID_SERVICE_UTILS_EVICTION_POLICY_MANAGER_H
 
+#include <utils/Condition.h>
 #include <utils/Mutex.h>
+#include <utils/Timers.h>
 
 #include <algorithm>
 #include <utility>
@@ -263,6 +265,16 @@
      */
     std::shared_ptr<ClientDescriptor<KEY, VALUE>> get(const KEY& key) const;
 
+    /**
+     * Block until the given client is no longer in the active clients list, or the timeout
+     * expires.
+     *
+     * Returns NO_ERROR if this succeeded, -ETIMEDOUT on a timeout, or a negative error code on
+     * failure.
+     */
+    status_t waitUntilRemoved(const std::shared_ptr<ClientDescriptor<KEY, VALUE>> client,
+            nsecs_t timeout) const;
+
 protected:
     ~ClientManager();
 
@@ -284,6 +296,7 @@
     int64_t getCurrentCostLocked() const;
 
     mutable Mutex mLock;
+    mutable Condition mRemovedCondition;
     int32_t mMaxCost;
     // LRU ordered, most recent at end
     std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> mClients;
@@ -430,6 +443,7 @@
         }), mClients.end());
 
     mClients.push_back(client);
+    mRemovedCondition.broadcast();
 
     return evicted;
 }
@@ -487,6 +501,7 @@
 void ClientManager<KEY, VALUE>::removeAll() {
     Mutex::Autolock lock(mLock);
     mClients.clear();
+    mRemovedCondition.broadcast();
 }
 
 template<class KEY, class VALUE>
@@ -505,6 +520,39 @@
             return false;
         }), mClients.end());
 
+    mRemovedCondition.broadcast();
+    return ret;
+}
+
+template<class KEY, class VALUE>
+status_t ClientManager<KEY, VALUE>::waitUntilRemoved(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>> client,
+        nsecs_t timeout) const {
+    status_t ret = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    bool isRemoved = false;
+
+    // Figure out what time in the future we should hit the timeout
+    nsecs_t failTime = systemTime(SYSTEM_TIME_MONOTONIC) + timeout;
+
+    while (!isRemoved) {
+        isRemoved = true;
+        for (const auto& i : mClients) {
+            if (i == client) {
+                isRemoved = false;
+            }
+        }
+
+        if (!isRemoved) {
+            ret = mRemovedCondition.waitRelative(mLock, timeout);
+            if (ret != NO_ERROR) {
+                break;
+            }
+            timeout = failTime - systemTime(SYSTEM_TIME_MONOTONIC);
+        }
+    }
+
     return ret;
 }
 
@@ -520,6 +568,7 @@
             }
             return false;
         }), mClients.end());
+    mRemovedCondition.broadcast();
 }
 
 template<class KEY, class VALUE>
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 7296d47..75a69ed 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -126,6 +126,7 @@
     Mutex::Autolock lock(mLock);
     ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
     ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
+    // TODO: do the merge instead of append.
     info.resources.appendVector(resources);
 }
 
@@ -197,19 +198,58 @@
                 }
             }
         }
+
+        if (clients.size() == 0) {
+            // if we are here, run the third pass to free one codec with the same type.
+            for (size_t i = 0; i < resources.size(); ++i) {
+                String8 type = resources[i].mType;
+                if (type == kResourceSecureCodec || type == kResourceNonSecureCodec) {
+                    sp<IResourceManagerClient> client;
+                    if (!getLowestPriorityBiggestClient_l(callingPid, type, &client)) {
+                        return false;
+                    }
+                    clients.push_back(client);
+                }
+            }
+        }
     }
 
     if (clients.size() == 0) {
         return false;
     }
 
+    sp<IResourceManagerClient> failedClient;
     for (size_t i = 0; i < clients.size(); ++i) {
         ALOGV("reclaimResource from client %p", clients[i].get());
         if (!clients[i]->reclaimResource()) {
-            return false;
+            failedClient = clients[i];
+            break;
         }
     }
-    return true;
+
+    {
+        Mutex::Autolock lock(mLock);
+        bool found = false;
+        for (size_t i = 0; i < mMap.size(); ++i) {
+            ResourceInfos &infos = mMap.editValueAt(i);
+            for (size_t j = 0; j < infos.size();) {
+                if (infos[j].client == failedClient) {
+                    j = infos.removeAt(j);
+                    found = true;
+                } else {
+                    ++j;
+                }
+            }
+            if (found) {
+                break;
+            }
+        }
+        if (!found) {
+            ALOGV("didn't find failed client");
+        }
+    }
+
+    return (failedClient == NULL);
 }
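
The tail of reclaimResource now tolerates a client that refuses to release: it stops at the first failure, scrubs that client's bookkeeping so later attempts do not keep selecting it, and reports overall failure. A simplified sketch of that control flow with hypothetical types (not the service's own data structures):

#include <algorithm>
#include <map>
#include <memory>
#include <vector>

struct Client {
    virtual ~Client() = default;
    // Returns true if the client actually released its resources.
    virtual bool reclaimResource() = 0;
};

using ClientList = std::vector<std::shared_ptr<Client>>;
using ClientMap  = std::map<int /*pid*/, ClientList>;

// Ask each selected client to release; on the first refusal, drop that
// client's records so it cannot block the next attempt, then report failure.
static bool reclaimFrom(const ClientList& selected, ClientMap& records) {
    std::shared_ptr<Client> failed;
    for (const auto& c : selected) {
        if (!c->reclaimResource()) {
            failed = c;
            break;
        }
    }
    if (failed) {
        for (auto& entry : records) {
            ClientList& list = entry.second;
            list.erase(std::remove(list.begin(), list.end(), failed), list.end());
        }
    }
    return failed == nullptr;
}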
 
 bool ResourceManagerService::getAllClients_l(
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index b73e1bc..48d1395 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 The Android Open Source Project
+ * Copyright 2015 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -118,6 +118,20 @@
         client3->reset();
     }
 
+    // test set up
+    // ---------------------------------------------------------------------------------
+    //   pid                priority         client           type               number
+    // ---------------------------------------------------------------------------------
+    //   kTestPid1(30)      30               mTestClient1     secure codec       1
+    //                                                        graphic memory     200
+    //                                                        graphic memory     200
+    // ---------------------------------------------------------------------------------
+    //   kTestPid2(20)      20               mTestClient2     non-secure codec   1
+    //                                                        graphic memory     300
+    //                                       -------------------------------------------
+    //                                       mTestClient3     secure codec       1
+    //                                                        graphic memory     100
+    // ---------------------------------------------------------------------------------
     void addResource() {
         // kTestPid1 mTestClient1
         Vector<MediaResource> resources1;
@@ -202,10 +216,12 @@
         int lowPriorityPid = 100;
         EXPECT_FALSE(mService->getAllClients_l(lowPriorityPid, type, &clients));
         int midPriorityPid = 25;
-        EXPECT_FALSE(mService->getAllClients_l(lowPriorityPid, type, &clients));
+        // some higher priority process (e.g. kTestPid2) owns the resource, so getAllClients_l
+        // will fail.
+        EXPECT_FALSE(mService->getAllClients_l(midPriorityPid, type, &clients));
         int highPriorityPid = 10;
-        EXPECT_TRUE(mService->getAllClients_l(10, unknowType, &clients));
-        EXPECT_TRUE(mService->getAllClients_l(10, type, &clients));
+        EXPECT_TRUE(mService->getAllClients_l(highPriorityPid, unknowType, &clients));
+        EXPECT_TRUE(mService->getAllClients_l(highPriorityPid, type, &clients));
 
         EXPECT_EQ(2u, clients.size());
         EXPECT_EQ(mTestClient3, clients[0]);
@@ -308,6 +324,30 @@
             // nothing left
             EXPECT_FALSE(mService->reclaimResource(10, resources));
         }
+
+        // ### secure codecs can coexist and secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = true;
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            Vector<MediaResource> resources;
+            resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            // secure codec from lowest process got reclaimed
+            verifyClients(true, false, false);
+
+            // call again should reclaim another secure codec from lowest process
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            verifyClients(false, false, true);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(10, resources));
+
+            // clean up client 2 which still has non secure codec left
+            mService->removeResource((int64_t) mTestClient2.get());
+        }
     }
 
     void testReclaimResourceNonSecure() {
@@ -360,6 +400,26 @@
             // nothing left
             EXPECT_FALSE(mService->reclaimResource(10, resources));
         }
+
+        // ### secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            Vector<MediaResource> resources;
+            resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+
+            EXPECT_TRUE(mService->reclaimResource(10, resources));
+            // one non secure codec from lowest process got reclaimed
+            verifyClients(false, true, false);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(10, resources));
+
+            // clean up client 1 and 3 which still have secure codec left
+            mService->removeResource((int64_t) mTestClient1.get());
+            mService->removeResource((int64_t) mTestClient3.get());
+        }
     }
 
     void testGetLowestPriorityBiggestClient() {