Merge "Camera2/3: Support new PASSIVE_UNFOCUSED AF state" into klp-dev
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 62f0c64..052064d 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -398,18 +398,20 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
-                void        pauseConditional();
-                                        // like pause(), but only if prior resume() wasn't latched
 
     private:
+                void        pauseInternal(nsecs_t ns = 0LL);
+                                        // like pause(), but only used internally within thread
+
         friend class AudioRecord;
         virtual bool        threadLoop();
         AudioRecord&        mReceiver;
         virtual ~AudioRecordThread();
         Mutex               mMyLock;    // Thread::mLock is private
         Condition           mMyCond;    // Thread::mThreadExitedCondition is private
-        bool                mPaused;    // whether thread is currently paused
-        bool                mResumeLatch;   // whether next pauseConditional() will be a nop
+        bool                mPaused;    // whether thread is requested to pause at next loop entry
+        bool                mPausedInt; // whether thread internally requests pause
+        nsecs_t             mPausedNs;  // if mPausedInt then associated timeout, otherwise ignored
     };
 
             // body of AudioRecordThread::threadLoop()
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 453c106..22ad57e 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -598,18 +598,20 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
-                void        pauseConditional();
-                                        // like pause(), but only if prior resume() wasn't latched
 
     private:
+                void        pauseInternal(nsecs_t ns = 0LL);
+                                        // like pause(), but only used internally within thread
+
         friend class AudioTrack;
         virtual bool        threadLoop();
         AudioTrack&         mReceiver;
         virtual ~AudioTrackThread();
         Mutex               mMyLock;    // Thread::mLock is private
         Condition           mMyCond;    // Thread::mThreadExitedCondition is private
-        bool                mPaused;    // whether thread is currently paused
-        bool                mResumeLatch;   // whether next pauseConditional() will be a nop
+        bool                mPaused;    // whether thread is requested to pause at next loop entry
+        bool                mPausedInt; // whether thread internally requests pause
+        nsecs_t             mPausedNs;  // if mPausedInt then associated timeout, otherwise ignored
     };
 
             // body of AudioTrackThread::threadLoop()
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index fef7af2..2998b37 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -49,8 +49,12 @@
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
     virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0;
 
-    virtual sp<IMemory>         decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0;
-    virtual sp<IMemory>         decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) = 0;
+    virtual status_t         decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
+                                    audio_format_t* pFormat,
+                                    const sp<IMemoryHeap>& heap, size_t *pSize) = 0;
+    virtual status_t         decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate,
+                                    int* pNumChannels, audio_format_t* pFormat,
+                                    const sp<IMemoryHeap>& heap, size_t *pSize) = 0;
     virtual sp<IOMX>            getOMX() = 0;
     virtual sp<ICrypto>         makeCrypto() = 0;
     virtual sp<IDrm>            makeDrm() = 0;
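
The decode() methods now return a status_t and write the decoded PCM into a caller-supplied
IMemoryHeap, with the decoded size reported through pSize, instead of returning an sp<IMemory>
allocated inside the media server. A minimal caller-side sketch of the new contract, modeled on
the SoundPool usage later in this change (the 1 MB heap size mirrors kDefaultHeapSize in
SoundPool.cpp; "url" is assumed to be an http:// URL, since the service rejects plain file paths):

    // Hypothetical client sketch: the caller owns the heap, the service fills it.
    sp<MemoryHeapBase> heap = new MemoryHeapBase(1024 * 1024);
    uint32_t sampleRate;
    int numChannels;
    audio_format_t format;
    size_t size;
    status_t status = MediaPlayer::decode(url, &sampleRate, &numChannels, &format, heap, &size);
    if (status == NO_ERROR) {
        // wrap the decoded region in an IMemory for consumers such as AudioTrack
        sp<IMemory> pcm = new MemoryBase(heap, 0, size);
    }
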
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 9e5654f..2dd78cc 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -22,6 +22,8 @@
 #include <utils/Vector.h>
 #include <utils/KeyedVector.h>
 #include <media/AudioTrack.h>
+#include <binder/MemoryHeapBase.h>
+#include <binder/MemoryBase.h>
 
 namespace android {
 
@@ -85,6 +87,7 @@
     int64_t             mLength;
     char*               mUrl;
     sp<IMemory>         mData;
+    sp<MemoryHeapBase>  mHeap;
 };
 
 // stores pending events for stolen channels
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index e429263..6167dd6 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -114,6 +114,14 @@
     status_t setScalingMode(uint32_t mode);
     uint32_t getScalingMode() { return mScalingMode; }
 
+    // set which measurements are done on the audio buffers processed by the effect.
+    // valid measurements (mask): MEASUREMENT_MODE_PEAK_RMS
+    status_t setMeasurementMode(uint32_t mode);
+    uint32_t getMeasurementMode() { return mMeasurementMode; }
+
+    // return a set of int32_t measurements
+    status_t getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements);
+
     // return a capture in PCM 8 bit unsigned format. The size of the capture is equal to
     // getCaptureSize()
     status_t getWaveForm(uint8_t *waveform);
@@ -156,6 +164,7 @@
     uint32_t mCaptureSize;
     uint32_t mSampleRate;
     uint32_t mScalingMode;
+    uint32_t mMeasurementMode;
     capture_cbk_t mCaptureCallBack;
     void *mCaptureCbkUser;
     sp<CaptureThread> mCaptureThread;
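
Measurement support on the Visualizer is a two-step contract: enable a measurement mode, then
poll for a fixed number of int32_t results while the effect is enabled. A hedged usage sketch for
the only mode added here, peak+RMS (the two result slots are filled by VISUALIZER_CMD_MEASURE in
EffectVisualizer.cpp below, both expressed in millibels, with -9600 used as the -96 dB silence
floor; readPeakRms is an illustrative helper, not part of this change):

    // Sketch of client usage; assumes an already created and enabled Visualizer.
    status_t readPeakRms(Visualizer& v, int32_t* peakMb, int32_t* rmsMb) {
        status_t status = v.setMeasurementMode(MEASUREMENT_MODE_PEAK_RMS);
        if (status != NO_ERROR) return status;
        int32_t measurements[2];    // MEASUREMENT_MODE_PEAK_RMS yields exactly 2 ints
        status = v.getIntMeasurements(MEASUREMENT_MODE_PEAK_RMS, 2, measurements);
        if (status == NO_ERROR) {
            *peakMb = measurements[MEASUREMENT_IDX_PEAK];   // e.g. -9600 for silence
            *rmsMb  = measurements[MEASUREMENT_IDX_RMS];
        }
        return status;
    }
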
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 923c8b2..2177c4c 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -223,8 +223,12 @@
             bool            isLooping();
             status_t        setVolume(float leftVolume, float rightVolume);
             void            notify(int msg, int ext1, int ext2, const Parcel *obj = NULL);
-    static  sp<IMemory>     decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
-    static  sp<IMemory>     decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
+    static  status_t        decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
+                                   audio_format_t* pFormat,
+                                   const sp<IMemoryHeap>& heap, size_t *pSize);
+    static  status_t        decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate,
+                                   int* pNumChannels, audio_format_t* pFormat,
+                                   const sp<IMemoryHeap>& heap, size_t *pSize);
             status_t        invoke(const Parcel& request, Parcel *reply);
             status_t        setMetadataFilter(const Parcel& filter);
             status_t        getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
diff --git a/media/libeffects/proxy/EffectProxy.cpp b/media/libeffects/proxy/EffectProxy.cpp
index 41640da..b3304b7 100644
--- a/media/libeffects/proxy/EffectProxy.cpp
+++ b/media/libeffects/proxy/EffectProxy.cpp
@@ -48,20 +48,6 @@
     &gProxyDescriptor,
 };
 
-static inline bool isGetterCmd(uint32_t cmdCode)
-{
-    switch (cmdCode) {
-    case EFFECT_CMD_GET_PARAM:
-    case EFFECT_CMD_GET_CONFIG:
-    case EFFECT_CMD_GET_CONFIG_REVERSE:
-    case EFFECT_CMD_GET_FEATURE_SUPPORTED_CONFIGS:
-    case EFFECT_CMD_GET_FEATURE_CONFIG:
-        return true;
-    default:
-        return false;
-    }
-}
-
 
 int EffectProxyCreate(const effect_uuid_t *uuid,
                             int32_t             sessionId,
@@ -80,6 +66,7 @@
     pContext->ioId = ioId;
     pContext->uuid = *uuid;
     pContext->common_itfe = &gEffectInterface;
+
     // The sub effects will be created in effect_command when the first command
     // for the effect is received
     pContext->eHandle[SUB_FX_HOST] = pContext->eHandle[SUB_FX_OFFLOAD] = NULL;
@@ -124,6 +111,10 @@
         uuid_print.node[1], uuid_print.node[2], uuid_print.node[3],
         uuid_print.node[4], uuid_print.node[5]);
 #endif
+
+    pContext->replySize = PROXY_REPLY_SIZE_DEFAULT;
+    pContext->replyData = (char *)malloc(PROXY_REPLY_SIZE_DEFAULT);
+
     *pHandle = (effect_handle_t)pContext;
     ALOGV("EffectCreate end");
     return 0;
@@ -137,6 +128,8 @@
     }
     ALOGV("EffectRelease");
     delete pContext->desc;
+    free(pContext->replyData);
+
     if (pContext->eHandle[SUB_FX_HOST])
        EffectRelease(pContext->eHandle[SUB_FX_HOST]);
     if (pContext->eHandle[SUB_FX_OFFLOAD])
@@ -253,43 +246,53 @@
     }
 
     // Getter commands are only sent to the active sub effect.
-    uint32_t hostReplySize = replySize != NULL ? *replySize : 0;
-    bool hostReplied = false;
-    int hostStatus = 0;
-    uint32_t offloadReplySize = replySize != NULL ? *replySize : 0;
-    bool offloadReplied = false;
-    int offloadStatus = 0;
+    int *subStatus[SUB_FX_COUNT];
+    uint32_t *subReplySize[SUB_FX_COUNT];
+    void *subReplyData[SUB_FX_COUNT];
+    uint32_t tmpSize;
+    int tmpStatus;
 
-    if (pContext->eHandle[SUB_FX_HOST] && (!isGetterCmd(cmdCode) || index == SUB_FX_HOST)) {
-        hostStatus = (*pContext->eHandle[SUB_FX_HOST])->command(
-                             pContext->eHandle[SUB_FX_HOST], cmdCode, cmdSize,
-                             pCmdData, replySize != NULL ? &hostReplySize : NULL, pReplyData);
-        hostReplied = true;
-    }
-    if (pContext->eHandle[SUB_FX_OFFLOAD] && (!isGetterCmd(cmdCode) || index == SUB_FX_OFFLOAD)) {
-        // In case of SET CMD, when the offload stream is unavailable,
-        // we will store the effect param values in the DSP effect wrapper.
-        // When the offload effects get enabled, we send these values to the
-        // DSP during Effect_config.
-        // So,we send the params to DSP wrapper also
-        offloadStatus = (*pContext->eHandle[SUB_FX_OFFLOAD])->command(
-                         pContext->eHandle[SUB_FX_OFFLOAD], cmdCode, cmdSize,
-                         pCmdData, replySize != NULL ? &offloadReplySize : NULL, pReplyData);
-        offloadReplied = true;
-    }
-    // By convention the offloaded implementation reply is returned if command is processed by both
-    // host and offloaded sub effects
-    if (offloadReplied){
-        status = offloadStatus;
-        if (replySize) {
-            *replySize = offloadReplySize;
+    // grow temp reply buffer if needed
+    if (replySize != NULL) {
+        tmpSize = pContext->replySize;
+        while (tmpSize < *replySize && tmpSize < PROXY_REPLY_SIZE_MAX) {
+            tmpSize *= 2;
         }
-    } else if (hostReplied) {
-        status = hostStatus;
-        if (replySize) {
-            *replySize = hostReplySize;
+        if (tmpSize > pContext->replySize) {
+            ALOGV("Effect_command grow reply buf to %d", tmpSize);
+            pContext->replyData = (char *)realloc(pContext->replyData, tmpSize);
+            pContext->replySize = tmpSize;
         }
+        if (tmpSize > *replySize) {
+            tmpSize = *replySize;
+        }
+    } else {
+        tmpSize = 0;
     }
+    // tmpSize is now the actual reply size to use for the non-active sub effect
+
+    // Send the command to all sub effects so that their internal state is kept in sync.
+    // Only the reply from the active sub effect is returned to the caller; the reply from the
+    // other sub effect is discarded into pContext->replyData.
+    for (int i = 0; i < SUB_FX_COUNT; i++) {
+        if (pContext->eHandle[i] == NULL) {
+            continue;
+        }
+        if (i == index) {
+            subStatus[i] = &status;
+            subReplySize[i] = replySize;
+            subReplyData[i] = pReplyData;
+        } else {
+            subStatus[i] = &tmpStatus;
+            subReplySize[i] = replySize == NULL ? NULL : &tmpSize;
+            subReplyData[i] = pReplyData == NULL ? NULL : pContext->replyData;
+        }
+        *subStatus[i] = (*pContext->eHandle[i])->command(
+                             pContext->eHandle[i], cmdCode, cmdSize,
+                             pCmdData, subReplySize[i], subReplyData[i]);
+    }
+
     return status;
 }    /* end Effect_command */
 
diff --git a/media/libeffects/proxy/EffectProxy.h b/media/libeffects/proxy/EffectProxy.h
index 8992f93..acbe17e 100644
--- a/media/libeffects/proxy/EffectProxy.h
+++ b/media/libeffects/proxy/EffectProxy.h
@@ -57,6 +57,9 @@
   NULL,
 };
 
+#define PROXY_REPLY_SIZE_MAX     (64 * 1024) // must be power of two
+#define PROXY_REPLY_SIZE_DEFAULT 32          // must be power of two
+
 struct EffectContext {
   const struct effect_interface_s  *common_itfe; // Holds the itfe of the Proxy
   effect_descriptor_t*  desc;                    // Points to the sub effect descriptors
@@ -67,6 +70,8 @@
   int32_t               ioId;        // The ioId in which the effect is created.
                                      // Stored in context to pass on to sub effect creation
   effect_uuid_t         uuid;        // UUID of the Proxy
+  char*                 replyData;   // temporary buffer for non-active sub effect command reply
+  uint32_t              replySize;   // current size of temporary reply buffer
 };
 
 #if __cplusplus
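
Effect_command (above) now forwards every command to both sub effects to keep their internal
state in sync; only the active sub effect replies into the caller's buffer, while the other one
replies into this scratch buffer. The scratch buffer grows by doubling and is capped at
PROXY_REPLY_SIZE_MAX, which is why both constants are required to be powers of two. A standalone
sketch of that growth rule (grownReplySize is an illustrative helper, not part of the change):

    // Doubling growth for the proxy's scratch reply buffer, as in Effect_command.
    static uint32_t grownReplySize(uint32_t current, uint32_t requested) {
        uint32_t size = current;                    // start from the existing capacity
        while (size < requested && size < PROXY_REPLY_SIZE_MAX) {
            size *= 2;                              // stays a power of two, bounded by the cap
        }
        return size;                                // realloc only when size > current
    }
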
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 96935e3..dc403ab 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -22,6 +22,7 @@
 #include <string.h>
 #include <new>
 #include <time.h>
+#include <math.h>
 #include <audio_effects/effect_visualizer.h>
 
 
@@ -54,6 +55,18 @@
 
 #define CAPTURE_BUF_SIZE 65536 // "64k should be enough for everyone"
 
+#define DISCARD_MEASUREMENTS_TIME_MS 2000 // discard measurements older than this number of ms
+
+// maximum number of buffers for which we keep track of the measurements
+#define MEASUREMENT_WINDOW_MAX_SIZE_IN_BUFFERS 25 // note: buffer index is stored in uint8_t
+
+
+struct BufferStats {
+    bool mIsValid;
+    uint16_t mPeakU16; // the positive peak of the absolute value of the samples in a buffer
+    float mRmsSquared; // the average square of the samples in a buffer
+};
+
 struct VisualizerContext {
     const struct effect_interface_s *mItfe;
     effect_config_t mConfig;
@@ -65,11 +78,34 @@
     uint32_t mLatency;
     struct timespec mBufferUpdateTime;
     uint8_t mCaptureBuf[CAPTURE_BUF_SIZE];
+    // for measurements
+    uint8_t mChannelCount; // to avoid recomputing it every time a buffer is processed
+    uint32_t mMeasurementMode;
+    uint8_t mMeasurementWindowSizeInBuffers;
+    uint8_t mMeasurementBufferIdx;
+    BufferStats mPastMeasurements[MEASUREMENT_WINDOW_MAX_SIZE_IN_BUFFERS];
 };
 
 //
 //--- Local functions
 //
+uint32_t Visualizer_getDeltaTimeMsFromUpdatedTime(VisualizerContext* pContext) {
+    uint32_t deltaMs = 0;
+    if (pContext->mBufferUpdateTime.tv_sec != 0) {
+        struct timespec ts;
+        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+            time_t secs = ts.tv_sec - pContext->mBufferUpdateTime.tv_sec;
+            long nsec = ts.tv_nsec - pContext->mBufferUpdateTime.tv_nsec;
+            if (nsec < 0) {
+                --secs;
+                nsec += 1000000000;
+            }
+            deltaMs = secs * 1000 + nsec / 1000000;
+        }
+    }
+    return deltaMs;
+}
+
 
 void Visualizer_reset(VisualizerContext *pContext)
 {
@@ -165,9 +201,21 @@
     pContext->mConfig.outputCfg.bufferProvider.cookie = NULL;
     pContext->mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
 
+    // visualization initialization
     pContext->mCaptureSize = VISUALIZER_CAPTURE_SIZE_MAX;
     pContext->mScalingMode = VISUALIZER_SCALING_MODE_NORMALIZED;
 
+    // measurement initialization
+    pContext->mChannelCount = popcount(pContext->mConfig.inputCfg.channels);
+    pContext->mMeasurementMode = MEASUREMENT_MODE_NONE;
+    pContext->mMeasurementWindowSizeInBuffers = MEASUREMENT_WINDOW_MAX_SIZE_IN_BUFFERS;
+    pContext->mMeasurementBufferIdx = 0;
+    for (uint32_t i=0 ; i<pContext->mMeasurementWindowSizeInBuffers ; i++) {
+        pContext->mPastMeasurements[i].mIsValid = false;
+        pContext->mPastMeasurements[i].mPeakU16 = 0;
+        pContext->mPastMeasurements[i].mRmsSquared = 0;
+    }
+
     Visualizer_setConfig(pContext, &pContext->mConfig);
 
     return 0;
@@ -270,6 +318,30 @@
         return -EINVAL;
     }
 
+    // perform measurements if needed
+    if (pContext->mMeasurementMode & MEASUREMENT_MODE_PEAK_RMS) {
+        // find the peak and RMS squared for the new buffer
+        uint32_t inIdx;
+        int16_t maxSample = 0;
+        float rmsSqAcc = 0;
+        for (inIdx = 0 ; inIdx < inBuffer->frameCount * pContext->mChannelCount ; inIdx++) {
+            if (inBuffer->s16[inIdx] > maxSample) {
+                maxSample = inBuffer->s16[inIdx];
+            } else if (-inBuffer->s16[inIdx] > maxSample) {
+                maxSample = -inBuffer->s16[inIdx];
+            }
+            rmsSqAcc += (inBuffer->s16[inIdx] * inBuffer->s16[inIdx]);
+        }
+        // store the measurement
+        pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mPeakU16 = (uint16_t)maxSample;
+        pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mRmsSquared =
+                rmsSqAcc / (inBuffer->frameCount * pContext->mChannelCount);
+        pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mIsValid = true;
+        if (++pContext->mMeasurementBufferIdx >= pContext->mMeasurementWindowSizeInBuffers) {
+            pContext->mMeasurementBufferIdx = 0;
+        }
+    }
+
     // all code below assumes stereo 16 bit PCM output and input
     int32_t shift;
 
@@ -423,6 +495,12 @@
             p->vsize = sizeof(uint32_t);
             *replySize += sizeof(uint32_t);
             break;
+        case VISUALIZER_PARAM_MEASUREMENT_MODE:
+            ALOGV("get mMeasurementMode = %d", pContext->mMeasurementMode);
+            *((uint32_t *)p->data + 1) = pContext->mMeasurementMode;
+            p->vsize = sizeof(uint32_t);
+            *replySize += sizeof(uint32_t);
+            break;
         default:
             p->status = -EINVAL;
         }
@@ -452,6 +530,10 @@
             pContext->mLatency = *((uint32_t *)p->data + 1);
             ALOGV("set mLatency = %d", pContext->mLatency);
             break;
+        case VISUALIZER_PARAM_MEASUREMENT_MODE:
+            pContext->mMeasurementMode = *((uint32_t *)p->data + 1);
+            ALOGV("set mMeasurementMode = %d", pContext->mMeasurementMode);
+            break;
         default:
             *(int32_t *)pReplyData = -EINVAL;
         }
@@ -470,24 +552,12 @@
         }
         if (pContext->mState == VISUALIZER_STATE_ACTIVE) {
             int32_t latencyMs = pContext->mLatency;
-            uint32_t deltaMs = 0;
-            if (pContext->mBufferUpdateTime.tv_sec != 0) {
-                struct timespec ts;
-                if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
-                    time_t secs = ts.tv_sec - pContext->mBufferUpdateTime.tv_sec;
-                    long nsec = ts.tv_nsec - pContext->mBufferUpdateTime.tv_nsec;
-                    if (nsec < 0) {
-                        --secs;
-                        nsec += 1000000000;
-                    }
-                    deltaMs = secs * 1000 + nsec / 1000000;
-                    latencyMs -= deltaMs;
-                    if (latencyMs < 0) {
-                        latencyMs = 0;
-                    }
-                }
+            const uint32_t deltaMs = Visualizer_getDeltaTimeMsFromUpdatedTime(pContext);
+            latencyMs -= deltaMs;
+            if (latencyMs < 0) {
+                latencyMs = 0;
             }
-            uint32_t deltaSmpl = pContext->mConfig.inputCfg.samplingRate * latencyMs / 1000;
+            const uint32_t deltaSmpl = pContext->mConfig.inputCfg.samplingRate * latencyMs / 1000;
 
             int32_t capturePoint = pContext->mCaptureIdx - pContext->mCaptureSize - deltaSmpl;
             int32_t captureSize = pContext->mCaptureSize;
@@ -525,6 +595,54 @@
 
         break;
 
+    case VISUALIZER_CMD_MEASURE: {
+        uint16_t peakU16 = 0;
+        float sumRmsSquared = 0.0f;
+        uint8_t nbValidMeasurements = 0;
+        // reset measurements if last measurement was too long ago (which implies stored
+        // measurements aren't relevant anymore and shouldn't bias the new one)
+        const int32_t delayMs = Visualizer_getDeltaTimeMsFromUpdatedTime(pContext);
+        if (delayMs > DISCARD_MEASUREMENTS_TIME_MS) {
+            ALOGV("Discarding measurements, last measurement is %dms old", delayMs);
+            for (uint32_t i=0 ; i<pContext->mMeasurementWindowSizeInBuffers ; i++) {
+                pContext->mPastMeasurements[i].mIsValid = false;
+                pContext->mPastMeasurements[i].mPeakU16 = 0;
+                pContext->mPastMeasurements[i].mRmsSquared = 0;
+            }
+            pContext->mMeasurementBufferIdx = 0;
+        } else {
+            // only use actual measurements, otherwise the first RMS measurements taken before
+            // MEASUREMENT_WINDOW_MAX_SIZE_IN_BUFFERS buffers have been played would always be
+            // artificially low
+            for (uint32_t i=0 ; i < pContext->mMeasurementWindowSizeInBuffers ; i++) {
+                if (pContext->mPastMeasurements[i].mIsValid) {
+                    if (pContext->mPastMeasurements[i].mPeakU16 > peakU16) {
+                        peakU16 = pContext->mPastMeasurements[i].mPeakU16;
+                    }
+                    sumRmsSquared += pContext->mPastMeasurements[i].mRmsSquared;
+                    nbValidMeasurements++;
+                }
+            }
+        }
+        float rms = nbValidMeasurements == 0 ? 0.0f : sqrtf(sumRmsSquared / nbValidMeasurements);
+        int32_t* pIntReplyData = (int32_t*)pReplyData;
+        // convert from I16 sample values to mB and write results
+        if (rms < 0.000016f) {
+            pIntReplyData[MEASUREMENT_IDX_RMS] = -9600; //-96dB
+        } else {
+            pIntReplyData[MEASUREMENT_IDX_RMS] = (int32_t) (2000 * log10(rms / 32767.0f));
+        }
+        if (peakU16 == 0) {
+            pIntReplyData[MEASUREMENT_IDX_PEAK] = -9600; //-96dB
+        } else {
+            pIntReplyData[MEASUREMENT_IDX_PEAK] = (int32_t) (2000 * log10(peakU16 / 32767.0f));
+        }
+        ALOGV("VISUALIZER_CMD_MEASURE peak=%d (%dmB), rms=%.1f (%dmB)",
+                peakU16, pIntReplyData[MEASUREMENT_IDX_PEAK],
+                rms, pIntReplyData[MEASUREMENT_IDX_RMS]);
+        }
+        break;
+
     default:
         ALOGW("Visualizer_command invalid command %d",cmdCode);
         return -EINVAL;
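
VISUALIZER_CMD_MEASURE reduces the stored per-buffer statistics to a single peak and a single RMS
value, then converts both from linear 16-bit amplitude to millibels relative to full scale, with
a floor of -9600 mB (-96 dB) for silence. A small worked sketch of that conversion, using the
same 2000 * log10(v / 32767) formula (linearToMillibel is an illustrative helper only):

    #include <math.h>
    #include <stdint.h>

    // 32767 is full scale for 16-bit PCM; 2000 * log10(x) expresses 20 * log10(x) in millibels.
    static int32_t linearToMillibel(float linear /* 0..32767 */) {
        if (linear < 0.000016f) {       // same near-silence threshold as the RMS path above
            return -9600;               // clamp to -96 dB
        }
        return (int32_t)(2000 * log10(linear / 32767.0f));
    }
    // e.g. linearToMillibel(32767.0f) == 0 (full scale),
    //      linearToMillibel(3276.7f) is about -2000 (-20 dB)
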
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index e934a3e..c5a7777 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -105,6 +105,7 @@
         // Otherwise the callback thread will never exit.
         stop();
         if (mAudioRecordThread != 0) {
+            mProxy->interrupt();
             mAudioRecordThread->requestExit();  // see comment in AudioRecord.h
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
@@ -960,7 +961,7 @@
 // =========================================================================
 
 AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mResumeLatch(false)
+    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL)
 {
 }
 
@@ -977,25 +978,32 @@
             // caller will check for exitPending()
             return true;
         }
+        if (mPausedInt) {
+            if (mPausedNs > 0) {
+                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
+            } else {
+                mMyCond.wait(mMyLock);
+            }
+            mPausedInt = false;
+            return true;
+        }
     }
     nsecs_t ns =  mReceiver.processAudioBuffer(this);
     switch (ns) {
     case 0:
         return true;
-    case NS_WHENEVER:
-        sleep(1);
-        return true;
     case NS_INACTIVE:
-        pauseConditional();
+        pauseInternal();
         return true;
     case NS_NEVER:
         return false;
+    case NS_WHENEVER:
+        // FIXME increase poll interval, or make event-driven
+        ns = 1000000000LL;
+        // fall through
     default:
         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
-        struct timespec req;
-        req.tv_sec = ns / 1000000000LL;
-        req.tv_nsec = ns % 1000000000LL;
-        nanosleep(&req, NULL /*rem*/);
+        pauseInternal(ns);
         return true;
     }
 }
@@ -1004,38 +1012,37 @@
 {
     // must be in this order to avoid a race condition
     Thread::requestExit();
-    resume();
+    AutoMutex _l(mMyLock);
+    if (mPaused || mPausedInt) {
+        mPaused = false;
+        mPausedInt = false;
+        mMyCond.signal();
+    }
 }
 
 void AudioRecord::AudioRecordThread::pause()
 {
     AutoMutex _l(mMyLock);
     mPaused = true;
-    mResumeLatch = false;
-}
-
-void AudioRecord::AudioRecordThread::pauseConditional()
-{
-    AutoMutex _l(mMyLock);
-    if (mResumeLatch) {
-        mResumeLatch = false;
-    } else {
-        mPaused = true;
-    }
 }
 
 void AudioRecord::AudioRecordThread::resume()
 {
     AutoMutex _l(mMyLock);
-    if (mPaused) {
+    if (mPaused || mPausedInt) {
         mPaused = false;
-        mResumeLatch = false;
+        mPausedInt = false;
         mMyCond.signal();
-    } else {
-        mResumeLatch = true;
     }
 }
 
+void AudioRecord::AudioRecordThread::pauseInternal(nsecs_t ns)
+{
+    AutoMutex _l(mMyLock);
+    mPausedInt = true;
+    mPausedNs = ns;
+}
+
 // -------------------------------------------------------------------------
 
 }; // namespace android
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 15249a4..754a4e3 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -1782,7 +1782,7 @@
 // =========================================================================
 
 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mResumeLatch(false)
+    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL)
 {
 }
 
@@ -1799,25 +1799,32 @@
             // caller will check for exitPending()
             return true;
         }
+        if (mPausedInt) {
+            if (mPausedNs > 0) {
+                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
+            } else {
+                mMyCond.wait(mMyLock);
+            }
+            mPausedInt = false;
+            return true;
+        }
     }
     nsecs_t ns = mReceiver.processAudioBuffer(this);
     switch (ns) {
     case 0:
         return true;
-    case NS_WHENEVER:
-        sleep(1);
-        return true;
     case NS_INACTIVE:
-        pauseConditional();
+        pauseInternal();
         return true;
     case NS_NEVER:
         return false;
+    case NS_WHENEVER:
+        // FIXME increase poll interval, or make event-driven
+        ns = 1000000000LL;
+        // fall through
     default:
         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
-        struct timespec req;
-        req.tv_sec = ns / 1000000000LL;
-        req.tv_nsec = ns % 1000000000LL;
-        nanosleep(&req, NULL /*rem*/);
+        pauseInternal(ns);
         return true;
     }
 }
@@ -1826,36 +1833,35 @@
 {
     // must be in this order to avoid a race condition
     Thread::requestExit();
-    resume();
+    AutoMutex _l(mMyLock);
+    if (mPaused || mPausedInt) {
+        mPaused = false;
+        mPausedInt = false;
+        mMyCond.signal();
+    }
 }
 
 void AudioTrack::AudioTrackThread::pause()
 {
     AutoMutex _l(mMyLock);
     mPaused = true;
-    mResumeLatch = false;
-}
-
-void AudioTrack::AudioTrackThread::pauseConditional()
-{
-    AutoMutex _l(mMyLock);
-    if (mResumeLatch) {
-        mResumeLatch = false;
-    } else {
-        mPaused = true;
-    }
 }
 
 void AudioTrack::AudioTrackThread::resume()
 {
     AutoMutex _l(mMyLock);
-    if (mPaused) {
+    if (mPaused || mPausedInt) {
         mPaused = false;
-        mResumeLatch = false;
+        mPausedInt = false;
         mMyCond.signal();
-    } else {
-        mResumeLatch = true;
     }
 }
 
+void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
+{
+    AutoMutex _l(mMyLock);
+    mPausedInt = true;
+    mPausedNs = ns;
+}
+
 }; // namespace android
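
In both AudioRecordThread and AudioTrackThread the old nanosleep()/pauseConditional() logic is
replaced by an internal pause request with an optional timeout: the loop now blocks on the same
condition variable that pause()/resume() use, so resume() and requestExit() can wake the thread
immediately instead of waiting out a sleep. A minimal standalone sketch of the pattern
(InterruptiblePause is a hypothetical class, not the AudioTrack/AudioRecord code itself):

    // Interruptible timed pause, mirroring mPausedInt/mPausedNs added in this change.
    struct InterruptiblePause {
        Mutex     mLock;
        Condition mCond;
        bool      mPausedInt;
        nsecs_t   mPausedNs;

        InterruptiblePause() : mPausedInt(false), mPausedNs(0) { }

        void pauseFor(nsecs_t ns) {            // like pauseInternal(): request a (timed) pause
            AutoMutex _l(mLock);
            mPausedInt = true;
            mPausedNs = ns;
        }
        void waitIfPaused() {                  // called at the top of the thread loop
            AutoMutex _l(mLock);
            if (mPausedInt) {
                if (mPausedNs > 0) {
                    (void) mCond.waitRelative(mLock, mPausedNs);  // timed, wakeable by signal()
                } else {
                    mCond.wait(mLock);                            // until resume/exit
                }
                mPausedInt = false;
            }
        }
        void wake() {                          // like resume(): cancel any pending pause
            AutoMutex _l(mLock);
            if (mPausedInt) {
                mPausedInt = false;
                mCond.signal();
            }
        }
    };
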
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index be818c6..68928f1 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -108,7 +108,12 @@
         data.writeInt32(frameCount);
         track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
         data.writeInt32(lFlags);
-        data.writeStrongBinder(sharedBuffer->asBinder());
+        if (sharedBuffer != 0) {
+            data.writeInt32(true);
+            data.writeStrongBinder(sharedBuffer->asBinder());
+        } else {
+            data.writeInt32(false);
+        }
         data.writeInt32((int32_t) output);
         data.writeInt32((int32_t) tid);
         int lSessionId = 0;
@@ -738,15 +743,27 @@
             audio_channel_mask_t channelMask = data.readInt32();
             size_t frameCount = data.readInt32();
             track_flags_t flags = (track_flags_t) data.readInt32();
-            sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder());
+            bool haveSharedBuffer = data.readInt32() != 0;
+            sp<IMemory> buffer;
+            if (haveSharedBuffer) {
+                buffer = interface_cast<IMemory>(data.readStrongBinder());
+            }
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
             int sessionId = data.readInt32();
             String8 name;
             status_t status;
-            sp<IAudioTrack> track = createTrack(
-                    (audio_stream_type_t) streamType, sampleRate, format,
-                    channelMask, frameCount, &flags, buffer, output, tid, &sessionId, name, &status);
+            sp<IAudioTrack> track;
+            if ((haveSharedBuffer && (buffer == 0)) ||
+                    ((buffer != 0) && (buffer->pointer() == NULL))) {
+                ALOGW("CREATE_TRACK: cannot retrieve shared memory");
+                status = DEAD_OBJECT;
+            } else {
+                track = createTrack(
+                        (audio_stream_type_t) streamType, sampleRate, format,
+                        channelMask, frameCount, &flags, buffer, output, tid,
+                        &sessionId, name, &status);
+            }
             reply->writeInt32(flags);
             reply->writeInt32(sessionId);
             reply->writeString8(name);
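
createTrack() now marshals the shared buffer as an optional binder: a presence flag precedes the
strong binder so that a null sp<IMemory> is never passed to writeStrongBinder(), and the service
side can reject a shared memory reference it failed to map (buffer->pointer() == NULL). A hedged
sketch of that optional-binder pattern in isolation (the helper names are illustrative, not AOSP
API):

    // "Presence flag + strong binder" marshalling for an optional IMemory.
    static void writeOptionalMemory(Parcel& data, const sp<IMemory>& mem) {
        if (mem != 0) {
            data.writeInt32(true);                   // flag: a binder follows
            data.writeStrongBinder(mem->asBinder());
        } else {
            data.writeInt32(false);                  // flag: nothing follows
        }
    }

    static sp<IMemory> readOptionalMemory(const Parcel& data) {
        sp<IMemory> mem;
        if (data.readInt32() != 0) {
            mem = interface_cast<IMemory>(data.readStrongBinder());
        }
        return mem;                                  // may still be 0 if the binder was invalid
    }
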
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index 74f574d..3c22b4c 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -86,30 +86,48 @@
         return interface_cast<IMediaRecorder>(reply.readStrongBinder());
     }
 
-    virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
+    virtual status_t decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
+                               audio_format_t* pFormat,
+                               const sp<IMemoryHeap>& heap, size_t *pSize)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
         data.writeCString(url);
-        remote()->transact(DECODE_URL, data, &reply);
-        *pSampleRate = uint32_t(reply.readInt32());
-        *pNumChannels = reply.readInt32();
-        *pFormat = (audio_format_t) reply.readInt32();
-        return interface_cast<IMemory>(reply.readStrongBinder());
+        data.writeStrongBinder(heap->asBinder());
+        status_t status = remote()->transact(DECODE_URL, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+            if (status == NO_ERROR) {
+                *pSampleRate = uint32_t(reply.readInt32());
+                *pNumChannels = reply.readInt32();
+                *pFormat = (audio_format_t)reply.readInt32();
+                *pSize = (size_t)reply.readInt32();
+            }
+        }
+        return status;
     }
 
-    virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
+    virtual status_t decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate,
+                               int* pNumChannels, audio_format_t* pFormat,
+                               const sp<IMemoryHeap>& heap, size_t *pSize)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
         data.writeFileDescriptor(fd);
         data.writeInt64(offset);
         data.writeInt64(length);
-        remote()->transact(DECODE_FD, data, &reply);
-        *pSampleRate = uint32_t(reply.readInt32());
-        *pNumChannels = reply.readInt32();
-        *pFormat = (audio_format_t) reply.readInt32();
-        return interface_cast<IMemory>(reply.readStrongBinder());
+        data.writeStrongBinder(heap->asBinder());
+        status_t status = remote()->transact(DECODE_FD, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+            if (status == NO_ERROR) {
+                *pSampleRate = uint32_t(reply.readInt32());
+                *pNumChannels = reply.readInt32();
+                *pFormat = (audio_format_t)reply.readInt32();
+                *pSize = (size_t)reply.readInt32();
+            }
+        }
+        return status;
     }
 
     virtual sp<IOMX> getOMX() {
@@ -205,14 +223,19 @@
         case DECODE_URL: {
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
             const char* url = data.readCString();
+            sp<IMemoryHeap> heap = interface_cast<IMemoryHeap>(data.readStrongBinder());
             uint32_t sampleRate;
             int numChannels;
             audio_format_t format;
-            sp<IMemory> player = decode(url, &sampleRate, &numChannels, &format);
-            reply->writeInt32(sampleRate);
-            reply->writeInt32(numChannels);
-            reply->writeInt32((int32_t) format);
-            reply->writeStrongBinder(player->asBinder());
+            size_t size;
+            status_t status = decode(url, &sampleRate, &numChannels, &format, heap, &size);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(sampleRate);
+                reply->writeInt32(numChannels);
+                reply->writeInt32((int32_t)format);
+                reply->writeInt32((int32_t)size);
+            }
             return NO_ERROR;
         } break;
         case DECODE_FD: {
@@ -220,14 +243,20 @@
             int fd = dup(data.readFileDescriptor());
             int64_t offset = data.readInt64();
             int64_t length = data.readInt64();
+            sp<IMemoryHeap> heap = interface_cast<IMemoryHeap>(data.readStrongBinder());
             uint32_t sampleRate;
             int numChannels;
             audio_format_t format;
-            sp<IMemory> player = decode(fd, offset, length, &sampleRate, &numChannels, &format);
-            reply->writeInt32(sampleRate);
-            reply->writeInt32(numChannels);
-            reply->writeInt32((int32_t) format);
-            reply->writeStrongBinder(player->asBinder());
+            size_t size;
+            status_t status = decode(fd, offset, length, &sampleRate, &numChannels, &format,
+                                     heap, &size);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(sampleRate);
+                reply->writeInt32(numChannels);
+                reply->writeInt32((int32_t)format);
+                reply->writeInt32((int32_t)size);
+            }
             return NO_ERROR;
         } break;
         case CREATE_MEDIA_RECORDER: {
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 37b400c..8434d43 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -32,6 +32,8 @@
 uint32_t kMaxSampleRate = 48000;
 uint32_t kDefaultSampleRate = 44100;
 uint32_t kDefaultFrameCount = 1200;
+size_t kDefaultHeapSize = 1024 * 1024; // 1MB
+
 
 SoundPool::SoundPool(int maxChannels, audio_stream_type_t streamType, int srcQuality)
 {
@@ -464,7 +466,6 @@
 
 void Sample::init()
 {
-    mData = 0;
     mSize = 0;
     mRefCount = 0;
     mSampleID = 0;
@@ -482,7 +483,6 @@
         ALOGV("close(%d)", mFd);
         ::close(mFd);
     }
-    mData.clear();
     free(mUrl);
 }
 
@@ -491,44 +491,48 @@
     uint32_t sampleRate;
     int numChannels;
     audio_format_t format;
-    sp<IMemory> p;
+    status_t status;
+    mHeap = new MemoryHeapBase(kDefaultHeapSize);
+
     ALOGV("Start decode");
     if (mUrl) {
-        p = MediaPlayer::decode(mUrl, &sampleRate, &numChannels, &format);
+        status = MediaPlayer::decode(mUrl, &sampleRate, &numChannels, &format, mHeap, &mSize);
     } else {
-        p = MediaPlayer::decode(mFd, mOffset, mLength, &sampleRate, &numChannels, &format);
+        status = MediaPlayer::decode(mFd, mOffset, mLength, &sampleRate, &numChannels, &format,
+                                     mHeap, &mSize);
         ALOGV("close(%d)", mFd);
         ::close(mFd);
         mFd = -1;
     }
-    if (p == 0) {
+    if (status != NO_ERROR) {
         ALOGE("Unable to load sample: %s", mUrl);
-        return -1;
+        goto error;
     }
     ALOGV("pointer = %p, size = %u, sampleRate = %u, numChannels = %d",
-            p->pointer(), p->size(), sampleRate, numChannels);
+          mHeap->getBase(), mSize, sampleRate, numChannels);
 
     if (sampleRate > kMaxSampleRate) {
        ALOGE("Sample rate (%u) out of range", sampleRate);
-       return - 1;
+       status = BAD_VALUE;
+       goto error;
     }
 
     if ((numChannels < 1) || (numChannels > 2)) {
         ALOGE("Sample channel count (%d) out of range", numChannels);
-        return - 1;
+        status = BAD_VALUE;
+        goto error;
     }
 
-    //_dumpBuffer(p->pointer(), p->size());
-    uint8_t* q = static_cast<uint8_t*>(p->pointer()) + p->size() - 10;
-    //_dumpBuffer(q, 10, 10, false);
-
-    mData = p;
-    mSize = p->size();
+    mData = new MemoryBase(mHeap, 0, mSize);
     mSampleRate = sampleRate;
     mNumChannels = numChannels;
     mFormat = format;
     mState = READY;
-    return 0;
+    return NO_ERROR;
+
+error:
+    mHeap.clear();
+    return status;
 }
 
 
@@ -537,18 +541,6 @@
     mSoundPool = soundPool;
 }
 
-// This class is used to destroy a RefBase asynchronously
-class AsyncDestructThread : public Thread
-{
-public:
-    AsyncDestructThread(sp<RefBase> refBase) : mRefBase(refBase) { }
-protected:
-    virtual ~AsyncDestructThread() { }
-private:
-    virtual bool        threadLoop()    { return false; }
-    const   sp<RefBase> mRefBase;
-};
-
 // call with sound pool lock held
 void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftVolume,
         float rightVolume, int priority, int loop, float rate)
@@ -653,17 +645,6 @@
     if (status != NO_ERROR) {
         mAudioTrack.clear();
     }
-    // FIXME AudioTrack destruction should not be slow
-    if (oldTrack != 0) {
-        // must be a raw reference to avoid a race after run()
-        AsyncDestructThread *adt = new AsyncDestructThread(oldTrack);
-        // guaranteed to not run destructor
-        oldTrack.clear();
-        // after the run(), adt thread will hold a strong reference to oldTrack,
-        // and the only strong reference to itself
-        adt->run("AsyncDestruct");
-        // do not delete adt here: adt thread destroys itself, and oldTrack if needed
-    }
 }
 
 void SoundChannel::nextEvent()
@@ -767,7 +748,7 @@
         ALOGV("process %p channel %d EVENT_UNDERRUN or EVENT_BUFFER_END", this, mChannelID);
         mSoundPool->addToStopList(this);
     } else if (event == AudioTrack::EVENT_LOOP_END) {
-        ALOGV("End loop %p channel %d count %d", this, mChannelID, *(int *)info);
+        ALOGV("End loop %p channel %d", this, mChannelID);
     }
 }
 
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index e519f13..c146b8d 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -43,6 +43,7 @@
         mCaptureSize(CAPTURE_SIZE_DEF),
         mSampleRate(44100000),
         mScalingMode(VISUALIZER_SCALING_MODE_NORMALIZED),
+        mMeasurementMode(MEASUREMENT_MODE_NONE),
         mCaptureCallBack(NULL),
         mCaptureCbkUser(NULL)
 {
@@ -186,6 +187,73 @@
     return status;
 }
 
+status_t Visualizer::setMeasurementMode(uint32_t mode) {
+    if ((mode != MEASUREMENT_MODE_NONE)
+            // Note: needs to be handled as a mask when more measurement modes are added
+            && ((mode & MEASUREMENT_MODE_PEAK_RMS) != mode)) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock _l(mCaptureLock);
+
+    uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
+    effect_param_t *p = (effect_param_t *)buf32;
+
+    p->psize = sizeof(uint32_t);
+    p->vsize = sizeof(uint32_t);
+    *(int32_t *)p->data = VISUALIZER_PARAM_MEASUREMENT_MODE;
+    *((int32_t *)p->data + 1)= mode;
+    status_t status = setParameter(p);
+
+    ALOGV("setMeasurementMode mode %d  status %d p->status %d", mode, status, p->status);
+
+    if (status == NO_ERROR) {
+        status = p->status;
+        if (status == NO_ERROR) {
+            mMeasurementMode = mode;
+        }
+    }
+    return status;
+}
+
+status_t Visualizer::getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements) {
+    if (mMeasurementMode == MEASUREMENT_MODE_NONE) {
+        ALOGE("Cannot retrieve int measurements, no measurement mode set");
+        return INVALID_OPERATION;
+    }
+    if (!(mMeasurementMode & type)) {
+        // measurement type has not been set on this Visualizer
+        ALOGE("Cannot retrieve int measurements, requested measurement mode 0x%x not set(0x%x)",
+                type, mMeasurementMode);
+        return INVALID_OPERATION;
+    }
+    // only peak+RMS measurement supported
+    if ((type != MEASUREMENT_MODE_PEAK_RMS)
+            // for peak+RMS measurement, the results are 2 int32_t values
+            || (number != 2)) {
+        ALOGE("Cannot retrieve int measurements, MEASUREMENT_MODE_PEAK_RMS returns 2 ints, not %d",
+                        number);
+        return BAD_VALUE;
+    }
+
+    status_t status = NO_ERROR;
+    if (mEnabled) {
+        uint32_t replySize = number * sizeof(int32_t);
+        status = command(VISUALIZER_CMD_MEASURE,
+                sizeof(uint32_t)  /*cmdSize*/,
+                &type /*cmdData*/,
+                &replySize, measurements);
+        ALOGV("getMeasurements() command returned %d", status);
+        if ((status == NO_ERROR) && (replySize == 0)) {
+            status = NOT_ENOUGH_DATA;
+        }
+    } else {
+        ALOGV("getMeasurements() disabled");
+        return INVALID_OPERATION;
+    }
+    return status;
+}
+
 status_t Visualizer::getWaveForm(uint8_t *waveform)
 {
     if (waveform == NULL) {
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 4323d0c..0f6d897 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -776,17 +776,20 @@
     }
 }
 
-/*static*/ sp<IMemory> MediaPlayer::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
+/*static*/ status_t MediaPlayer::decode(const char* url, uint32_t *pSampleRate,
+                                           int* pNumChannels, audio_format_t* pFormat,
+                                           const sp<IMemoryHeap>& heap, size_t *pSize)
 {
     ALOGV("decode(%s)", url);
-    sp<IMemory> p;
+    status_t status;
     const sp<IMediaPlayerService>& service = getMediaPlayerService();
     if (service != 0) {
-        p = service->decode(url, pSampleRate, pNumChannels, pFormat);
+        status = service->decode(url, pSampleRate, pNumChannels, pFormat, heap, pSize);
     } else {
         ALOGE("Unable to locate media service");
+        status = DEAD_OBJECT;
     }
-    return p;
+    return status;
 
 }
 
@@ -796,17 +799,22 @@
     notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, 0);
 }
 
-/*static*/ sp<IMemory> MediaPlayer::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
+/*static*/ status_t MediaPlayer::decode(int fd, int64_t offset, int64_t length,
+                                        uint32_t *pSampleRate, int* pNumChannels,
+                                        audio_format_t* pFormat,
+                                        const sp<IMemoryHeap>& heap, size_t *pSize)
 {
     ALOGV("decode(%d, %lld, %lld)", fd, offset, length);
-    sp<IMemory> p;
+    status_t status;
     const sp<IMediaPlayerService>& service = getMediaPlayerService();
     if (service != 0) {
-        p = service->decode(fd, offset, length, pSampleRate, pNumChannels, pFormat);
+        status = service->decode(fd, offset, length, pSampleRate,
+                                 pNumChannels, pFormat, heap, pSize);
     } else {
         ALOGE("Unable to locate media service");
+        status = DEAD_OBJECT;
     }
-    return p;
+    return status;
 
 }
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 0dabd37..9553458 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -319,8 +319,8 @@
 
     result.append(" AudioCache\n");
     if (mHeap != 0) {
-        snprintf(buffer, 255, "  heap base(%p), size(%d), flags(%d), device(%s)\n",
-                mHeap->getBase(), mHeap->getSize(), mHeap->getFlags(), mHeap->getDevice());
+        snprintf(buffer, 255, "  heap base(%p), size(%d), flags(%d)\n",
+                mHeap->getBase(), mHeap->getSize(), mHeap->getFlags());
         result.append(buffer);
     }
     snprintf(buffer, 255, "  msec per frame(%f), channel count(%d), format(%d), frame count(%zd)\n",
@@ -1176,13 +1176,13 @@
 }
 #endif
 
-static size_t kDefaultHeapSize = 1024 * 1024; // 1MB
-
-sp<IMemory> MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
+status_t MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
+                                       audio_format_t* pFormat,
+                                       const sp<IMemoryHeap>& heap, size_t *pSize)
 {
     ALOGV("decode(%s)", url);
-    sp<MemoryBase> mem;
     sp<MediaPlayerBase> player;
+    status_t status = BAD_VALUE;
 
     // Protect our precious, precious DRMd ringtones by only allowing
     // decoding of http, but not filesystem paths or content Uris.
@@ -1190,7 +1190,7 @@
     // filedescriptor for them and use that.
     if (url != NULL && strncmp(url, "http://", 7) != 0) {
         ALOGD("Can't decode %s by path, use filedescriptor instead", url);
-        return mem;
+        return BAD_VALUE;
     }
 
     player_type playerType =
@@ -1198,7 +1198,7 @@
     ALOGV("player type = %d", playerType);
 
     // create the right type of player
-    sp<AudioCache> cache = new AudioCache(url);
+    sp<AudioCache> cache = new AudioCache(heap);
     player = MediaPlayerFactory::createPlayer(playerType, cache.get(), cache->notify);
     if (player == NULL) goto Exit;
     if (player->hardwareOutput()) goto Exit;
@@ -1224,22 +1224,27 @@
         goto Exit;
     }
 
-    mem = new MemoryBase(cache->getHeap(), 0, cache->size());
+    *pSize = cache->size();
     *pSampleRate = cache->sampleRate();
     *pNumChannels = cache->channelCount();
     *pFormat = cache->format();
-    ALOGV("return memory @ %p, sampleRate=%u, channelCount = %d, format = %d", mem->pointer(), *pSampleRate, *pNumChannels, *pFormat);
+    ALOGV("return size %d sampleRate=%u, channelCount = %d, format = %d",
+          *pSize, *pSampleRate, *pNumChannels, *pFormat);
+    status = NO_ERROR;
 
 Exit:
     if (player != 0) player->reset();
-    return mem;
+    return status;
 }
 
-sp<IMemory> MediaPlayerService::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat)
+status_t MediaPlayerService::decode(int fd, int64_t offset, int64_t length,
+                                       uint32_t *pSampleRate, int* pNumChannels,
+                                       audio_format_t* pFormat,
+                                       const sp<IMemoryHeap>& heap, size_t *pSize)
 {
     ALOGV("decode(%d, %lld, %lld)", fd, offset, length);
-    sp<MemoryBase> mem;
     sp<MediaPlayerBase> player;
+    status_t status = BAD_VALUE;
 
     player_type playerType = MediaPlayerFactory::getPlayerType(NULL /* client */,
                                                                fd,
@@ -1248,7 +1253,7 @@
     ALOGV("player type = %d", playerType);
 
     // create the right type of player
-    sp<AudioCache> cache = new AudioCache("decode_fd");
+    sp<AudioCache> cache = new AudioCache(heap);
     player = MediaPlayerFactory::createPlayer(playerType, cache.get(), cache->notify);
     if (player == NULL) goto Exit;
     if (player->hardwareOutput()) goto Exit;
@@ -1274,16 +1279,18 @@
         goto Exit;
     }
 
-    mem = new MemoryBase(cache->getHeap(), 0, cache->size());
+    *pSize = cache->size();
     *pSampleRate = cache->sampleRate();
     *pNumChannels = cache->channelCount();
     *pFormat = cache->format();
-    ALOGV("return memory @ %p, sampleRate=%u, channelCount = %d, format = %d", mem->pointer(), *pSampleRate, *pNumChannels, *pFormat);
+    ALOGV("return size %d, sampleRate=%u, channelCount = %d, format = %d",
+          *pSize, *pSampleRate, *pNumChannels, *pFormat);
+    status = NO_ERROR;
 
 Exit:
     if (player != 0) player->reset();
     ::close(fd);
-    return mem;
+    return status;
 }
 
 
@@ -1803,12 +1810,10 @@
 
 #undef LOG_TAG
 #define LOG_TAG "AudioCache"
-MediaPlayerService::AudioCache::AudioCache(const char* name) :
-    mChannelCount(0), mFrameCount(1024), mSampleRate(0), mSize(0),
-    mError(NO_ERROR), mCommandComplete(false)
+MediaPlayerService::AudioCache::AudioCache(const sp<IMemoryHeap>& heap) :
+    mHeap(heap), mChannelCount(0), mFrameCount(1024), mSampleRate(0), mSize(0),
+    mError(NO_ERROR),  mCommandComplete(false)
 {
-    // create ashmem heap
-    mHeap = new MemoryHeapBase(kDefaultHeapSize, 0, name);
 }
 
 uint32_t MediaPlayerService::AudioCache::latency () const
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 7d27944..21f4117 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -177,7 +177,7 @@
     class AudioCache : public MediaPlayerBase::AudioSink
     {
     public:
-                                AudioCache(const char* name);
+                                AudioCache(const sp<IMemoryHeap>& heap);
         virtual                 ~AudioCache() {}
 
         virtual bool            ready() const { return (mChannelCount > 0) && (mHeap->getHeapID() > 0); }
@@ -224,7 +224,7 @@
 
         Mutex               mLock;
         Condition           mSignal;
-        sp<MemoryHeapBase>  mHeap;
+        sp<IMemoryHeap>     mHeap;
         float               mMsecsPerFrame;
         uint16_t            mChannelCount;
         audio_format_t      mFormat;
@@ -247,8 +247,13 @@
 
     virtual sp<IMediaPlayer>    create(const sp<IMediaPlayerClient>& client, int audioSessionId);
 
-    virtual sp<IMemory>         decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
-    virtual sp<IMemory>         decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat);
+    virtual status_t            decode(const char* url, uint32_t *pSampleRate, int* pNumChannels,
+                                       audio_format_t* pFormat,
+                                       const sp<IMemoryHeap>& heap, size_t *pSize);
+    virtual status_t            decode(int fd, int64_t offset, int64_t length,
+                                       uint32_t *pSampleRate, int* pNumChannels,
+                                       audio_format_t* pFormat,
+                                       const sp<IMemoryHeap>& heap, size_t *pSize);
     virtual sp<IOMX>            getOMX();
     virtual sp<ICrypto>         makeCrypto();
     virtual sp<IDrm>            makeDrm();
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 946f602..f4b5846 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1681,6 +1681,26 @@
         return true;
     }
 
+    void handleFirstAccessUnit() {
+        if (mFirstAccessUnit) {
+            sp<AMessage> msg = mNotify->dup();
+            msg->setInt32("what", kWhatConnected);
+            msg->post();
+
+            if (mSeekable) {
+                for (size_t i = 0; i < mTracks.size(); ++i) {
+                    TrackInfo *info = &mTracks.editItemAt(i);
+
+                    postNormalPlayTimeMapping(
+                            i,
+                            info->mNormalPlayTimeRTP, info->mNormalPlayTimeUs);
+                }
+            }
+
+            mFirstAccessUnit = false;
+        }
+    }
+
     void onTimeUpdate(int32_t trackIndex, uint32_t rtpTime, uint64_t ntpTime) {
         ALOGV("onTimeUpdate track %d, rtpTime = 0x%08x, ntpTime = 0x%016llx",
              trackIndex, rtpTime, ntpTime);
@@ -1712,6 +1732,8 @@
             }
         }
         if (mAllTracksHaveTime && dataReceivedOnAllChannels()) {
+            handleFirstAccessUnit();
+
             // Time is now established, lets start timestamping immediately
             for (size_t i = 0; i < mTracks.size(); ++i) {
                 TrackInfo *trackInfo = &mTracks.editItemAt(i);
@@ -1745,23 +1767,7 @@
             return;
         }
 
-        if (mFirstAccessUnit) {
-            sp<AMessage> msg = mNotify->dup();
-            msg->setInt32("what", kWhatConnected);
-            msg->post();
-
-            if (mSeekable) {
-                for (size_t i = 0; i < mTracks.size(); ++i) {
-                    TrackInfo *info = &mTracks.editItemAt(i);
-
-                    postNormalPlayTimeMapping(
-                            i,
-                            info->mNormalPlayTimeRTP, info->mNormalPlayTimeUs);
-                }
-            }
-
-            mFirstAccessUnit = false;
-        }
+        handleFirstAccessUnit();
 
         TrackInfo *track = &mTracks.editItemAt(trackIndex);
 
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 4be292f..35e816b 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -975,7 +975,6 @@
 {
     ssize_t i;  // not size_t because i will count down to -1
     Vector <AudioCommand *> removedCommands;
-    nsecs_t time = 0;
     command->mTime = systemTime() + milliseconds(delayMs);
 
     // acquire wake lock to make sure delayed commands are processed
@@ -1021,7 +1020,10 @@
             } else {
                 data2->mKeyValuePairs = param2.toString();
             }
-            time = command2->mTime;
+            command->mTime = command2->mTime;
+            // Force delayMs to be non-zero so that the code below does not
+            // wait for the command status, since the command is now delayed.
+            delayMs = 1;
         } break;
 
         case SET_VOLUME: {
@@ -1032,7 +1034,10 @@
             ALOGV("Filtering out volume command on output %d for stream %d",
                     data->mIO, data->mStream);
             removedCommands.add(command2);
-            time = command2->mTime;
+            command->mTime = command2->mTime;
+            // Force delayMs to be non-zero so that the code below does not
+            // wait for the command status, since the command is now delayed.
+            delayMs = 1;
         } break;
         case START_TONE:
         case STOP_TONE:
@@ -1054,16 +1059,12 @@
     }
     removedCommands.clear();
 
-    // wait for status only if delay is 0 and command time was not modified above
-    if (delayMs == 0 && time == 0) {
+    // wait for status only if delay is 0
+    if (delayMs == 0) {
         command->mWaitStatus = true;
     } else {
         command->mWaitStatus = false;
     }
-    // update command time if modified above
-    if (time != 0) {
-        command->mTime = time;
-    }
 
     // insert command at the right place according to its time stamp
     ALOGV("inserting command: %d at index %d, num commands %d",
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 9210330..b771e3b 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2115,6 +2115,8 @@
     // and then that string will be logged at the next convenient opportunity.
     const char *logString = NULL;
 
+    checkSilentMode_l();
+
     while (!exitPending())
     {
         cpuStats.sample(myName);
@@ -2339,6 +2341,25 @@
 
 }
 
+status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
+{
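+    // The caller must hold the thread lock (hence the _l suffix).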
+    if (mNormalSink != 0) {
+        return mNormalSink->getTimestamp(timestamp);
+    }
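+    // No normal sink (e.g. offloaded output): query the HAL directly if it
+    // implements get_presentation_position().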
+    if (mType == OFFLOAD && mOutput->stream->get_presentation_position) {
+        uint64_t position64;
+        int ret = mOutput->stream->get_presentation_position(
+                                                mOutput->stream, &position64, &timestamp.mTime);
+        if (ret == 0) {
+            timestamp.mPosition = (uint32_t)position64;
+            return NO_ERROR;
+        }
+    }
+    return INVALID_OPERATION;
+}
 // ----------------------------------------------------------------------------
 
 AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index b3d88e3..443b8d7 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -466,6 +466,8 @@
                 // Returns the HAL's frame count, i.e. the fast mixer buffer size.
                 size_t      frameCountHAL() const { return mFrameCount; }
 
+                status_t         getTimestamp_l(AudioTimestamp& timestamp);
+
 protected:
     // updated by readOutputParameters()
     size_t                          mNormalFrameCount;  // normal mixer and effects
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 2b8f0ab..d8d325d 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -757,19 +757,25 @@
     }
     Mutex::Autolock _l(thread->mLock);
     PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-    if (!playbackThread->mLatchQValid) {
-        return INVALID_OPERATION;
+    if (!isOffloaded()) {
+        if (!playbackThread->mLatchQValid) {
+            return INVALID_OPERATION;
+        }
+        uint32_t unpresentedFrames =
+                ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
+                playbackThread->mSampleRate;
+        uint32_t framesWritten = mAudioTrackServerProxy->framesReleased();
+        if (framesWritten < unpresentedFrames) {
+            return INVALID_OPERATION;
+        }
+        timestamp.mPosition = framesWritten - unpresentedFrames;
+        timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
+        return NO_ERROR;
     }
-    uint32_t unpresentedFrames =
-            ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
-            playbackThread->mSampleRate;
-    uint32_t framesWritten = mAudioTrackServerProxy->framesReleased();
-    if (framesWritten < unpresentedFrames) {
-        return INVALID_OPERATION;
-    }
-    timestamp.mPosition = framesWritten - unpresentedFrames;
-    timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
-    return NO_ERROR;
+
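+    // Offloaded tracks get their timestamp from the playback thread, which in
+    // turn queries the audio HAL's presentation position.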
+    return playbackThread->getTimestamp_l(timestamp);
 }
 
 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index a41cc36..09829ea 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -327,6 +327,10 @@
     result.appendFormat("    Video stabilization is %s\n",
             p.videoStabilization ? "enabled" : "disabled");
 
+    result.appendFormat("    Selected still capture FPS range: %d - %d\n",
+            p.fastInfo.bestStillCaptureFpsRange[0],
+            p.fastInfo.bestStillCaptureFpsRange[1]);
+
     result.append("  Current streams:\n");
     result.appendFormat("    Preview stream ID: %d\n",
             getPreviewStreamId());
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index ad55feb..a6c1083 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -184,6 +184,7 @@
 
     // NOTE: Not scaled like FPS range values are.
     previewFps = fpsFromRange(previewFpsRange[0], previewFpsRange[1]);
+    lastSetPreviewFps = previewFps;
     params.set(CameraParameters::KEY_PREVIEW_FRAME_RATE,
             previewFps);
 
@@ -852,6 +853,34 @@
         arrayHeight = activeArraySize.data.i32[3];
     } else return NO_INIT;
 
+    // We'll set the target FPS range for still captures to be as wide
+    // as possible to give the HAL maximum latitude for exposure selection
+    camera_metadata_ro_entry_t availableFpsRanges =
+        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, 2);
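+    // Ranges are listed as (min, max) pairs, so expect an even, non-zero count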
+    if (availableFpsRanges.count < 2 || availableFpsRanges.count % 2 != 0) {
+        return NO_INIT;
+    }
+
+    int32_t bestStillCaptureFpsRange[2] = {
+        availableFpsRanges.data.i32[0], availableFpsRanges.data.i32[1]
+    };
+    int32_t curRange =
+            bestStillCaptureFpsRange[1] - bestStillCaptureFpsRange[0];
+    for (size_t i = 2; i < availableFpsRanges.count; i += 2) {
+        int32_t nextRange =
+                availableFpsRanges.data.i32[i + 1] -
+                availableFpsRanges.data.i32[i];
+        if ( (nextRange > curRange) ||       // Maximize size of FPS range first
+                (nextRange == curRange &&    // Then minimize low-end FPS
+                 bestStillCaptureFpsRange[0] > availableFpsRanges.data.i32[i])) {
+
+            bestStillCaptureFpsRange[0] = availableFpsRanges.data.i32[i];
+            bestStillCaptureFpsRange[1] = availableFpsRanges.data.i32[i + 1];
+            curRange = nextRange;
+        }
+    }
+
     camera_metadata_ro_entry_t availableFaceDetectModes =
         staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, 0, 0,
                 false);
@@ -971,6 +999,8 @@
 
     fastInfo.arrayWidth = arrayWidth;
     fastInfo.arrayHeight = arrayHeight;
+    fastInfo.bestStillCaptureFpsRange[0] = bestStillCaptureFpsRange[0];
+    fastInfo.bestStillCaptureFpsRange[1] = bestStillCaptureFpsRange[1];
     fastInfo.bestFaceDetectMode = bestFaceDetectMode;
     fastInfo.maxFaces = maxFaces;
 
@@ -1123,6 +1153,12 @@
         validatedParams.previewFps =
             fpsFromRange(validatedParams.previewFpsRange[0],
                          validatedParams.previewFpsRange[1]);
+
+        // Update our last-seen single preview FPS, needed to disambiguate
+        // whether the application intends to use the deprecated single-FPS
+        // setting or the FPS-range setting.
+        validatedParams.lastSetPreviewFps = newParams.getPreviewFrameRate();
+
         newParams.setPreviewFrameRate(validatedParams.previewFps);
     }
 
@@ -1158,12 +1194,15 @@
         }
     }
 
-    // PREVIEW_FRAME_RATE
-    // Deprecated, only use if the preview fps range is unchanged this time.
-    // The single-value FPS is the same as the minimum of the range.
+    // PREVIEW_FRAME_RATE is deprecated; only use it if the preview FPS range
+    // is unchanged this time. The single-value FPS is the same as the minimum
+    // of the range. To detect whether the application has changed the value
+    // of previewFps, compare against its last-set preview FPS instead of the
+    // single FPS we may have synthesized from a previously set FPS range.
     if (!fpsRangeChanged) {
         validatedParams.previewFps = newParams.getPreviewFrameRate();
-        if (validatedParams.previewFps != previewFps || recordingHintChanged) {
+        if (validatedParams.previewFps != lastSetPreviewFps ||
+                recordingHintChanged) {
             camera_metadata_ro_entry_t availableFrameRates =
                 staticInfo(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES);
             /**
@@ -1234,7 +1273,10 @@
                 String8::format("%d,%d",
                         validatedParams.previewFpsRange[0] * kFpsToApiScale,
                         validatedParams.previewFpsRange[1] * kFpsToApiScale));
-
+        // Update our last-seen single preview FPS, needed to disambiguate
+        // whether the application intends to use the deprecated single-FPS
+        // setting or the FPS-range setting.
+        validatedParams.lastSetPreviewFps = validatedParams.previewFps;
     }
 
     // PICTURE_SIZE
@@ -1709,8 +1751,16 @@
             &metadataMode, 1);
     if (res != OK) return res;
 
-    res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
-            previewFpsRange, 2);
+    camera_metadata_entry_t intent =
+            request->find(ANDROID_CONTROL_CAPTURE_INTENT);
+    if (intent.count == 0) return BAD_VALUE; // guard against a missing capture intent
+    if (intent.data.u8[0] == ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE) {
+        res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+                fastInfo.bestStillCaptureFpsRange, 2);
+    } else {
+        res = request->update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+                previewFpsRange, 2);
+    }
     if (res != OK) return res;
 
     uint8_t reqWbLock = autoWhiteBalanceLock ?
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index a7111a3..0505b0e 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -46,6 +46,7 @@
 
     int previewWidth, previewHeight;
     int32_t previewFpsRange[2];
+    int lastSetPreviewFps; // the last single FPS value seen in a set call
     int previewFps; // deprecated, here only for tracking changes
     int previewFormat;
 
@@ -179,6 +180,8 @@
     struct DeviceInfo {
         int32_t arrayWidth;
         int32_t arrayHeight;
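+        // Widest available AE target FPS range, used for still-capture requests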
+        int32_t bestStillCaptureFpsRange[2];
         uint8_t bestFaceDetectMode;
         int32_t maxFaces;
         struct OverrideModes {