Merge "NuPlayer2CCDecoder: Add bound check before memcpy"
diff --git a/camera/cameraserver/cameraserver.rc b/camera/cameraserver/cameraserver.rc
index fea5a1d..a9aae0b 100644
--- a/camera/cameraserver/cameraserver.rc
+++ b/camera/cameraserver/cameraserver.rc
@@ -4,3 +4,4 @@
     group audio camera input drmrpc
     ioprio rt 4
     writepid /dev/cpuset/camera-daemon/tasks /dev/stune/top-app/tasks
+    rlimit rtprio 10 10
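
The added `rlimit rtprio 10 10` line raises RLIMIT_RTPRIO for cameraserver so its threads can request real-time scheduling priorities up to 10 without holding CAP_SYS_NICE. A minimal sketch of what that permits (Linux-specific, not part of the patch):

```cpp
// Sketch: with RLIMIT_RTPRIO set to 10, an unprivileged thread may request
// SCHED_FIFO at priority <= 10; higher values fail with EPERM.
#include <pthread.h>
#include <sched.h>
#include <cstdio>

int main() {
    sched_param param{};
    param.sched_priority = 10;  // must not exceed the rtprio rlimit
    const int err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
    std::printf("pthread_setschedparam: %d\n", err);  // 0 on success
    return 0;
}
```
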
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 0501006..1dfa4f7 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -2312,7 +2312,9 @@
      * <p>If this device is the largest or only camera device with a given facing, then this
      * position will be <code>(0, 0, 0)</code>; a camera device with a lens optical center located 3 cm
      * from the main sensor along the +X axis (to the right from the user's perspective) will
-     * report <code>(0.03, 0, 0)</code>.</p>
+     * report <code>(0.03, 0, 0)</code>.  Note that this means that, for many computer vision
+     * applications, the position needs to be negated to convert it to a translation from the
+     * camera to the origin.</p>
      * <p>To transform a pixel coordinates between two cameras facing the same direction, first
      * the source camera ACAMERA_LENS_DISTORTION must be corrected for.  Then the source
      * camera ACAMERA_LENS_INTRINSIC_CALIBRATION needs to be applied, followed by the
@@ -2324,7 +2326,8 @@
      * <p>To compare this against a real image from the destination camera, the destination camera
      * image then needs to be corrected for radial distortion before comparison or sampling.</p>
      * <p>When ACAMERA_LENS_POSE_REFERENCE is GYROSCOPE, then this position is relative to
-     * the center of the primary gyroscope on the device.</p>
+     * the center of the primary gyroscope on the device. The axis definitions are the same as
+     * with PRIMARY_CAMERA.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
      * @see ACAMERA_LENS_INTRINSIC_CALIBRATION
@@ -2418,13 +2421,15 @@
      * </code></pre>
      * <p>which can then be combined with the camera pose rotation
      * <code>R</code> and translation <code>t</code> (ACAMERA_LENS_POSE_ROTATION and
-     * ACAMERA_LENS_POSE_TRANSLATION, respective) to calculate the
+     * ACAMERA_LENS_POSE_TRANSLATION, respectively) to calculate the
      * complete transform from world coordinates to pixel
      * coordinates:</p>
-     * <pre><code>P = [ K 0   * [ R t
-     *      0 1 ]     0 1 ]
+     * <pre><code>P = [ K 0   * [ R -Rt
+     *      0 1 ]      0 1 ]
      * </code></pre>
-     * <p>and with <code>p_w</code> being a point in the world coordinate system
+     * <p>(Note the negation of poseTranslation when mapping from camera
+     * to world coordinates, and multiplication by the rotation).</p>
+     * <p>With <code>p_w</code> being a point in the world coordinate system
      * and <code>p_s</code> being a point in the camera active pixel array
      * coordinate system, and with the mapping including the
      * homogeneous division by z:</p>
@@ -2446,6 +2451,13 @@
      * activeArraySize rectangle), to determine the final pixel
      * coordinate of the world point for processed (non-RAW)
      * output buffers.</p>
+     * <p>For camera devices, the center of pixel <code>(x,y)</code> is located at
+     * coordinate <code>(x + 0.5, y + 0.5)</code>.  So on a device with a
+     * pre-correction active array of size <code>(10,10)</code>, the valid pixel
+     * indices go from <code>(0,0)-(9,9)</code>, and a perfectly-built camera would
+     * have an optical center at the exact center of the pixel grid, at
+     * coordinates <code>(5.0, 5.0)</code>, which is the top-left corner of pixel
+     * <code>(5,5)</code>.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
      * @see ACAMERA_LENS_POSE_ROTATION
@@ -3059,7 +3071,7 @@
      * outputs will crop horizontally (pillarbox), and 16:9
      * streams will match exactly. These additional crops will
      * be centered within the crop region.</p>
-     * <p>If the coordinate system is android.sensor.info.activeArraysSize, the width and height
+     * <p>If the coordinate system is ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE, the width and height
      * of the crop region cannot be set to be smaller than
      * <code>floor( activeArraySize.width / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code> and
      * <code>floor( activeArraySize.height / ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM )</code>, respectively.</p>
@@ -4675,8 +4687,8 @@
     ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE =                  // byte (acamera_metadata_enum_android_statistics_lens_shading_map_mode_t)
             ACAMERA_STATISTICS_START + 16,
     /**
-     * <p>A control for selecting whether OIS position information is included in output
-     * result metadata.</p>
+     * <p>A control for selecting whether optical stabilization (OIS) position
+     * information is included in output result metadata.</p>
      *
      * <p>Type: byte (acamera_metadata_enum_android_statistics_ois_data_mode_t)</p>
      *
@@ -4686,6 +4698,12 @@
      *   <li>ACaptureRequest</li>
      * </ul></p>
      *
+     * <p>Since optical image stabilization generally involves motion much faster than the duration
+     * of individual image exposure, multiple OIS samples can be included for a single capture
+     * result. For example, if the OIS reporting operates at 200 Hz, a typical camera operating
+     * at 30fps may have 6-7 OIS samples per capture result. This information can be combined
+     * with the rolling shutter skew to account for lens motion during image exposure in
+     * post-processing algorithms.</p>
      */
     ACAMERA_STATISTICS_OIS_DATA_MODE =                          // byte (acamera_metadata_enum_android_statistics_ois_data_mode_t)
             ACAMERA_STATISTICS_START + 17,
@@ -4717,11 +4735,15 @@
      * </ul></p>
      *
      * <p>The array contains the amount of shifts in x direction, in pixels, based on OIS samples.
-     * A positive value is a shift from left to right in active array coordinate system. For
-     * example, if the optical center is (1000, 500) in active array coordinates, a shift of
-     * (3, 0) puts the new optical center at (1003, 500).</p>
+     * A positive value is a shift from left to right in the pre-correction active array
+     * coordinate system. For example, if the optical center is (1000, 500) in pre-correction
+     * active array coordinates, a shift of (3, 0) puts the new optical center at (1003, 500).</p>
      * <p>The number of shifts must match the number of timestamps in
      * ACAMERA_STATISTICS_OIS_TIMESTAMPS.</p>
+     * <p>The OIS samples are not affected by whether lens distortion correction is enabled (on
+     * supporting devices). They are always reported in pre-correction active array coordinates,
+     * since the scaling of OIS shifts would depend on the specific spot on the sensor where
+     * the shift is needed.</p>
      *
      * @see ACAMERA_STATISTICS_OIS_TIMESTAMPS
      */
@@ -4738,11 +4760,15 @@
      * </ul></p>
      *
      * <p>The array contains the amount of shifts in y direction, in pixels, based on OIS samples.
-     * A positive value is a shift from top to bottom in active array coordinate system. For
-     * example, if the optical center is (1000, 500) in active array coordinates, a shift of
-     * (0, 5) puts the new optical center at (1000, 505).</p>
+     * A positive value is a shift from top to bottom in the pre-correction active array
+     * coordinate system. For example, if the optical center is (1000, 500) in pre-correction
+     * active array coordinates, a shift of (0, 5) puts the new optical center at (1000, 505).</p>
      * <p>The number of shifts must match the number of timestamps in
      * ACAMERA_STATISTICS_OIS_TIMESTAMPS.</p>
+     * <p>The OIS samples are not affected by whether lens distortion correction is enabled (on
+     * supporting devices). They are always reported in pre-correction active array coordinates,
+     * since the scaling of OIS shifts would depend on the specific spot on the sensor where
+     * the shift is needed.</p>
      *
      * @see ACAMERA_STATISTICS_OIS_TIMESTAMPS
      */
@@ -5432,16 +5458,34 @@
      * any correction at all would slow down capture rate.  Every output stream will have a
      * similar amount of enhancement applied.</p>
      * <p>The correction only applies to processed outputs such as YUV, JPEG, or DEPTH16; it is not
-     * applied to any RAW output. Metadata coordinates such as face rectangles or metering
-     * regions are also not affected by correction.</p>
+     * applied to any RAW output.</p>
      * <p>This control will be on by default on devices that support this control. Applications
      * disabling distortion correction need to pay extra attention with the coordinate system of
      * metering regions, crop region, and face rectangles. When distortion correction is OFF,
      * metadata coordinates follow the coordinate system of
      * ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE. When distortion is not OFF, metadata
-     * coordinates follow the coordinate system of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.</p>
+     * coordinates follow the coordinate system of ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE.  The
+     * camera device will map these metadata fields to match the corrected image produced by the
+     * camera device, for both capture requests and results.  However, this mapping is not very
+     * precise, since rectangles do not generally map to rectangles when corrected.  Only linear
+     * scaling between the active array and pre-correction active array coordinates is
+     * performed. Applications that require precise correction of metadata need to undo that
+     * linear scaling, and apply a more complete correction that takes into account the app's
+     * own requirements.</p>
+     * <p>The full list of metadata that is affected in this way by distortion correction is:</p>
+     * <ul>
+     * <li>ACAMERA_CONTROL_AF_REGIONS</li>
+     * <li>ACAMERA_CONTROL_AE_REGIONS</li>
+     * <li>ACAMERA_CONTROL_AWB_REGIONS</li>
+     * <li>ACAMERA_SCALER_CROP_REGION</li>
+     * <li>android.statistics.faces</li>
+     * </ul>
      *
+     * @see ACAMERA_CONTROL_AE_REGIONS
+     * @see ACAMERA_CONTROL_AF_REGIONS
+     * @see ACAMERA_CONTROL_AWB_REGIONS
      * @see ACAMERA_LENS_DISTORTION
+     * @see ACAMERA_SCALER_CROP_REGION
      * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
      * @see ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE
      */
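
A note for readers of the pose documentation above: the complete transform is P = [K 0; 0 1] * [R -Rt; 0 1], which reduces to p_c = R * (p_w - t) followed by the intrinsic projection. A minimal numeric sketch with made-up calibration values, not part of the patch:

```cpp
// Illustrative values only: project a world point into pixel coordinates via
// p_c = R * (p_w - t), p = K * p_c, then the homogeneous divide by z.
#include <array>
#include <cstdio>

using Vec3 = std::array<float, 3>;
using Mat3 = std::array<Vec3, 3>;

static Vec3 mul(const Mat3& m, const Vec3& v) {
    return {m[0][0]*v[0] + m[0][1]*v[1] + m[0][2]*v[2],
            m[1][0]*v[0] + m[1][1]*v[1] + m[1][2]*v[2],
            m[2][0]*v[0] + m[2][1]*v[1] + m[2][2]*v[2]};
}

int main() {
    const Mat3 R = {{{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}};  // pose rotation (identity here)
    const Vec3 t = {0.03f, 0.f, 0.f};                    // pose translation, meters
    const Mat3 K = {{{1000, 0, 640}, {0, 1000, 360}, {0, 0, 1}}};  // intrinsics
    const Vec3 p_w = {0.5f, 0.2f, 2.f};                  // point in world coordinates

    const Vec3 centered = {p_w[0] - t[0], p_w[1] - t[1], p_w[2] - t[2]};
    const Vec3 p_c = mul(R, centered);  // world -> camera (note the negated translation)
    const Vec3 p = mul(K, p_c);         // camera -> homogeneous pixel
    std::printf("pixel: (%g, %g)\n", p[0] / p[2], p[1] / p[2]);
    return 0;
}
```
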
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 70c281a..8a97299 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -21,6 +21,9 @@
 	libsoundtriggerservice \
 	libutils
 
+LOCAL_STATIC_LIBRARIES := \
+	libjsoncpp
+
 # TODO oboeservice is the old folder name for aaudioservice. It will be changed.
 LOCAL_C_INCLUDES := \
 	frameworks/av/services/audioflinger \
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 91ebf73..84f9c22 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -338,7 +338,7 @@
     aaudio_sharing_mode_t requestedInputSharingMode  = AAUDIO_SHARING_MODE_SHARED;
     int                   requestedInputChannelCount = NUM_INPUT_CHANNELS;
     aaudio_format_t       requestedInputFormat       = AAUDIO_FORMAT_UNSPECIFIED;
-    int32_t               requestedInputCapacity     = -1;
+    int32_t               requestedInputCapacity     = AAUDIO_UNSPECIFIED;
     aaudio_performance_mode_t inputPerformanceLevel  = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
 
     int32_t               outputFramesPerBurst = 0;
@@ -459,15 +459,8 @@
     argParser.setPerformanceMode(inputPerformanceLevel);
     argParser.setChannelCount(requestedInputChannelCount);
     argParser.setSharingMode(requestedInputSharingMode);
-
-    // Make sure the input buffer has plenty of capacity.
-    // Extra capacity on input should not increase latency if we keep it drained.
-    int32_t inputBufferCapacity = requestedInputCapacity;
-    if (inputBufferCapacity < 0) {
-        int32_t outputBufferCapacity = AAudioStream_getBufferCapacityInFrames(outputStream);
-        inputBufferCapacity = 2 * outputBufferCapacity;
-    }
-    argParser.setBufferCapacity(inputBufferCapacity);
+    // Warning! Changing the input capacity may prevent getting a FAST track on the Legacy path.
+    argParser.setBufferCapacity(requestedInputCapacity);
 
     result = recorder.open(argParser);
     if (result != AAUDIO_OK) {
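
The loopback change above forwards requestedInputCapacity as-is, defaulting to AAUDIO_UNSPECIFIED, since forcing a larger capacity can keep the input stream off the FAST track on the Legacy path. A hedged sketch of the builder call this eventually reaches (standard AAudio NDK API; the helper name is illustrative and full stream setup is elided):

```cpp
// Sketch: AAUDIO_UNSPECIFIED lets the implementation pick the capacity,
// preserving eligibility for a FAST track; an explicit value may not.
#include <aaudio/AAudio.h>
#include <cstdint>

void configureInputCapacity(AAudioStreamBuilder* builder, int32_t requestedCapacity) {
    // requestedCapacity stays AAUDIO_UNSPECIFIED unless the user passed an
    // explicit value on the command line.
    AAudioStreamBuilder_setBufferCapacityInFrames(builder, requestedCapacity);
}
```
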
diff --git a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
index 9d29cf1..d61efd3 100644
--- a/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
+++ b/media/libeffects/loudness/EffectLoudnessEnhancer.cpp
@@ -30,6 +30,26 @@
 #include <audio_effects/effect_loudnessenhancer.h>
 #include "dsp/core/dynamic_range_compression.h"
 
+// BUILD_FLOAT targets building a float effect instead of the legacy int16_t effect.
+#define BUILD_FLOAT
+
+#ifdef BUILD_FLOAT
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_FLOAT;
+
+#else
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+static inline int16_t clamp16(int32_t sample)
+{
+    if ((sample>>15) ^ (sample>>31))
+        sample = 0x7FFF ^ (sample>>31);
+    return sample;
+}
+
+#endif // BUILD_FLOAT
+
 extern "C" {
 
 // effect_handle_t interface implementation for LE effect
@@ -80,13 +100,6 @@
     }
 }
 
-static inline int16_t clamp16(int32_t sample)
-{
-    if ((sample>>15) ^ (sample>>31))
-        sample = 0x7FFF ^ (sample>>31);
-    return sample;
-}
-
 //----------------------------------------------------------------------------
 // LE_setConfig()
 //----------------------------------------------------------------------------
@@ -111,7 +124,7 @@
     if (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) return -EINVAL;
     if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
             pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
-    if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) return -EINVAL;
+    if (pConfig->inputCfg.format != kProcessFormat) return -EINVAL;
 
     pContext->mConfig = *pConfig;
 
@@ -159,7 +172,7 @@
 
     pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
     pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pContext->mConfig.inputCfg.format = kProcessFormat;
     pContext->mConfig.inputCfg.samplingRate = 44100;
     pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
     pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -167,7 +180,7 @@
     pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
     pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
     pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pContext->mConfig.outputCfg.format = kProcessFormat;
     pContext->mConfig.outputCfg.samplingRate = 44100;
     pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
     pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -284,18 +297,41 @@
 
     //ALOGV("LE about to process %d samples", inBuffer->frameCount);
     uint16_t inIdx;
+#ifdef BUILD_FLOAT
+    constexpr float scale = 1 << 15; // power of 2 is lossless conversion to int16_t range
+    constexpr float inverseScale = 1.f / scale;
+    const float inputAmp = pow(10, pContext->mTargetGainmB/2000.0f) * scale;
+#else
     float inputAmp = pow(10, pContext->mTargetGainmB/2000.0f);
+#endif
     float leftSample, rightSample;
     for (inIdx = 0 ; inIdx < inBuffer->frameCount ; inIdx++) {
         // makeup gain is applied on the input of the compressor
+#ifdef BUILD_FLOAT
+        leftSample  = inputAmp * inBuffer->f32[2*inIdx];
+        rightSample = inputAmp * inBuffer->f32[2*inIdx +1];
+        pContext->mCompressor->Compress(&leftSample, &rightSample);
+        inBuffer->f32[2*inIdx]    = leftSample * inverseScale;
+        inBuffer->f32[2*inIdx +1] = rightSample * inverseScale;
+#else
         leftSample  = inputAmp * (float)inBuffer->s16[2*inIdx];
         rightSample = inputAmp * (float)inBuffer->s16[2*inIdx +1];
         pContext->mCompressor->Compress(&leftSample, &rightSample);
         inBuffer->s16[2*inIdx]    = (int16_t) leftSample;
         inBuffer->s16[2*inIdx +1] = (int16_t) rightSample;
+#endif // BUILD_FLOAT
     }
 
     if (inBuffer->raw != outBuffer->raw) {
+#ifdef BUILD_FLOAT
+        if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+            for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
+                outBuffer->f32[i] += inBuffer->f32[i];
+            }
+        } else {
+            memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(float));
+        }
+#else
         if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
             for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
                 outBuffer->s16[i] = clamp16(outBuffer->s16[i] + inBuffer->s16[i]);
@@ -303,6 +339,7 @@
         } else {
             memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(int16_t));
         }
+#endif // BUILD_FLOAT
     }
     if (pContext->mState != LOUDNESS_ENHANCER_STATE_ACTIVE) {
         return -ENODATA;
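
The float path above scales samples by 1 << 15 so the compressor, tuned for int16_t magnitudes, sees the range it expects, then scales back down. A tiny standalone demonstration (not part of the patch) of why a power-of-two scale makes that round trip bit-exact:

```cpp
// Sketch: multiplying a float by a power of two only changes its exponent,
// so scaling up by 1 << 15 and back down is lossless (barring overflow,
// which normal audio magnitudes avoid).
#include <cassert>
#include <cstdio>

int main() {
    constexpr float scale = 1 << 15;
    constexpr float inverseScale = 1.f / scale;
    const float sample = 0.123456f;
    const float roundTrip = (sample * scale) * inverseScale;
    assert(roundTrip == sample);  // bit-exact round trip
    std::printf("ok: %.9g\n", roundTrip);
    return 0;
}
```
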
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
index 70409de..3534149 100644
--- a/media/libeffects/visualizer/Android.mk
+++ b/media/libeffects/visualizer/Android.mk
@@ -19,7 +19,8 @@
 LOCAL_MODULE:= libvisualizer
 
 LOCAL_C_INCLUDES := \
-	$(call include-path-for, audio-effects)
+	$(call include-path-for, audio-effects) \
+	$(call include-path-for, audio-utils)
 
 
 LOCAL_HEADER_LIBRARIES += libhardware_headers
diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp
index 807f24d..e2ccfb7 100644
--- a/media/libeffects/visualizer/EffectVisualizer.cpp
+++ b/media/libeffects/visualizer/EffectVisualizer.cpp
@@ -24,11 +24,25 @@
 #include <string.h>
 #include <time.h>
 
+#include <algorithm> // max
 #include <new>
 
 #include <log/log.h>
 
 #include <audio_effects/effect_visualizer.h>
+#include <audio_utils/primitives.h>
+
+#define BUILD_FLOAT
+
+#ifdef BUILD_FLOAT
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_FLOAT;
+
+#else
+
+static constexpr audio_format_t kProcessFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+#endif // BUILD_FLOAT
 
 extern "C" {
 
@@ -146,7 +160,7 @@
     if (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO) return -EINVAL;
     if (pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_WRITE &&
             pConfig->outputCfg.accessMode != EFFECT_BUFFER_ACCESS_ACCUMULATE) return -EINVAL;
-    if (pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT) return -EINVAL;
+    if (pConfig->inputCfg.format != kProcessFormat) return -EINVAL;
 
     pContext->mConfig = *pConfig;
 
@@ -192,7 +206,7 @@
 {
     pContext->mConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
     pContext->mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pContext->mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pContext->mConfig.inputCfg.format = kProcessFormat;
     pContext->mConfig.inputCfg.samplingRate = 44100;
     pContext->mConfig.inputCfg.bufferProvider.getBuffer = NULL;
     pContext->mConfig.inputCfg.bufferProvider.releaseBuffer = NULL;
@@ -200,7 +214,7 @@
     pContext->mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
     pContext->mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
     pContext->mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
-    pContext->mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
+    pContext->mConfig.outputCfg.format = kProcessFormat;
     pContext->mConfig.outputCfg.samplingRate = 44100;
     pContext->mConfig.outputCfg.bufferProvider.getBuffer = NULL;
     pContext->mConfig.outputCfg.bufferProvider.releaseBuffer = NULL;
@@ -301,15 +315,8 @@
 //--- Effect Control Interface Implementation
 //
 
-static inline int16_t clamp16(int32_t sample)
-{
-    if ((sample>>15) ^ (sample>>31))
-        sample = 0x7FFF ^ (sample>>31);
-    return sample;
-}
-
 int Visualizer_process(
-        effect_handle_t self,audio_buffer_t *inBuffer, audio_buffer_t *outBuffer)
+        effect_handle_t self, audio_buffer_t *inBuffer, audio_buffer_t *outBuffer)
 {
     VisualizerContext * pContext = (VisualizerContext *)self;
 
@@ -324,20 +331,28 @@
         return -EINVAL;
     }
 
+    const size_t sampleLen = inBuffer->frameCount * pContext->mChannelCount;
+
     // perform measurements if needed
     if (pContext->mMeasurementMode & MEASUREMENT_MODE_PEAK_RMS) {
         // find the peak and RMS squared for the new buffer
-        uint32_t inIdx;
-        int16_t maxSample = 0;
         float rmsSqAcc = 0;
-        for (inIdx = 0 ; inIdx < inBuffer->frameCount * pContext->mChannelCount ; inIdx++) {
-            if (inBuffer->s16[inIdx] > maxSample) {
-                maxSample = inBuffer->s16[inIdx];
-            } else if (-inBuffer->s16[inIdx] > maxSample) {
-                maxSample = -inBuffer->s16[inIdx];
-            }
-            rmsSqAcc += (inBuffer->s16[inIdx] * inBuffer->s16[inIdx]);
+
+#ifdef BUILD_FLOAT
+        float maxSample = 0.f;
+        for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+            maxSample = fmax(maxSample, fabs(inBuffer->f32[inIdx]));
+            rmsSqAcc += inBuffer->f32[inIdx] * inBuffer->f32[inIdx];
         }
+        maxSample *= 1 << 15; // scale to int16_t range, with 1 << 15 representing full scale.
+        rmsSqAcc *= 1 << 30; // scale to the square of the int16_t range.
+#else
+        int maxSample = 0;
+        for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+            maxSample = std::max(maxSample, std::abs(int32_t(inBuffer->s16[inIdx])));
+            rmsSqAcc += inBuffer->s16[inIdx] * inBuffer->s16[inIdx];
+        }
+#endif
         // store the measurement
         pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mPeakU16 = (uint16_t)maxSample;
         pContext->mPastMeasurements[pContext->mMeasurementBufferIdx].mRmsSquared =
@@ -348,32 +363,59 @@
         }
     }
 
-    // all code below assumes stereo 16 bit PCM output and input
+#ifdef BUILD_FLOAT
+    float fscale; // multiplicative scale
+#else
     int32_t shift;
+#endif // BUILD_FLOAT
 
     if (pContext->mScalingMode == VISUALIZER_SCALING_MODE_NORMALIZED) {
         // derive capture scaling factor from peak value in current buffer
         // this gives more interesting captures for display.
-        shift = 32;
-        int len = inBuffer->frameCount * 2;
-        for (int i = 0; i < len; i++) {
+
+#ifdef BUILD_FLOAT
+        float maxSample = 0.f;
+        for (size_t inIdx = 0; inIdx < sampleLen; ++inIdx) {
+            maxSample = fmax(maxSample, fabs(inBuffer->f32[inIdx]));
+        }
+        if (maxSample > 0.f) {
+            constexpr float halfish = 127.f / 256.f;
+            fscale = halfish / maxSample;
+            int exp; // unused
+            const float significand = frexp(fscale, &exp);
+            if (significand == 0.5f) {
+                fscale *= 255.f / 256.f; // avoid returning unaltered PCM signal
+            }
+        } else {
+            // scale doesn't matter, the values are all 0.
+            fscale = 1.f;
+        }
+#else
+        int32_t orAccum = 0;
+        for (size_t i = 0; i < sampleLen; ++i) {
             int32_t smp = inBuffer->s16[i];
             if (smp < 0) smp = -smp - 1; // take care to keep the max negative in range
-            int32_t clz = __builtin_clz(smp);
-            if (shift > clz) shift = clz;
+            orAccum |= smp;
         }
+
         // A maximum amplitude signal will have 17 leading zeros, which we want to
         // translate to a shift of 8 (for converting 16 bit to 8 bit)
-        shift = 25 - shift;
+        shift = 25 - __builtin_clz(orAccum);
+
         // Never scale by less than 8 to avoid returning unaltered PCM signal.
         if (shift < 3) {
             shift = 3;
         }
         // add one to combine the division by 2 needed after summing left and right channels below
         shift++;
+#endif // BUILD_FLOAT
     } else {
         assert(pContext->mScalingMode == VISUALIZER_SCALING_MODE_AS_PLAYED);
+#ifdef BUILD_FLOAT
+        fscale = 0.5f;  // default divide by 2 to account for sum of L + R.
+#else
         shift = 9;
+#endif // BUILD_FLOAT
     }
 
     uint32_t captIdx;
@@ -386,9 +428,13 @@
             // wrap around
             captIdx = 0;
         }
-        int32_t smp = inBuffer->s16[2 * inIdx] + inBuffer->s16[2 * inIdx + 1];
-        smp = smp >> shift;
+#ifdef BUILD_FLOAT
+        const float smp = (inBuffer->f32[2 * inIdx] + inBuffer->f32[2 * inIdx + 1]) * fscale;
+        buf[captIdx] = clamp8_from_float(smp);
+#else
+        const int32_t smp = (inBuffer->s16[2 * inIdx] + inBuffer->s16[2 * inIdx + 1]) >> shift;
         buf[captIdx] = ((uint8_t)smp)^0x80;
+#endif // BUILD_FLOAT
     }
 
     // XXX the following two should really be atomic, though it probably doesn't
@@ -400,6 +446,15 @@
     }
 
     if (inBuffer->raw != outBuffer->raw) {
+#ifdef BUILD_FLOAT
+        if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+            for (size_t i = 0; i < sampleLen; ++i) {
+                outBuffer->f32[i] += inBuffer->f32[i];
+            }
+        } else {
+            memcpy(outBuffer->raw, inBuffer->raw, sampleLen * sizeof(float));
+        }
+#else
         if (pContext->mConfig.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
             for (size_t i = 0; i < outBuffer->frameCount*2; i++) {
                 outBuffer->s16[i] = clamp16(outBuffer->s16[i] + inBuffer->s16[i]);
@@ -407,6 +462,7 @@
         } else {
             memcpy(outBuffer->raw, inBuffer->raw, outBuffer->frameCount * 2 * sizeof(int16_t));
         }
+#endif // BUILD_FLOAT
     }
     if (pContext->mState != VISUALIZER_STATE_ACTIVE) {
         return -ENODATA;
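
In NORMALIZED mode the float path computes a gain of roughly half of full scale divided by the buffer peak, then nudges exact powers of two down by 255/256 so the capture never reproduces the PCM signal unaltered (the analogue of the old minimum shift). A standalone sketch of that derivation, with an illustrative input:

```cpp
// Sketch of the normalized-mode gain derivation above. frexp() returns a
// significand in [0.5, 1); exactly 0.5 means fscale is a power of two, which
// could hand back the PCM signal unaltered, so it is nudged down.
#include <cmath>
#include <cstdio>

static float normalizedScale(float maxSample) {
    if (maxSample <= 0.f) {
        return 1.f;  // all-zero buffer: the scale is irrelevant
    }
    constexpr float halfish = 127.f / 256.f;
    float fscale = halfish / maxSample;
    int exp;  // unused
    if (std::frexp(fscale, &exp) == 0.5f) {
        fscale *= 255.f / 256.f;  // avoid an exact power of two
    }
    return fscale;
}

int main() {
    std::printf("fscale for peak 0.25: %g\n", normalizedScale(0.25f));
    return 0;
}
```
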
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index a5f5fc6..0784939 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1120,6 +1120,7 @@
             } else if (what == DecoderBase::kWhatShutdownCompleted) {
                 ALOGV("%s shutdown completed", audio ? "audio" : "video");
                 if (audio) {
+                    Mutex::Autolock autoLock(mDecoderLock);
                     mAudioDecoder.clear();
                     mAudioDecoderError = false;
                     ++mAudioDecoderGeneration;
@@ -1127,6 +1128,7 @@
                     CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
                     mFlushingAudio = SHUT_DOWN;
                 } else {
+                    Mutex::Autolock autoLock(mDecoderLock);
                     mVideoDecoder.clear();
                     mVideoDecoderError = false;
                     ++mVideoDecoderGeneration;
@@ -1447,29 +1449,6 @@
             break;
         }
 
-        case kWhatGetStats:
-        {
-            ALOGV("kWhatGetStats");
-
-            Vector<sp<AMessage>> *trackStats;
-            CHECK(msg->findPointer("trackstats", (void**)&trackStats));
-
-            trackStats->clear();
-            if (mVideoDecoder != NULL) {
-                trackStats->push_back(mVideoDecoder->getStats());
-            }
-            if (mAudioDecoder != NULL) {
-                trackStats->push_back(mAudioDecoder->getStats());
-            }
-
-            // respond for synchronization
-            sp<AMessage> response = new AMessage;
-            sp<AReplyToken> replyID;
-            CHECK(msg->senderAwaitsResponse(&replyID));
-            response->postReply(replyID);
-            break;
-        }
-
         default:
             TRESPASS();
             break;
@@ -1817,6 +1796,7 @@
           (long long)currentPositionUs, forceNonOffload, needsToCreateAudioDecoder);
     if (mAudioDecoder != NULL) {
         mAudioDecoder->pause();
+        Mutex::Autolock autoLock(mDecoderLock);
         mAudioDecoder.clear();
         mAudioDecoderError = false;
         ++mAudioDecoderGeneration;
@@ -1935,6 +1915,8 @@
         }
     }
 
+    Mutex::Autolock autoLock(mDecoderLock);
+
     if (audio) {
         sp<AMessage> notify = new AMessage(kWhatAudioNotify, this);
         ++mAudioDecoderGeneration;
@@ -2236,13 +2218,15 @@
 void NuPlayer::getStats(Vector<sp<AMessage> > *trackStats) {
     CHECK(trackStats != NULL);
 
-    ALOGV("NuPlayer::getStats()");
-    sp<AMessage> msg = new AMessage(kWhatGetStats, this);
-    msg->setPointer("trackstats", trackStats);
+    trackStats->clear();
 
-    sp<AMessage> response;
-    (void) msg->postAndAwaitResponse(&response);
-    // response is for synchronization, ignore contents
+    Mutex::Autolock autoLock(mDecoderLock);
+    if (mVideoDecoder != NULL) {
+        trackStats->push_back(mVideoDecoder->getStats());
+    }
+    if (mAudioDecoder != NULL) {
+        trackStats->push_back(mAudioDecoder->getStats());
+    }
 }
 
 sp<MetaData> NuPlayer::getFileMeta() {
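
With this change getStats() no longer round-trips a kWhatGetStats message; it reads the decoder pointers directly under the new mDecoderLock, which the shutdown and recreate paths also take. A simplified standalone sketch of the pattern, with std::mutex standing in for android::Mutex:

```cpp
// Sketch: stats readers take the same lock the decoder clear()/recreate
// paths hold, so the pointers cannot vanish mid-read.
#include <memory>
#include <mutex>
#include <vector>

struct Stats {};
struct Decoder {
    Stats getStats() const { return {}; }
};

struct Player {
    std::mutex decoderLock;  // guards audioDecoder and videoDecoder
    std::shared_ptr<Decoder> audioDecoder;
    std::shared_ptr<Decoder> videoDecoder;

    void getStats(std::vector<Stats>* trackStats) {
        trackStats->clear();
        std::lock_guard<std::mutex> lock(decoderLock);  // same lock as clear()
        if (videoDecoder) trackStats->push_back(videoDecoder->getStats());
        if (audioDecoder) trackStats->push_back(audioDecoder->getStats());
    }
};

int main() {
    Player player;
    std::vector<Stats> stats;
    player.getStats(&stats);
    return 0;
}
```
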
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index e400d16..9f5be06 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -159,7 +159,6 @@
         kWhatPrepareDrm                 = 'pDrm',
         kWhatReleaseDrm                 = 'rDrm',
         kWhatMediaClockNotify           = 'mckN',
-        kWhatGetStats                   = 'gSts',
     };
 
     wp<NuPlayerDriver> mDriver;
@@ -175,6 +174,7 @@
     sp<DecoderBase> mVideoDecoder;
     bool mOffloadAudio;
     sp<DecoderBase> mAudioDecoder;
+    Mutex mDecoderLock;  // guards |mAudioDecoder| and |mVideoDecoder|.
     sp<CCDecoder> mCCDecoder;
     sp<Renderer> mRenderer;
     sp<ALooper> mRendererLooper;
diff --git a/media/libstagefright/MetaDataUtils.cpp b/media/libstagefright/MetaDataUtils.cpp
index 04f6ade..2475e7b 100644
--- a/media/libstagefright/MetaDataUtils.cpp
+++ b/media/libstagefright/MetaDataUtils.cpp
@@ -16,8 +16,10 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MetaDataUtils"
+#include <utils/Log.h>
 
 #include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaDataUtils.h>
@@ -25,6 +27,10 @@
 namespace android {
 
 bool MakeAVCCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size) {
+    if (data == nullptr || size == 0) {
+        return false;
+    }
+
     int32_t width;
     int32_t height;
     int32_t sarWidth;
@@ -46,6 +52,44 @@
     return true;
 }
 
+bool MakeAACCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size) {
+    if (data == nullptr || size < 7) {
+        return false;
+    }
+
+    ABitReader bits(data, size);
+
+    // adts_fixed_header
+
+    if (bits.getBits(12) != 0xfffu) {
+        ALOGE("Wrong atds_fixed_header");
+        return false;
+    }
+
+    bits.skipBits(4);  // ID, layer, protection_absent
+
+    unsigned profile = bits.getBits(2);
+    if (profile == 3u) {
+        ALOGE("profile should not be 3");
+        return false;
+    }
+    unsigned sampling_freq_index = bits.getBits(4);
+    bits.getBits(1);  // private_bit
+    unsigned channel_configuration = bits.getBits(3);
+    if (channel_configuration == 0u) {
+        ALOGE("channel_config should not be 0");
+        return false;
+    }
+
+    if (!MakeAACCodecSpecificData(
+            meta, profile, sampling_freq_index, channel_configuration)) {
+        return false;
+    }
+
+    meta.setInt32(kKeyIsADTS, true);
+    return true;
+}
+
 bool MakeAACCodecSpecificData(
         MetaDataBase &meta,
         unsigned profile, unsigned sampling_freq_index,
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index f173e0f..06b15b3 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -689,7 +689,6 @@
                     notify(OMX_EventError, OMX_ErrorUndefined, err_code, NULL);
                     return;
                 }
-                mIsCodecConfigFlushRequired = true;
             }
 
             if (!mSampFreq || !mNumChannels) {
@@ -713,10 +712,14 @@
             signed int bytesConsumed = 0;
             int errorCode = 0;
             if (mIsCodecInitialized) {
+                mIsCodecConfigFlushRequired = true;
                 errorCode =
                     decodeXAACStream(inBuffer, inBufferLength, &bytesConsumed, &numOutBytes);
-            } else {
+            } else if (!mIsCodecConfigFlushRequired) {
                 ALOGW("Assumption that first frame after header initializes decoder failed!");
+                mSignalledError = true;
+                notify(OMX_EventError, OMX_ErrorUndefined, -1, NULL);
+                return;
             }
             inHeader->nFilledLen -= bytesConsumed;
             inHeader->nOffset += bytesConsumed;
diff --git a/media/libstagefright/include/media/stagefright/MetaDataUtils.h b/media/libstagefright/include/media/stagefright/MetaDataUtils.h
index d5a8080..4a7107d 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataUtils.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataUtils.h
@@ -24,6 +24,7 @@
 
 struct ABuffer;
 bool MakeAVCCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size);
+bool MakeAACCodecSpecificData(MetaDataBase &meta, const uint8_t *data, size_t size);
 bool MakeAACCodecSpecificData(MetaDataBase &meta, unsigned profile, unsigned sampling_freq_index,
         unsigned channel_configuration);
 
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 32635d1..fb498d4 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -775,10 +775,12 @@
     ALOGV("new stream PID 0x%02x, type 0x%02x, scrambled %d, SampleEncrypted: %d",
             elementaryPID, streamType, mScrambled, mSampleEncrypted);
 
-    uint32_t flags =
-            (isVideo() && mScrambled) ? ElementaryStreamQueue::kFlag_ScrambledData :
-            (mSampleEncrypted) ? ElementaryStreamQueue::kFlag_SampleEncryptedData :
-            0;
+    uint32_t flags = 0;
+    if ((isVideo() || isAudio()) && mScrambled) {
+        flags = ElementaryStreamQueue::kFlag_ScrambledData;
+    } else if (mSampleEncrypted) {
+        flags = ElementaryStreamQueue::kFlag_SampleEncryptedData;
+    }
 
     ElementaryStreamQueue::Mode mode = ElementaryStreamQueue::INVALID;
 
@@ -1504,7 +1506,13 @@
         descrambleBytes = bytesWritten;
     }
 
-    sp<ABuffer> buffer;
+    // |buffer| points to the buffer from which we'd parse the PES header.
+    // When the output stream is scrambled, it points to mDescrambledBuffer
+    // (unless all packets in this PES are actually clear, in which case,
+    // it points to mBuffer since we never copied into mDescrambledBuffer).
+    // When the output stream is clear, it points to mBuffer, and we'll
+    // copy all descrambled data back to mBuffer.
+    sp<ABuffer> buffer = mBuffer;
     if (mQueue->isScrambled()) {
         // Queue subSample info for scrambled queue
         sp<ABuffer> clearSizesBuffer = new ABuffer(mSubSamples.size() * 4);
@@ -1533,15 +1541,18 @@
         }
         // Pass the original TS subsample size now. The PES header adjust
         // will be applied when the scrambled AU is dequeued.
+        // Note that if descrambleBytes is 0, this PES contains only clear TS
+        // packets, so leadingClearBytes is the entire buffer size.
         mQueue->appendScrambledData(
-                mBuffer->data(), mBuffer->size(), sctrl,
-                isSync, clearSizesBuffer, encSizesBuffer);
+                mBuffer->data(), mBuffer->size(),
+                (descrambleBytes > 0) ? descrambleBytes : mBuffer->size(),
+                sctrl, isSync, clearSizesBuffer, encSizesBuffer);
 
-        buffer = mDescrambledBuffer;
+        if (descrambleBytes > 0) {
+            buffer = mDescrambledBuffer;
+        }
     } else {
         memcpy(mBuffer->data(), mDescrambledBuffer->data(), descrambleBytes);
-
-        buffer = mBuffer;
     }
 
     ABitReader br(buffer->data(), buffer->size());
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 90005c3..fb8b9fd 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -439,7 +439,8 @@
         ALOGE("appending data after EOS");
         return ERROR_MALFORMED;
     }
-    if (mBuffer == NULL || mBuffer->size() == 0) {
+
+    if (!isScrambled() && (mBuffer == NULL || mBuffer->size() == 0)) {
         switch (mMode) {
             case H264:
             case MPEG_VIDEO:
@@ -698,6 +699,7 @@
 
 void ElementaryStreamQueue::appendScrambledData(
         const void *data, size_t size,
+        size_t leadingClearBytes,
         int32_t keyId, bool isSync,
         sp<ABuffer> clearSizes, sp<ABuffer> encSizes) {
     if (!isScrambled()) {
@@ -725,6 +727,7 @@
 
     ScrambledRangeInfo scrambledInfo;
     scrambledInfo.mLength = size;
+    scrambledInfo.mLeadingClearBytes = leadingClearBytes;
     scrambledInfo.mKeyId = keyId;
     scrambledInfo.mIsSync = isSync;
     scrambledInfo.mClearSizes = clearSizes;
@@ -737,7 +740,6 @@
 
 sp<ABuffer> ElementaryStreamQueue::dequeueScrambledAccessUnit() {
     size_t nextScan = mBuffer->size();
-    mBuffer->setRange(0, 0);
     int32_t pesOffset = 0, pesScramblingControl = 0;
     int64_t timeUs = fetchTimestamp(nextScan, &pesOffset, &pesScramblingControl);
     if (timeUs < 0ll) {
@@ -748,6 +750,7 @@
     // return scrambled unit
     int32_t keyId = pesScramblingControl, isSync = 0, scrambledLength = 0;
     sp<ABuffer> clearSizes, encSizes;
+    size_t leadingClearBytes = 0;
     while (mScrambledRangeInfos.size() > mRangeInfos.size()) {
         auto it = mScrambledRangeInfos.begin();
         ALOGV("[stream %d] fetching scrambled range: size=%zu", mMode, it->mLength);
@@ -765,6 +768,7 @@
         clearSizes = it->mClearSizes;
         encSizes = it->mEncSizes;
         isSync = it->mIsSync;
+        leadingClearBytes = it->mLeadingClearBytes;
         mScrambledRangeInfos.erase(it);
     }
     if (scrambledLength == 0) {
@@ -772,6 +776,70 @@
         return NULL;
     }
 
+    // Retrieve the leading clear bytes info, and use it to set the clear
+    // range on mBuffer. Note that leadingClearBytes includes the PES header
+    // portion, while mBuffer doesn't.
+    if ((int32_t)leadingClearBytes > pesOffset) {
+        mBuffer->setRange(0, leadingClearBytes - pesOffset);
+    } else {
+        mBuffer->setRange(0, 0);
+    }
+
+    // Try to parse the format; if that fails, set up a dummy format.
+    // Only the following modes are supported for scrambled content for now
+    // (this will be expanded later).
+    if (mFormat == NULL) {
+        mFormat = new MetaData;
+        switch (mMode) {
+            case H264:
+            {
+                if (!MakeAVCCodecSpecificData(
+                        *mFormat, mBuffer->data(), mBuffer->size())) {
+                    ALOGI("Creating dummy AVC format for scrambled content");
+
+                    mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+                    mFormat->setInt32(kKeyWidth, 1280);
+                    mFormat->setInt32(kKeyHeight, 720);
+                }
+                break;
+            }
+            case AAC:
+            {
+                if (!MakeAACCodecSpecificData(
+                        *mFormat, mBuffer->data(), mBuffer->size())) {
+                    ALOGI("Creating dummy AAC format for scrambled content");
+
+                    MakeAACCodecSpecificData(*mFormat,
+                            1 /*profile*/, 7 /*sampling_freq_index*/, 1 /*channel_config*/);
+                    mFormat->setInt32(kKeyIsADTS, true);
+                }
+
+                break;
+            }
+            case MPEG_VIDEO:
+            {
+                ALOGI("Creating dummy MPEG format for scrambled content");
+
+                mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
+                mFormat->setInt32(kKeyWidth, 1280);
+                mFormat->setInt32(kKeyHeight, 720);
+                break;
+            }
+            default:
+            {
+                ALOGE("Unknown mode for scrambled content");
+                return NULL;
+            }
+        }
+
+        // for MediaExtractor.CasInfo
+        mFormat->setInt32(kKeyCASystemID, mCASystemId);
+        mFormat->setData(kKeyCASessionID,
+                0, mCasSessionId.data(), mCasSessionId.size());
+    }
+
+    mBuffer->setRange(0, 0);
+
     // copy into scrambled access unit
     sp<ABuffer> scrambledAccessUnit = ABuffer::CreateAsCopy(
             mScrambledBuffer->data(), scrambledLength);
@@ -803,7 +871,11 @@
 }
 
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnit() {
-    if ((mFlags & kFlag_AlignedData) && mMode == H264 && !isScrambled()) {
+    if (isScrambled()) {
+        return dequeueScrambledAccessUnit();
+    }
+
+    if ((mFlags & kFlag_AlignedData) && mMode == H264) {
         if (mRangeInfos.empty()) {
             return NULL;
         }
@@ -1114,25 +1186,11 @@
         bool protection_absent = bits.getBits(1) != 0;
 
         if (mFormat == NULL) {
-            unsigned profile = bits.getBits(2);
-            if (profile == 3u) {
-                ALOGE("profile should not be 3");
-                return NULL;
-            }
-            unsigned sampling_freq_index = bits.getBits(4);
-            bits.getBits(1);  // private_bit
-            unsigned channel_configuration = bits.getBits(3);
-            if (channel_configuration == 0u) {
-                ALOGE("channel_config should not be 0");
-                return NULL;
-            }
-            bits.skipBits(2);  // original_copy, home
-
             mFormat = new MetaData;
-            MakeAACCodecSpecificData(*mFormat,
-                    profile, sampling_freq_index, channel_configuration);
-
-            mFormat->setInt32(kKeyIsADTS, true);
+            if (!MakeAACCodecSpecificData(
+                    *mFormat, mBuffer->data() + offset, mBuffer->size() - offset)) {
+                return NULL;
+            }
 
             int32_t sampleRate;
             int32_t numChannels;
@@ -1147,12 +1205,12 @@
 
             ALOGI("found AAC codec config (%d Hz, %d channels)",
                  sampleRate, numChannels);
-        } else {
-            // profile_ObjectType, sampling_frequency_index, private_bits,
-            // channel_configuration, original_copy, home
-            bits.skipBits(12);
         }
 
+        // profile_ObjectType, sampling_frequency_index, private_bits,
+        // channel_configuration, original_copy, home
+        bits.skipBits(12);
+
         // adts_variable_header
 
         // copyright_identification_bit, copyright_identification_start
@@ -1267,27 +1325,6 @@
 }
 
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() {
-    if (isScrambled()) {
-        if (mBuffer == NULL || mBuffer->size() == 0) {
-            return NULL;
-        }
-        if (mFormat == NULL) {
-            mFormat = new MetaData;
-            if (!MakeAVCCodecSpecificData(*mFormat, mBuffer->data(), mBuffer->size())) {
-                ALOGW("Creating dummy AVC format for scrambled content");
-                mFormat = new MetaData;
-                mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
-                mFormat->setInt32(kKeyWidth, 1280);
-                mFormat->setInt32(kKeyHeight, 720);
-            }
-            // for MediaExtractor.CasInfo
-            mFormat->setInt32(kKeyCASystemID, mCASystemId);
-            mFormat->setData(kKeyCASessionID, 0,
-                    mCasSessionId.data(), mCasSessionId.size());
-        }
-        return dequeueScrambledAccessUnit();
-    }
-
     const uint8_t *data = mBuffer->data();
 
     size_t size = mBuffer->size();
@@ -1587,25 +1624,6 @@
 }
 
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitMPEGVideo() {
-    if (isScrambled()) {
-        if (mBuffer == NULL || mBuffer->size() == 0) {
-            return NULL;
-        }
-        if (mFormat == NULL) {
-            ALOGI("Creating dummy MPEG format for scrambled content");
-            mFormat = new MetaData;
-            mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
-            mFormat->setInt32(kKeyWidth, 1280);
-            mFormat->setInt32(kKeyHeight, 720);
-
-            // for MediaExtractor.CasInfo
-            mFormat->setInt32(kKeyCASystemID, mCASystemId);
-            mFormat->setData(kKeyCASessionID, 0,
-                    mCasSessionId.data(), mCasSessionId.size());
-        }
-        return dequeueScrambledAccessUnit();
-    }
-
     const uint8_t *data = mBuffer->data();
     size_t size = mBuffer->size();
 
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index 8c1d112..3227f47 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -61,6 +61,7 @@
 
     void appendScrambledData(
             const void *data, size_t size,
+            size_t leadingClearBytes,
             int32_t keyId, bool isSync,
             sp<ABuffer> clearSizes, sp<ABuffer> encSizes);
 
@@ -86,8 +87,8 @@
     };
 
     struct ScrambledRangeInfo {
-        //int64_t mTimestampUs;
         size_t mLength;
+        size_t mLeadingClearBytes;
         int32_t mKeyId;
         int32_t mIsSync;
         sp<ABuffer> mClearSizes;
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index c0aa477..2c26ba4 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -42,6 +42,7 @@
 
 LOCAL_STATIC_LIBRARIES := \
     libcpustats \
+    libjsoncpp \
     libsndfile \
 
 LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 38483c3..43566b7 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -58,6 +58,8 @@
 
 #include <audio_utils/primitives.h>
 
+#include <json/json.h>
+
 #include <powermanager/PowerManager.h>
 
 #include <media/IMediaLogService.h>
@@ -440,8 +442,11 @@
         const bool formatJson = std::any_of(args.begin(), args.end(),
                 [](const String16 &arg) { return arg == String16("--json"); });
         if (formatJson) {
+            Json::Value root = getJsonDump();
+            Json::FastWriter writer;
+            std::string rootStr = writer.write(root);
             // XXX consider buffering if the string happens to be too long.
-            dprintf(fd, "%s", getJsonString().c_str());
+            dprintf(fd, "%s", rootStr.c_str());
             return NO_ERROR;
         }
 
@@ -556,34 +561,30 @@
     return NO_ERROR;
 }
 
-std::string AudioFlinger::getJsonString()
+Json::Value AudioFlinger::getJsonDump()
 {
-    std::string jsonStr = "{\n";
+    Json::Value root(Json::objectValue);
     const bool locked = dumpTryLock(mLock);
 
     // failed to lock - AudioFlinger is probably deadlocked
     if (!locked) {
-        jsonStr += "    \"deadlock_message\": ";
-        jsonStr += kDeadlockedString;
-        jsonStr += ",\n";
+        root["deadlock_message"] = kDeadlockedString;
     }
     // FIXME risky to access data structures without a lock held?
 
-    jsonStr += "  \"Playback_Threads\": [\n";
+    Json::Value playbackThreads = Json::arrayValue;
     // dump playback threads
     for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
-        if (i != 0) {
-            jsonStr += ",\n";
-        }
-        jsonStr += mPlaybackThreads.valueAt(i)->getJsonString();
+        playbackThreads.append(mPlaybackThreads.valueAt(i)->getJsonDump());
     }
-    jsonStr += "\n  ]\n}\n";
 
     if (locked) {
         mLock.unlock();
     }
 
-    return jsonStr;
+    root["playback_threads"] = playbackThreads;
+
+    return root;
 }
 
 sp<AudioFlinger::Client> AudioFlinger::registerPid(pid_t pid)
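
getJsonDump() now assembles a jsoncpp Json::Value tree and serializes it once at the dump site instead of hand-concatenating JSON strings. A minimal sketch of the pattern (link against libjsoncpp; the values are illustrative):

```cpp
// Sketch: build a Json::Value tree, then serialize once with
// Json::FastWriter, mirroring the dump(--json) path above.
#include <json/json.h>
#include <cstdio>
#include <string>

int main() {
    Json::Value root(Json::objectValue);
    Json::Value threads(Json::arrayValue);
    Json::Value thread(Json::objectValue);
    thread["status"] = "ok";
    threads.append(thread);
    root["playback_threads"] = threads;

    Json::FastWriter writer;  // compact single-line output
    const std::string out = writer.write(root);
    std::printf("%s", out.c_str());
    return 0;
}
```
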
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 9b9a15d..e9e6e94 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -79,6 +79,7 @@
 
 #include <powermanager/IPowerManager.h>
 
+#include <json/json.h>
 #include <media/nblog/NBLog.h>
 #include <private/media/AudioEffectShared.h>
 #include <private/media/AudioTrackShared.h>
@@ -114,7 +115,7 @@
     static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
 
     virtual     status_t    dump(int fd, const Vector<String16>& args);
-                std::string getJsonString();
+                Json::Value getJsonDump();
 
     // IAudioFlinger interface, in binder opcode order
     virtual sp<IAudioTrack> createTrack(const CreateTrackInput& input,
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
index e26dca1..2abfbfb 100644
--- a/services/audioflinger/FastMixerDumpState.cpp
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -24,6 +24,7 @@
 #include <cpustats/ThreadCpuUsage.h>
 #endif
 #endif
+#include <json/json.h>
 #include <string>
 #include <utils/Debug.h>
 #include <utils/Log.h>
@@ -205,14 +206,13 @@
     }
 }
 
-// TODO get rid of extraneous lines and use better key names.
-// TODO may go back to using a library to do the json formatting.
-std::string FastMixerDumpState::getJsonString() const
+Json::Value FastMixerDumpState::getJsonDump() const
 {
+    Json::Value root(Json::objectValue);
     if (mCommand == FastMixerState::INITIAL) {
-        return "    {\n      \"status\": \"uninitialized\"\n    }";
+        root["status"] = "uninitialized";
+        return root;
     }
-    std::string jsonStr = "    {\n";
 #ifdef FAST_THREAD_STATISTICS
     // find the interval of valid samples
     const uint32_t bounds = mBounds;
@@ -230,31 +230,25 @@
     }
     // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
     // and adjusted CPU load in MHz normalized for CPU clock frequency
-    std::string jsonWallStr = "      \"wall_clock_time\":[";
-    std::string jsonLoadNsStr = "      \"raw_cpu_load\":[";
+    Json::Value jsonWall(Json::arrayValue);
+    Json::Value jsonLoadNs(Json::arrayValue);
     // loop over all the samples
     for (uint32_t j = 0; j < n; ++j) {
         size_t i = oldestClosed++ & (mSamplingN - 1);
         uint32_t wallNs = mMonotonicNs[i];
-        if (j != 0) {
-            jsonWallStr += ',';
-            jsonLoadNsStr += ',';
-        }
-        /* jsonObject["wall"].append(wallNs); */
-        jsonWallStr += std::to_string(wallNs);
+        jsonWall.append(wallNs);
         uint32_t sampleLoadNs = mLoadNs[i];
-        jsonLoadNsStr += std::to_string(sampleLoadNs);
+        jsonLoadNs.append(sampleLoadNs);
     }
-    jsonWallStr += ']';
-    jsonLoadNsStr += ']';
     if (n) {
-        jsonStr += jsonWallStr + ",\n" + jsonLoadNsStr + "\n";
+        root["wall_clock_time_ns"] = jsonWall;
+        root["raw_cpu_load_ns"] = jsonLoadNs;
+        root["status"] = "ok";
     } else {
-        //dprintf(fd, "  No FastMixer statistics available currently\n");
+        root["status"] = "unavailable";
     }
 #endif
-    jsonStr += "    }";
-    return jsonStr;
+    return root;
 }
 
 }   // android
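
The statistics loop above walks a power-of-two ring buffer: masking the running index with (mSamplingN - 1) wraps it without a modulo. A standalone sketch of that indexing:

```cpp
// Sketch: power-of-two ring-buffer indexing as used by the FastMixer dump
// state (samplingN must be a power of two for the mask trick to work).
#include <cstdint>
#include <cstdio>

int main() {
    constexpr uint32_t samplingN = 8;            // power of two
    uint32_t samples[samplingN] = {0};
    for (uint32_t k = 0; k < 20; ++k) {          // writer overruns the ring
        samples[k & (samplingN - 1)] = k;
    }
    uint32_t oldest = 20 - samplingN;            // oldest still-valid sample
    for (uint32_t j = 0; j < samplingN; ++j) {
        const uint32_t i = oldest++ & (samplingN - 1);
        std::printf("%u ", samples[i]);          // prints 12..19 in order
    }
    std::printf("\n");
    return 0;
}
```
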
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 81c4175..69c2e4e 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <string>
 #include <audio_utils/TimestampVerifier.h>
+#include <json/json.h>
 #include "Configuration.h"
 #include "FastThreadDumpState.h"
 #include "FastMixerState.h"
@@ -67,7 +68,7 @@
     /*virtual*/ ~FastMixerDumpState();
 
     void dump(int fd) const;             // should only be called on a stable copy, not the original
-    std::string getJsonString() const;   // should only be called on a stable copy, not the original
+    Json::Value getJsonDump() const;     // should only be called on a stable copy, not the original
 
     double   mLatencyMs = 0.;   // measured latency, default of 0 if no valid timestamp read.
     uint32_t mWriteSequence;    // incremented before and after each write()
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3ef8ce2..cd585f5 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -42,6 +42,7 @@
 #include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
 #include <audio_utils/minifloat.h>
+#include <json/json.h>
 #include <system/audio_effects/effect_ns.h>
 #include <system/audio_effects/effect_aec.h>
 #include <system/audio.h>
@@ -1763,9 +1764,9 @@
     mLocalLog.dump(fd, "   " /* prefix */, 40 /* lines */);
 }
 
-std::string AudioFlinger::PlaybackThread::getJsonString() const
+Json::Value AudioFlinger::PlaybackThread::getJsonDump() const
 {
-    return "{}";
+    return Json::Value(Json::objectValue);
 }
 
 void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -5141,14 +5142,20 @@
     }
 }
 
-std::string AudioFlinger::MixerThread::getJsonString() const
+Json::Value AudioFlinger::MixerThread::getJsonDump() const
 {
-    // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
-    // while we are dumping it.  It may be inconsistent, but it won't mutate!
-    // This is a large object so we place it on the heap.
-    // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
-    return std::unique_ptr<FastMixerDumpState>(new FastMixerDumpState(mFastMixerDumpState))
-        ->getJsonString();
+    Json::Value root;
+    if (hasFastMixer()) {
+        // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
+        // while we are dumping it.  It may be inconsistent, but it won't mutate!
+        // This is a large object so we place it on the heap.
+        // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
+        const std::unique_ptr<FastMixerDumpState> copy(new FastMixerDumpState(mFastMixerDumpState));
+        root["fastmixer_stats"] = copy->getJsonDump();
+    } else {
+        root["fastmixer_stats"] = "no_fastmixer";
+    }
+    return root;
 }
 
 uint32_t AudioFlinger::MixerThread::idleSleepTimeUs() const
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index f0d625c..2ab0bee 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -662,7 +662,7 @@
 
                 void        dump(int fd, const Vector<String16>& args);
-                // returns a string of audio performance related data in JSON format.
+                // returns audio performance related data in JSON format.
-    virtual     std::string getJsonString() const;
+    virtual     Json::Value getJsonDump() const;
 
     // Thread virtuals
     virtual     bool        threadLoop();
@@ -1108,7 +1108,7 @@
     virtual     bool        checkForNewParameter_l(const String8& keyValuePair,
                                                    status_t& status);
     virtual     void        dumpInternals(int fd, const Vector<String16>& args);
-                std::string getJsonString() const override;
+                Json::Value getJsonDump() const override;
 
     virtual     bool        isTrackAllowed_l(
                                     audio_channel_mask_t channelMask, audio_format_t format,
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index d1515e2..96079cc 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -4761,6 +4761,7 @@
 
     nextAudioPortGeneration();
 
+    audio_devices_t device = inputDesc->mDevice;
     ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
     if (index >= 0) {
         sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
@@ -4771,6 +4772,12 @@
 
     inputDesc->close();
     mInputs.removeItem(input);
+
+    audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
+    if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+            mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
+        SoundTrigger::setCaptureState(false);
+    }
 }
 
 SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(
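
The close-input hook lowers the SoundTrigger capture state only when the closed input was on a primary input device and no active inputs remain on those devices. A standalone sketch of the bitmask test; the constants follow the system/audio.h pattern but are restated here purely for illustration:

```cpp
// Sketch: input device constants share a direction bit (AUDIO_DEVICE_BIT_IN)
// that must be cleared before intersecting the per-device bits.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kAudioDeviceBitIn = 0x80000000u;  // AUDIO_DEVICE_BIT_IN
constexpr uint32_t kInBuiltinMic     = kAudioDeviceBitIn | 0x4u;
constexpr uint32_t kInWiredHeadset   = kAudioDeviceBitIn | 0x10u;

int main() {
    const uint32_t device = kInBuiltinMic;           // device of the closed input
    const uint32_t primaryInputDevices = kInBuiltinMic | kInWiredHeadset;
    const bool onPrimary =
            (device & primaryInputDevices & ~kAudioDeviceBitIn) != 0;
    std::printf("closed input was on a primary input device: %s\n",
                onPrimary ? "yes" : "no");
    return 0;
}
```
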
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 491ed72..4c6718c 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2738,13 +2738,13 @@
 status_t Camera3Device::registerInFlight(uint32_t frameNumber,
         int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
         bool hasAppCallback, nsecs_t maxExpectedDuration,
-        std::set<String8>& physicalCameraIds) {
+        std::set<String8>& physicalCameraIds, bool isStillCapture) {
     ATRACE_CALL();
     Mutex::Autolock l(mInFlightLock);
 
     ssize_t res;
     res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
-            hasAppCallback, maxExpectedDuration, physicalCameraIds));
+            hasAppCallback, maxExpectedDuration, physicalCameraIds, isStillCapture));
     if (res < 0) return res;
 
     if (mInFlightMap.size() == 1) {
@@ -2810,6 +2810,10 @@
     if (request.numBuffersLeft == 0 &&
             (request.skipResultMetadata ||
             (request.haveResultMetadata && shutterTimestamp != 0))) {
+        if (request.stillCapture) {
+            ATRACE_ASYNC_END("still capture", frameNumber);
+        }
+
         ATRACE_ASYNC_END("frame capture", frameNumber);
 
         // Sanity check - if sensor timestamp matches shutter timestamp in the
@@ -4939,12 +4943,21 @@
         if (batchedRequest && i != mNextRequests.size()-1) {
             hasCallback = false;
         }
+        bool isStillCapture = false;
+        if (!mNextRequests[0].captureRequest->mSettingsList.begin()->metadata.isEmpty()) {
+            camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
+            find_camera_metadata_ro_entry(halRequest->settings, ANDROID_CONTROL_CAPTURE_INTENT, &e);
+            if ((e.count > 0) && (e.data.u8[0] == ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE)) {
+                isStillCapture = true;
+                ATRACE_ASYNC_BEGIN("still capture", mNextRequests[i].halRequest.frame_number);
+            }
+        }
         res = parent->registerInFlight(halRequest->frame_number,
                 totalNumBuffers, captureRequest->mResultExtras,
                 /*hasInput*/halRequest->input_buffer != NULL,
                 hasCallback,
                 calculateMaxExpectedDuration(halRequest->settings),
-                requestedPhysicalCameras);
+                requestedPhysicalCameras, isStillCapture);
         ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
                ", burstId = %" PRId32 ".",
                 __FUNCTION__,
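
registerInFlight() now opens an async atrace slice for still captures at submission, and the in-flight completion path closes it, keyed by frame number so overlapping captures stay distinct. A hedged sketch of the pairing (Android-only; ATRACE_TAG must be defined before including utils/Trace.h, and the helper name is illustrative):

```cpp
// Sketch: BEGIN and END must use the same name/cookie pair, here the frame
// number, so systrace can match the async slice across threads.
#define ATRACE_TAG ATRACE_TAG_CAMERA
#include <utils/Trace.h>

#include <cstdint>

void traceStillCapture(uint32_t frameNumber, bool begin) {
    if (begin) {
        ATRACE_ASYNC_BEGIN("still capture", frameNumber);  // at request submission
    } else {
        ATRACE_ASYNC_END("still capture", frameNumber);    // when the result completes
    }
}
```
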
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 96212ab..51e1fb0 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -994,6 +994,9 @@
         // Map of physicalCameraId <-> Metadata
         std::vector<PhysicalCaptureResultInfo> physicalMetadatas;
 
+        // Indicates a still capture request.
+        bool stillCapture;
+
         // Default constructor needed by KeyedVector
         InFlightRequest() :
                 shutterTimestamp(0),
@@ -1004,12 +1007,13 @@
                 hasInputBuffer(false),
                 hasCallback(true),
                 maxExpectedDuration(kDefaultExpectedDuration),
-                skipResultMetadata(false) {
+                skipResultMetadata(false),
+                stillCapture(false) {
         }
 
         InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
                 bool hasAppCallback, nsecs_t maxDuration,
-                const std::set<String8>& physicalCameraIdSet) :
+                const std::set<String8>& physicalCameraIdSet, bool isStillCapture) :
                 shutterTimestamp(0),
                 sensorTimestamp(0),
                 requestStatus(OK),
@@ -1020,7 +1024,8 @@
                 hasCallback(hasAppCallback),
                 maxExpectedDuration(maxDuration),
                 skipResultMetadata(false),
-                physicalCameraIds(physicalCameraIdSet) {
+                physicalCameraIds(physicalCameraIdSet),
+                stillCapture(isStillCapture) {
         }
     };
 
@@ -1034,10 +1039,10 @@
     nsecs_t                mExpectedInflightDuration = 0;
     int                    mInFlightStatusId;
 
-
     status_t registerInFlight(uint32_t frameNumber,
             int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
-            bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds);
+            bool callback, nsecs_t maxExpectedDuration, std::set<String8>& physicalCameraIds,
+            bool isStillCapture);
 
     /**
      * Returns the maximum expected time it'll take for all currently in-flight