Merge "CCodec: read formats after setupInputSurface"
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index dfd649d..c557de1 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -88,6 +88,16 @@
         mTimestampUs = 0u;
         mOutputSize = 0u;
         mTimestampDevTest = false;
+        mWidth = ENC_DEFAULT_FRAME_WIDTH;
+        mHeight = ENC_DEFAULT_FRAME_HEIGHT;
+        mMaxWidth = 0;
+        mMaxHeight = 0;
+        mMinWidth = INT32_MAX;
+        mMinHeight = INT32_MAX;
+
+        ASSERT_EQ(getMaxMinResolutionSupported(mComponent), C2_OK);
+        mWidth = std::max(std::min(mWidth, mMaxWidth), mMinWidth);
+        mHeight = std::max(std::min(mHeight, mMaxHeight), mMinHeight);
 
         C2SecureModeTuning secureModeTuning{};
         mComponent->query({&secureModeTuning}, {}, C2_MAY_BLOCK, nullptr);
@@ -111,6 +121,8 @@
     virtual void getParams() {}
 
     bool setupConfigParam(int32_t nWidth, int32_t nHeight, int32_t nBFrame = 0);
+    c2_status_t getMaxMinResolutionSupported(
+            const std::shared_ptr<android::Codec2Client::Component>& component);
 
     // callback function to process onWorkDone received by Listener
     void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
@@ -181,6 +193,12 @@
     uint32_t mFailedWorkReceived;
     uint64_t mTimestampUs;
     uint64_t mOutputSize;
+    int32_t mWidth;
+    int32_t mHeight;
+    int32_t mMaxWidth;
+    int32_t mMaxHeight;
+    int32_t mMinWidth;
+    int32_t mMinHeight;
 
     std::list<uint64_t> mTimestampUslist;
     std::list<uint64_t> mFlushedIndices;
@@ -271,6 +289,37 @@
     strcat(URL, "bbb_352x288_420p_30fps_32frames.yuv");
 }
 
+// Fills an output frame of nWidth x nHeight by tiling the default-resolution
+// source frame across each of the three YUV420 planes.
+void fillByteBuffer(char* inputBuffer, char* mInputData, uint32_t nWidth, int32_t nHeight) {
+    int width, height, tileWidth, tileHeight;
+    int offset = 0, frmOffset = 0;
+    int numOfPlanes = 3;
+    for (int plane = 0; plane < numOfPlanes; plane++) {
+        if (plane == 0) {
+            width = nWidth;
+            height = nHeight;
+            tileWidth = ENC_DEFAULT_FRAME_WIDTH;
+            tileHeight = ENC_DEFAULT_FRAME_HEIGHT;
+        } else {
+            width = nWidth / 2;
+            tileWidth = ENC_DEFAULT_FRAME_WIDTH / 2;
+            height = nHeight / 2;
+            tileHeight = ENC_DEFAULT_FRAME_HEIGHT / 2;
+        }
+        for (int k = 0; k < height; k += tileHeight) {
+            int rowsToCopy = std::min(height - k, tileHeight);
+            for (int j = 0; j < rowsToCopy; j++) {
+                for (int i = 0; i < width; i += tileWidth) {
+                    int colsToCopy = std::min(width - i, tileWidth);
+                    memcpy(inputBuffer + (offset + (k + j) * width + i),
+                           mInputData + (frmOffset + j * tileWidth), colsToCopy);
+                }
+            }
+        }
+        offset += width * height;
+        frmOffset += tileWidth * tileHeight;
+    }
+}
+
 void encodeNFrames(const std::shared_ptr<android::Codec2Client::Component>& component,
                    std::mutex& queueLock, std::condition_variable& queueCondition,
                    std::list<std::unique_ptr<C2Work>>& workQueue,
@@ -314,12 +363,22 @@
             ULock l(queueLock);
             flushedIndices.emplace_back(frameID);
         }
-        char* data = (char*)malloc(bytesCount);
-        ASSERT_NE(data, nullptr);
-        memset(data, 0, bytesCount);
-        if (eleStream.is_open()) {
-            eleStream.read(data, bytesCount);
-            ASSERT_EQ(eleStream.gcount(), bytesCount);
+        std::vector<uint8_t> buffer(bytesCount);
+        char* data = (char*)buffer.data();
+        if (nWidth != ENC_DEFAULT_FRAME_WIDTH || nHeight != ENC_DEFAULT_FRAME_HEIGHT) {
+            int defaultBytesCount = ENC_DEFAULT_FRAME_HEIGHT * ENC_DEFAULT_FRAME_WIDTH * 3 >> 1;
+            std::vector<uint8_t> srcBuffer(defaultBytesCount);
+            char* srcData = (char*)srcBuffer.data();
+            if (eleStream.is_open()) {
+                eleStream.read(srcData, defaultBytesCount);
+                ASSERT_EQ(eleStream.gcount(), defaultBytesCount);
+            }
+            fillByteBuffer(data, srcData, nWidth, nHeight);
+        } else {
+            if (eleStream.is_open()) {
+                eleStream.read(data, bytesCount);
+                ASSERT_EQ(eleStream.gcount(), bytesCount);
+            }
         }
         std::shared_ptr<C2GraphicBlock> block;
         err = graphicPool->fetchGraphicBlock(nWidth, nHeight, HAL_PIXEL_FORMAT_YV12,
@@ -352,7 +411,6 @@
         work->input.buffers.emplace_back(new GraphicBuffer(block));
         work->worklets.clear();
         work->worklets.emplace_back(new C2Worklet);
-        free(data);
 
         std::list<std::unique_ptr<C2Work>> items;
         items.push_back(std::move(work));
@@ -381,13 +439,59 @@
     }
 };
 
+c2_status_t Codec2VideoEncHidlTestBase::getMaxMinResolutionSupported(
+        const std::shared_ptr<android::Codec2Client::Component>& component) {
+    std::unique_ptr<C2StreamPictureSizeInfo::input> param =
+            std::make_unique<C2StreamPictureSizeInfo::input>();
+    std::vector<C2FieldSupportedValuesQuery> validValueInfos = {
+            C2FieldSupportedValuesQuery::Current(
+                    C2ParamField(param.get(), &C2StreamPictureSizeInfo::width)),
+            C2FieldSupportedValuesQuery::Current(
+                    C2ParamField(param.get(), &C2StreamPictureSizeInfo::height))};
+    c2_status_t c2err = component->querySupportedValues(validValueInfos, C2_MAY_BLOCK);
+    if (c2err != C2_OK || validValueInfos.size() != 2u) {
+        ALOGE("querySupportedValues_vb failed for pictureSize");
+        return c2err;
+    }
+
+    const auto& c2FSVWidth = validValueInfos[0].values;
+    const auto& c2FSVHeight = validValueInfos[1].values;
+    switch (c2FSVWidth.type) {
+        case C2FieldSupportedValues::type_t::RANGE: {
+            const auto& widthRange = c2FSVWidth.range;
+            const auto& heightRange = c2FSVHeight.range;
+            mMaxWidth = (uint32_t)(widthRange.max).ref<uint32_t>();
+            mMaxHeight = (uint32_t)(heightRange.max).ref<uint32_t>();
+            mMinWidth = (uint32_t)(widthRange.min).ref<uint32_t>();
+            mMinHeight = (uint32_t)(heightRange.min).ref<uint32_t>();
+            break;
+        }
+        case C2FieldSupportedValues::type_t::VALUES: {
+            int32_t curr = 0;
+            for (const C2Value::Primitive& prim : c2FSVWidth.values) {
+                curr = (uint32_t)prim.ref<uint32_t>();
+                mMaxWidth = std::max(curr, mMaxWidth);
+                mMinWidth = std::min(curr, mMinWidth);
+            }
+            for (const C2Value::Primitive& prim : c2FSVHeight.values) {
+                curr = (uint32_t)prim.ref<uint32_t>();
+                mMaxHeight = std::max(curr, mMaxHeight);
+                mMinHeight = std::min(curr, mMinHeight);
+            }
+            break;
+        }
+        default:
+            ALOGE("Non supported data");
+            return C2_BAD_VALUE;
+    }
+    return C2_OK;
+}
+
 TEST_P(Codec2VideoEncEncodeTest, EncodeTest) {
     description("Encodes input file");
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
 
     char mURL[512];
-    int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
-    int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
     bool signalEOS = std::get<3>(GetParam());
     // Send an empty frame to receive CSD data from encoder.
     bool sendEmptyFirstFrame = std::get<3>(GetParam());
@@ -415,10 +519,6 @@
         inputFrames--;
     }
 
-    if (!setupConfigParam(nWidth, nHeight, mConfigBPictures ? 1 : 0)) {
-        std::cout << "[   WARN   ] Test Skipped \n";
-        return;
-    }
     std::vector<std::unique_ptr<C2Param>> inParams;
     c2_status_t c2_status = mComponent->query({}, {C2StreamGopTuning::output::PARAM_TYPE},
                                               C2_DONT_BLOCK, &inParams);
@@ -438,6 +538,9 @@
             mConfigBPictures = false;
         }
     }
+    if (!setupConfigParam(mWidth, mHeight, mConfigBPictures ? 1 : 0)) {
+        ASSERT_TRUE(false) << "Failed while configuring height and width for " << mComponentName;
+    }
 
     ASSERT_EQ(mComponent->start(), C2_OK);
 
@@ -447,7 +550,7 @@
     }
     ASSERT_NO_FATAL_FAILURE(encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
                                           mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
-                                          inputFrames, ENC_NUM_FRAMES, nWidth, nHeight, false,
+                                          inputFrames, ENC_NUM_FRAMES, mWidth, mHeight, false,
                                           signalEOS));
     // mDisableTest will be set if buffer was not fetched properly.
     // This may happen when resolution is not proper but config succeeded
@@ -538,14 +641,12 @@
     if (mDisableTest) GTEST_SKIP() << "Test is disabled";
 
     char mURL[512];
-    int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
-    int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
+
     strcpy(mURL, sResourceDir.c_str());
     GetURLForComponent(mURL);
 
-    if (!setupConfigParam(nWidth, nHeight)) {
-        std::cout << "[   WARN   ] Test Skipped \n";
-        return;
+    if (!setupConfigParam(mWidth, mHeight)) {
+        ASSERT_TRUE(false) << "Failed while configuring height and width for " << mComponentName;
     }
     ASSERT_EQ(mComponent->start(), C2_OK);
 
@@ -567,7 +668,7 @@
 
     ASSERT_NO_FATAL_FAILURE(encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
                                           mFlushedIndices, mGraphicPool, eleStream, mDisableTest, 0,
-                                          numFramesFlushed, nWidth, nHeight, false, false));
+                                          numFramesFlushed, mWidth, mHeight, false, false));
     // mDisableTest will be set if buffer was not fetched properly.
     // This may happen when resolution is not proper but config succeeded
     // In this cases, we skip encoding the input stream
@@ -587,8 +688,8 @@
     ASSERT_EQ(mWorkQueue.size(), MAX_INPUT_BUFFERS);
     ASSERT_NO_FATAL_FAILURE(encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
                                           mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
-                                          numFramesFlushed, numFrames - numFramesFlushed, nWidth,
-                                          nHeight, true));
+                                          numFramesFlushed, numFrames - numFramesFlushed, mWidth,
+                                          mHeight, true));
     eleStream.close();
     // mDisableTest will be set if buffer was not fetched properly.
     // This may happen when resolution is not proper but config succeeded
@@ -731,11 +832,8 @@
 
     mFlushedIndices.clear();
 
-    int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
-    int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
-    if (!setupConfigParam(nWidth, nHeight)) {
-        std::cout << "[   WARN   ] Test Skipped \n";
-        return;
+    if (!setupConfigParam(mWidth, mHeight)) {
+        ASSERT_TRUE(false) << "Failed while configuring height and width for " << mComponentName;
     }
     ASSERT_EQ(mComponent->start(), C2_OK);
 
@@ -756,8 +854,8 @@
 
         ASSERT_NO_FATAL_FAILURE(encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
                                               mFlushedIndices, mGraphicPool, eleStream,
-                                              mDisableTest, inputFrameId, ENC_NUM_FRAMES, nWidth,
-                                              nHeight, false, false));
+                                              mDisableTest, inputFrameId, ENC_NUM_FRAMES, mWidth,
+                                              mHeight, false, false));
         // mDisableTest will be set if buffer was not fetched properly.
         // This may happen when resolution is not proper but config succeeded
         // In this cases, we skip encoding the input stream
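
Editor's note: the test changes above clamp the default 352x288 clip resolution into whatever range the component reports via `querySupportedValues`, and size the input buffer for planar YUV420. A minimal standalone sketch of that clamping and buffer-size math follows; the range values and helper names are illustrative, not part of the VTS code.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Illustrative defaults matching the test clip (352x288, planar YUV420).
constexpr int32_t kDefaultWidth = 352;
constexpr int32_t kDefaultHeight = 288;

// Clamp a requested dimension into the [min, max] range reported by the encoder,
// mirroring: mWidth = std::max(std::min(mWidth, mMaxWidth), mMinWidth);
static int32_t clampDim(int32_t requested, int32_t minDim, int32_t maxDim) {
    return std::max(std::min(requested, maxDim), minDim);
}

// One YUV420 frame: a full-size Y plane plus quarter-size U and V planes.
static size_t yuv420FrameBytes(int32_t w, int32_t h) {
    return (static_cast<size_t>(w) * h * 3) >> 1;
}

int main() {
    // Hypothetical supported ranges: width [176, 1920], height [144, 1088].
    int32_t w = clampDim(kDefaultWidth, 176, 1920);   // stays 352
    int32_t h = clampDim(kDefaultHeight, 144, 1088);  // stays 288
    printf("encode at %dx%d, %zu bytes per frame\n", w, h, yuv420FrameBytes(w, h));
    return 0;
}
```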
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 7b55b63..12b9605 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -515,7 +515,7 @@
 }
 
 void AmendOutputFormatWithCodecSpecificData(
-        const uint8_t *data, size_t size, const std::string mediaType,
+        const uint8_t *data, size_t size, const std::string &mediaType,
         const sp<AMessage> &outputFormat) {
     if (mediaType == MIMETYPE_VIDEO_AVC) {
         // Codec specific data should be SPS and PPS in a single buffer,
@@ -2276,7 +2276,12 @@
                     }
                 }
                 if (config->mInputSurface) {
-                    config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+                    if (work->worklets.empty()
+                           || !work->worklets.back()
+                           || (work->worklets.back()->output.flags
+                                  & C2FrameData::FLAG_INCOMPLETE) == 0) {
+                        config->mInputSurface->onInputBufferDone(work->input.ordinal.frameIndex);
+                    }
                 }
                 if (initDataWatcher.hasChanged()) {
                     initData = initDataWatcher.update();
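
Editor's note: the CCodec hunk above only reports an input buffer back to the input surface once the work item is complete, i.e. when there is no trailing worklet still flagged C2FrameData::FLAG_INCOMPLETE. A small sketch of that predicate in isolation, using simplified stand-in types (the real structures live in the Codec2 framework and the flag value here is a placeholder):

```cpp
#include <cstdint>
#include <cstdio>
#include <list>
#include <memory>

// Simplified stand-ins for the Codec2 work structures used by the check.
constexpr uint32_t kFlagIncomplete = 1u << 1;  // placeholder for C2FrameData::FLAG_INCOMPLETE

struct Worklet {
    uint32_t outputFlags = 0;
};

struct Work {
    std::list<std::unique_ptr<Worklet>> worklets;
};

// Mirrors the added guard: notify the input surface only when the work has no
// trailing worklet that is still marked incomplete.
static bool shouldNotifyInputDone(const Work& work) {
    return work.worklets.empty()
            || !work.worklets.back()
            || (work.worklets.back()->outputFlags & kFlagIncomplete) == 0;
}

int main() {
    Work work;
    work.worklets.emplace_back(new Worklet{kFlagIncomplete});
    printf("notify now? %d\n", shouldNotifyInputDone(work));   // 0: still incomplete
    work.worklets.back()->outputFlags = 0;
    printf("notify now? %d\n", shouldNotifyInputDone(work));   // 1: complete
    return 0;
}
```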
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index b40317f..e96c041 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -33,7 +33,6 @@
     relative_install_path: "soundfx",
 
     cflags: [
-        "-DBUILD_FLOAT",
         "-fvisibility=hidden",
         "-Wall",
         "-Werror",
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 99ac4f5..5ca5525 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -31,13 +31,8 @@
 // Do not submit with DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER defined, strictly for testing
 //#define DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER 0
 
-#ifdef BUILD_FLOAT
 #define MINUS_3_DB_IN_FLOAT 0.70710678f // -3dB = 0.70710678f
 const audio_format_t gTargetFormat = AUDIO_FORMAT_PCM_FLOAT;
-#else
-#define MINUS_3_DB_IN_Q19_12 2896 // -3dB = 0.707 * 2^12 = 2896
-const audio_format_t gTargetFormat = AUDIO_FORMAT_PCM_16_BIT;
-#endif
 
 // subset of possible audio_channel_mask_t values, and AUDIO_CHANNEL_OUT_* renamed to CHANNEL_MASK_*
 typedef enum {
@@ -88,7 +83,7 @@
 
 // number of effects in this library
 const int kNbEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *);
-#ifdef BUILD_FLOAT
+
 static LVM_FLOAT clamp_float(LVM_FLOAT a) {
     if (a > 1.0f) {
         return 1.0f;
@@ -100,7 +95,7 @@
         return a;
     }
 }
-#endif
+
 /*----------------------------------------------------------------------------
  * Test code
  *--------------------------------------------------------------------------*/
@@ -303,106 +298,6 @@
     return -EINVAL;
 }
 
-#ifndef BUILD_FLOAT
-/*--- Effect Control Interface Implementation ---*/
-
-static int Downmix_Process(effect_handle_t self,
-        audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
-
-    downmix_object_t *pDownmixer;
-    int16_t *pSrc, *pDst;
-    downmix_module_t *pDwmModule = (downmix_module_t *)self;
-
-    if (pDwmModule == NULL) {
-        return -EINVAL;
-    }
-
-    if (inBuffer == NULL || inBuffer->raw == NULL ||
-        outBuffer == NULL || outBuffer->raw == NULL ||
-        inBuffer->frameCount != outBuffer->frameCount) {
-        return -EINVAL;
-    }
-
-    pDownmixer = (downmix_object_t*) &pDwmModule->context;
-
-    if (pDownmixer->state == DOWNMIX_STATE_UNINITIALIZED) {
-        ALOGE("Downmix_Process error: trying to use an uninitialized downmixer");
-        return -EINVAL;
-    } else if (pDownmixer->state == DOWNMIX_STATE_INITIALIZED) {
-        ALOGE("Downmix_Process error: trying to use a non-configured downmixer");
-        return -ENODATA;
-    }
-
-    pSrc = inBuffer->s16;
-    pDst = outBuffer->s16;
-    size_t numFrames = outBuffer->frameCount;
-
-    const bool accumulate =
-            (pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
-    const uint32_t downmixInputChannelMask = pDwmModule->config.inputCfg.channels;
-
-    switch(pDownmixer->type) {
-
-      case DOWNMIX_TYPE_STRIP:
-          if (accumulate) {
-              while (numFrames) {
-                  pDst[0] = clamp16(pDst[0] + pSrc[0]);
-                  pDst[1] = clamp16(pDst[1] + pSrc[1]);
-                  pSrc += pDownmixer->input_channel_count;
-                  pDst += 2;
-                  numFrames--;
-              }
-          } else {
-              while (numFrames) {
-                  pDst[0] = pSrc[0];
-                  pDst[1] = pSrc[1];
-                  pSrc += pDownmixer->input_channel_count;
-                  pDst += 2;
-                  numFrames--;
-              }
-          }
-          break;
-
-      case DOWNMIX_TYPE_FOLD:
-#ifdef DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER
-          // bypass the optimized downmix routines for the common formats
-          if (!Downmix_foldGeneric(
-                  downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
-              ALOGE("Multichannel configuration 0x%" PRIx32 " is not supported", downmixInputChannelMask);
-              return -EINVAL;
-          }
-          break;
-#endif
-        // optimize for the common formats
-        switch((downmix_input_channel_mask_t)downmixInputChannelMask) {
-        case CHANNEL_MASK_QUAD_BACK:
-        case CHANNEL_MASK_QUAD_SIDE:
-            Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
-            break;
-        case CHANNEL_MASK_5POINT1_BACK:
-        case CHANNEL_MASK_5POINT1_SIDE:
-            Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
-            break;
-        case CHANNEL_MASK_7POINT1:
-            Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
-            break;
-        default:
-            if (!Downmix_foldGeneric(
-                    downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
-                ALOGE("Multichannel configuration 0x%" PRIx32 " is not supported", downmixInputChannelMask);
-                return -EINVAL;
-            }
-            break;
-        }
-        break;
-
-      default:
-        return -EINVAL;
-    }
-
-    return 0;
-}
-#else /*BUILD_FLOAT*/
 /*--- Effect Control Interface Implementation ---*/
 
 static int Downmix_Process(effect_handle_t self,
@@ -503,7 +398,6 @@
 
     return 0;
 }
-#endif
 
 static int Downmix_Command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
         void *pCmdData, uint32_t *replySize, void *pReplyData) {
@@ -940,35 +834,6 @@
  *
  *----------------------------------------------------------------------------
  */
-#ifndef BUILD_FLOAT
-void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is RL
-    // sample at index 3 is RR
-    if (accumulate) {
-        while (numFrames) {
-            // FL + RL
-            pDst[0] = clamp16(pDst[0] + ((pSrc[0] + pSrc[2]) >> 1));
-            // FR + RR
-            pDst[1] = clamp16(pDst[1] + ((pSrc[1] + pSrc[3]) >> 1));
-            pSrc += 4;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // FL + RL
-            pDst[0] = clamp16((pSrc[0] + pSrc[2]) >> 1);
-            // FR + RR
-            pDst[1] = clamp16((pSrc[1] + pSrc[3]) >> 1);
-            pSrc += 4;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-#else
 void Downmix_foldFromQuad(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
     // sample at index 0 is FL
     // sample at index 1 is FR
@@ -996,7 +861,6 @@
         }
     }
 }
-#endif
 
 /*----------------------------------------------------------------------------
  * Downmix_foldFrom5Point1()
@@ -1015,52 +879,6 @@
  *
  *----------------------------------------------------------------------------
  */
-#ifndef BUILD_FLOAT
-void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
-    int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is FC
-    // sample at index 3 is LFE
-    // sample at index 4 is RL
-    // sample at index 5 is RR
-    // code is mostly duplicated between the two values of accumulate to avoid repeating the test
-    // for every sample
-    if (accumulate) {
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
-                    + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
-            // FL + centerPlusLfeContrib + RL
-            lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12);
-            // FR + centerPlusLfeContrib + RR
-            rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12);
-            // accumulate in destination
-            pDst[0] = clamp16(pDst[0] + (lt >> 13));
-            pDst[1] = clamp16(pDst[1] + (rt >> 13));
-            pSrc += 6;
-            pDst += 2;
-            numFrames--;
-        }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
-                    + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
-            // FL + centerPlusLfeContrib + RL
-            lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12);
-            // FR + centerPlusLfeContrib + RR
-            rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12);
-            // store in destination
-            pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
-            pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
-            pSrc += 6;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-#else
 void Downmix_foldFrom5Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
     LVM_FLOAT lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
     // sample at index 0 is FL
@@ -1105,7 +923,6 @@
         }
     }
 }
-#endif
 
 /*----------------------------------------------------------------------------
  * Downmix_foldFrom7Point1()
@@ -1124,54 +941,6 @@
  *
  *----------------------------------------------------------------------------
  */
-#ifndef BUILD_FLOAT
-void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
-    int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
-    // sample at index 0 is FL
-    // sample at index 1 is FR
-    // sample at index 2 is FC
-    // sample at index 3 is LFE
-    // sample at index 4 is RL
-    // sample at index 5 is RR
-    // sample at index 6 is SL
-    // sample at index 7 is SR
-    // code is mostly duplicated between the two values of accumulate to avoid repeating the test
-    // for every sample
-    if (accumulate) {
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
-                    + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
-            // FL + centerPlusLfeContrib + SL + RL
-            lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12);
-            // FR + centerPlusLfeContrib + SR + RR
-            rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12);
-            //accumulate in destination
-            pDst[0] = clamp16(pDst[0] + (lt >> 13));
-            pDst[1] = clamp16(pDst[1] + (rt >> 13));
-            pSrc += 8;
-            pDst += 2;
-            numFrames--;
-    }
-    } else { // same code as above but without adding and clamping pDst[i] to itself
-        while (numFrames) {
-            // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
-            centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12)
-                    + (pSrc[3] * MINUS_3_DB_IN_Q19_12);
-            // FL + centerPlusLfeContrib + SL + RL
-            lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12);
-            // FR + centerPlusLfeContrib + SR + RR
-            rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12);
-            // store in destination
-            pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
-            pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
-            pSrc += 8;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-}
-#else
 void Downmix_foldFrom7Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
     LVM_FLOAT lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
     // sample at index 0 is FL
@@ -1218,7 +987,7 @@
         }
     }
 }
-#endif
+
 /*----------------------------------------------------------------------------
  * Downmix_foldGeneric()
  *----------------------------------------------------------------------------
@@ -1245,99 +1014,6 @@
  *
  *----------------------------------------------------------------------------
  */
-#ifndef BUILD_FLOAT
-bool Downmix_foldGeneric(
-        uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
-
-    if (!Downmix_validChannelMask(mask)) {
-        return false;
-    }
-
-    const bool hasSides = (mask & kSides) != 0;
-    const bool hasBacks = (mask & kBacks) != 0;
-
-    const int numChan = audio_channel_count_from_out_mask(mask);
-    const bool hasFC = ((mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) == AUDIO_CHANNEL_OUT_FRONT_CENTER);
-    const bool hasLFE =
-            ((mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY);
-    const bool hasBC = ((mask & AUDIO_CHANNEL_OUT_BACK_CENTER) == AUDIO_CHANNEL_OUT_BACK_CENTER);
-    // compute at what index each channel is: samples will be in the following order:
-    //   FL FR FC LFE BL BR BC SL SR
-    // when a channel is not present, its index is set to the same as the index of the preceding
-    // channel
-    const int indexFC  = hasFC    ? 2            : 1;        // front center
-    const int indexLFE = hasLFE   ? indexFC + 1  : indexFC;  // low frequency
-    const int indexBL  = hasBacks ? indexLFE + 1 : indexLFE; // back left
-    const int indexBR  = hasBacks ? indexBL + 1  : indexBL;  // back right
-    const int indexBC  = hasBC    ? indexBR + 1  : indexBR;  // back center
-    const int indexSL  = hasSides ? indexBC + 1  : indexBC;  // side left
-    const int indexSR  = hasSides ? indexSL + 1  : indexSL;  // side right
-
-    int32_t lt, rt, centersLfeContrib; // samples in Q19.12 format
-    // code is mostly duplicated between the two values of accumulate to avoid repeating the test
-    // for every sample
-    if (accumulate) {
-        while (numFrames) {
-            // compute contribution of FC, BC and LFE
-            centersLfeContrib = 0;
-            if (hasFC)  { centersLfeContrib += pSrc[indexFC]; }
-            if (hasLFE) { centersLfeContrib += pSrc[indexLFE]; }
-            if (hasBC)  { centersLfeContrib += pSrc[indexBC]; }
-            centersLfeContrib *= MINUS_3_DB_IN_Q19_12;
-            // always has FL/FR
-            lt = (pSrc[0] << 12);
-            rt = (pSrc[1] << 12);
-            // mix in sides and backs
-            if (hasSides) {
-                lt += pSrc[indexSL] << 12;
-                rt += pSrc[indexSR] << 12;
-            }
-            if (hasBacks) {
-                lt += pSrc[indexBL] << 12;
-                rt += pSrc[indexBR] << 12;
-            }
-            lt += centersLfeContrib;
-            rt += centersLfeContrib;
-            // accumulate in destination
-            pDst[0] = clamp16(pDst[0] + (lt >> 13));
-            pDst[1] = clamp16(pDst[1] + (rt >> 13));
-            pSrc += numChan;
-            pDst += 2;
-            numFrames--;
-        }
-    } else {
-        while (numFrames) {
-            // compute contribution of FC, BC and LFE
-            centersLfeContrib = 0;
-            if (hasFC)  { centersLfeContrib += pSrc[indexFC]; }
-            if (hasLFE) { centersLfeContrib += pSrc[indexLFE]; }
-            if (hasBC)  { centersLfeContrib += pSrc[indexBC]; }
-            centersLfeContrib *= MINUS_3_DB_IN_Q19_12;
-            // always has FL/FR
-            lt = (pSrc[0] << 12);
-            rt = (pSrc[1] << 12);
-            // mix in sides and backs
-            if (hasSides) {
-                lt += pSrc[indexSL] << 12;
-                rt += pSrc[indexSR] << 12;
-            }
-            if (hasBacks) {
-                lt += pSrc[indexBL] << 12;
-                rt += pSrc[indexBR] << 12;
-            }
-            lt += centersLfeContrib;
-            rt += centersLfeContrib;
-            // store in destination
-            pDst[0] = clamp16(lt >> 13); // differs from when accumulate is true above
-            pDst[1] = clamp16(rt >> 13); // differs from when accumulate is true above
-            pSrc += numChan;
-            pDst += 2;
-            numFrames--;
-        }
-    }
-    return true;
-}
-#else
 bool Downmix_foldGeneric(
         uint32_t mask, LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate) {
 
@@ -1429,4 +1105,3 @@
     }
     return true;
 }
-#endif
diff --git a/media/libeffects/downmix/EffectDownmix.h b/media/libeffects/downmix/EffectDownmix.h
index c1be0f2..679a855 100644
--- a/media/libeffects/downmix/EffectDownmix.h
+++ b/media/libeffects/downmix/EffectDownmix.h
@@ -27,9 +27,8 @@
 */
 
 #define DOWNMIX_OUTPUT_CHANNELS AUDIO_CHANNEL_OUT_STEREO
-#ifdef BUILD_FLOAT
 #define LVM_FLOAT float
-#endif
+
 typedef enum {
     DOWNMIX_STATE_UNINITIALIZED,
     DOWNMIX_STATE_INITIALIZED,
@@ -97,18 +96,10 @@
 int Downmix_Reset(downmix_object_t *pDownmixer, bool init);
 int Downmix_setParameter(downmix_object_t *pDownmixer, int32_t param, uint32_t size, void *pValue);
 int Downmix_getParameter(downmix_object_t *pDownmixer, int32_t param, uint32_t *pSize, void *pValue);
-#ifdef BUILD_FLOAT
 void Downmix_foldFromQuad(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
 void Downmix_foldFrom5Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
 void Downmix_foldFrom7Point1(LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
 bool Downmix_foldGeneric(
         uint32_t mask, LVM_FLOAT *pSrc, LVM_FLOAT *pDst, size_t numFrames, bool accumulate);
-#else
-void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-bool Downmix_foldGeneric(
-        uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate);
-#endif
 
 #endif /*ANDROID_EFFECTDOWNMIX_H_*/
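
Editor's note: with the BUILD_FLOAT branches removed, the downmixer keeps only the float path and its MINUS_3_DB_IN_FLOAT constant; the deleted fixed-point path expressed the same -3 dB gain as 2896 in Q19.12. A quick standalone check of that equivalence, assuming -3 dB is approximated as 1/sqrt(2) as the code comments do:

```cpp
#include <cmath>
#include <cstdio>

int main() {
    // -3 dB as a linear gain is 10^(-3/20) ~= 0.7079, conventionally approximated
    // by 1/sqrt(2) = 0.70710678f -- the MINUS_3_DB_IN_FLOAT constant kept above.
    const float minus3dB = 1.0f / std::sqrt(2.0f);

    // The removed fixed-point path stored the same gain in Q19.12 fixed point:
    // 0.707 * 2^12 rounds to 2896 (the old MINUS_3_DB_IN_Q19_12).
    const int q19_12 = static_cast<int>(std::round(minus3dB * 4096.0f));

    printf("float gain = %.8f, Q19.12 = %d\n", minus3dB, q19_12);  // 0.70710677, 2896
    return 0;
}
```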
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 8a4b17c..189fef0 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -20,6 +20,7 @@
 #include <sys/types.h>
 
 #include <android/IDataSource.h>
+#include <binder/IPCThreadState.h>
 #include <binder/Parcel.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <media/AudioResamplerPublic.h>
@@ -83,10 +84,36 @@
 };
 
 // ModDrm helpers
-static void readVector(const Parcel& reply, Vector<uint8_t>& vector) {
-    uint32_t size = reply.readUint32();
-    vector.insertAt((size_t)0, size);
-    reply.read(vector.editArray(), size);
+static status_t readVector(const Parcel& reply, Vector<uint8_t>& vector) {
+    uint32_t size = 0;
+    status_t status = reply.readUint32(&size);
+    if (status == OK) {
+        status = size <= reply.dataAvail() ? OK : BAD_VALUE;
+    }
+    if (status == OK) {
+        status = vector.insertAt((size_t) 0, size) >= 0 ? OK : NO_MEMORY;
+    }
+    if (status == OK) {
+        status = reply.read(vector.editArray(), size);
+    }
+    if (status != OK) {
+        char errorMsg[100];
+        char buganizerId[] = "173720767";
+        snprintf(errorMsg,
+                sizeof(errorMsg),
+                "%s: failed to read array. Size: %d, status: %d.",
+                __func__,
+                size,
+                status);
+        android_errorWriteWithInfoLog(
+                /* safetyNet tag= */ 0x534e4554,
+                buganizerId,
+                IPCThreadState::self()->getCallingUid(),
+                errorMsg,
+                strlen(errorMsg));
+        ALOGE("%s (b/%s)", errorMsg, buganizerId);
+    }
+    return status;
 }
 
 static void writeVector(Parcel& data, Vector<uint8_t> const& vector) {
@@ -977,8 +1004,10 @@
             uint8_t uuid[16] = {};
             data.read(uuid, sizeof(uuid));
             Vector<uint8_t> drmSessionId;
-            readVector(data, drmSessionId);
-
+            status_t status = readVector(data, drmSessionId);
+            if (status != OK) {
+                return status;
+            }
             uint32_t result = prepareDrm(uuid, drmSessionId);
             reply->writeInt32(result);
             return OK;
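
Editor's note: the hardened readVector() above validates the size prefix against Parcel::dataAvail() before allocating or reading, instead of trusting the remote caller. A generic sketch of the same defensive pattern for a length-prefixed byte array; the FakeParcel type is a stand-in for the binder Parcel API, not part of the patch.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Minimal stand-in for a received parcel: a byte stream with a read cursor.
struct FakeParcel {
    std::vector<uint8_t> data;
    size_t pos = 0;

    size_t dataAvail() const { return data.size() - pos; }
    bool read(void* out, size_t len) {
        if (len > dataAvail()) return false;
        std::memcpy(out, data.data() + pos, len);
        pos += len;
        return true;
    }
    bool readUint32(uint32_t* out) { return read(out, sizeof(*out)); }
};

// Defensive length-prefixed read: reject a size larger than the bytes actually
// present, so a hostile peer cannot force a huge allocation or a short read.
static bool readSizedVector(FakeParcel& p, std::vector<uint8_t>& out) {
    uint32_t size = 0;
    if (!p.readUint32(&size)) return false;
    if (size > p.dataAvail()) return false;   // the check this patch adds
    out.resize(size);
    return size == 0 || p.read(out.data(), size);
}

int main() {
    // Size prefix claims 0x7FFFFFFF bytes (little-endian) but only 2 bytes follow.
    FakeParcel evil{{0xFF, 0xFF, 0xFF, 0x7F, 0x01, 0x02}};
    std::vector<uint8_t> v;
    printf("read ok? %d\n", readSizedVector(evil, v));  // 0: size exceeds dataAvail()
    return 0;
}
```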
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.cpp b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
index b43df38..d2d978a 100644
--- a/media/libmediaplayerservice/nuplayer/RTPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
@@ -124,8 +124,16 @@
         // index(i) should be started from 1. 0 is reserved for [root]
         mRTPConn->addStream(sockRtp, sockRtcp, desc, i + 1, notify, false);
         mRTPConn->setSelfID(info->mSelfID);
-        mRTPConn->setJbTime(
-                (info->mJbTimeMs <= 3000 && info->mJbTimeMs >= 40) ? info->mJbTimeMs : 300);
+        mRTPConn->setStaticJitterTimeMs(info->mJbTimeMs);
+
+        unsigned long PT;
+        AString formatDesc, formatParams;
+        // index(i) should start from 1. 0 is reserved for [root]
+        desc->getFormatType(i + 1, &PT, &formatDesc, &formatParams);
+
+        int32_t clockRate, numChannels;
+        ASessionDescription::ParseFormatDesc(formatDesc.c_str(), &clockRate, &numChannels);
+        info->mTimeScale = clockRate;
 
         info->mRTPSocket = sockRtp;
         info->mRTCPSocket = sockRtcp;
@@ -146,10 +154,8 @@
 
         if (info->mIsAudio) {
             mAudioTrack = source;
-            info->mTimeScale = 16000;
         } else {
             mVideoTrack = source;
-            info->mTimeScale = 90000;
         }
 
         info->mSource = source;
@@ -680,7 +686,7 @@
         newTrackInfo.mIsAudio = isAudioKey;
         mTracks.push(newTrackInfo);
         info = &mTracks.editTop();
-        info->mJbTimeMs = 300;
+        info->mJbTimeMs = kStaticJitterTimeMs;
     }
 
     if (key == "rtp-param-mime-type") {
@@ -724,7 +730,8 @@
         int64_t networkHandle = atoll(value);
         setSocketNetwork(networkHandle);
     } else if (key == "rtp-param-jitter-buffer-time") {
-        info->mJbTimeMs = atoi(value);
+        // clamp to the range [40, 3000] ms
+        info->mJbTimeMs = std::min(std::max(40, atoi(value)), 3000);
     }
 
     return OK;
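
Editor's note: the RTPSource changes above stop hard-coding the track time scale (16000 for audio, 90000 for video) and instead derive the clock rate from the SDP rtpmap format description, while the configured jitter-buffer time is clamped to [40, 3000] ms. A small illustration of both steps; the string parsing here is a simplified stand-in, not the ASessionDescription implementation.

```cpp
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <string>

// Clamp a requested jitter-buffer time (ms) to [40, 3000], as the new
// "rtp-param-jitter-buffer-time" handling does.
static int32_t clampJbTimeMs(const char* value) {
    return std::min(std::max(40, atoi(value)), 3000);
}

// Simplified parse of an rtpmap format description like "H265/90000" or
// "MP4A-LATM/48000/2": the field after the first '/' is the clock rate.
static int32_t clockRateFromFormatDesc(const std::string& desc) {
    size_t slash = desc.find('/');
    if (slash == std::string::npos) return 0;
    return atoi(desc.c_str() + slash + 1);
}

int main() {
    printf("jb = %d ms\n", clampJbTimeMs("10"));                      // clamped to 40
    printf("clock = %d Hz\n", clockRateFromFormatDesc("H265/90000")); // 90000
    return 0;
}
```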
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c9b6340..6bf7612 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -19,6 +19,8 @@
 #define LOG_TAG "MediaCodec"
 #include <utils/Log.h>
 
+#include <set>
+
 #include <inttypes.h>
 #include <stdlib.h>
 
@@ -201,6 +203,10 @@
     // implements DeathRecipient
     static void BinderDiedCallback(void* cookie);
     void binderDied();
+    static Mutex sLockCookies;
+    static std::set<void*> sCookies;
+    static void addCookie(void* cookie);
+    static void removeCookie(void* cookie);
 
     void addResource(const MediaResourceParcel &resource);
     void removeResource(const MediaResourceParcel &resource);
@@ -227,8 +233,15 @@
 }
 
 MediaCodec::ResourceManagerServiceProxy::~ResourceManagerServiceProxy() {
+
+    // remove the cookie, so any in-flight death notification will get dropped
+    // by our handler.
+    removeCookie(this);
+
+    Mutex::Autolock _l(mLock);
     if (mService != nullptr) {
         AIBinder_unlinkToDeath(mService->asBinder().get(), mDeathRecipient.get(), this);
+        mService = nullptr;
     }
 }
 
@@ -240,6 +253,10 @@
         return;
     }
 
+    // so our handler will process the death notifications
+    addCookie(this);
+
+    // after this, require mLock whenever using mService
     AIBinder_linkToDeath(mService->asBinder().get(), mDeathRecipient.get(), this);
 
     // Kill clients pending removal.
@@ -247,9 +264,28 @@
 }
 
 //static
+Mutex MediaCodec::ResourceManagerServiceProxy::sLockCookies;
+std::set<void*> MediaCodec::ResourceManagerServiceProxy::sCookies;
+
+//static
+void MediaCodec::ResourceManagerServiceProxy::addCookie(void* cookie) {
+    Mutex::Autolock _l(sLockCookies);
+    sCookies.insert(cookie);
+}
+
+//static
+void MediaCodec::ResourceManagerServiceProxy::removeCookie(void* cookie) {
+    Mutex::Autolock _l(sLockCookies);
+    sCookies.erase(cookie);
+}
+
+//static
 void MediaCodec::ResourceManagerServiceProxy::BinderDiedCallback(void* cookie) {
-    auto thiz = static_cast<ResourceManagerServiceProxy*>(cookie);
-    thiz->binderDied();
+    Mutex::Autolock _l(sLockCookies);
+    if (sCookies.find(cookie) != sCookies.end()) {
+        auto thiz = static_cast<ResourceManagerServiceProxy*>(cookie);
+        thiz->binderDied();
+    }
 }
 
 void MediaCodec::ResourceManagerServiceProxy::binderDied() {
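
Editor's note: the MediaCodec hunk keeps a static set of live proxy cookies so that a binder death notification racing with proxy destruction is dropped rather than dereferencing a stale pointer. A generic sketch of that cookie-registry pattern (std::mutex stands in for the framework Mutex, and the proxy class is illustrative):

```cpp
#include <cstdio>
#include <mutex>
#include <set>

// Registry of cookies that are still valid. A death callback only acts on a
// cookie that is still registered, so a late notification for a destroyed
// object is ignored instead of touching freed memory.
static std::mutex sLockCookies;
static std::set<void*> sCookies;

static void addCookie(void* cookie) {
    std::lock_guard<std::mutex> lock(sLockCookies);
    sCookies.insert(cookie);
}

static void removeCookie(void* cookie) {
    std::lock_guard<std::mutex> lock(sLockCookies);
    sCookies.erase(cookie);
}

struct ServiceProxy {
    ServiceProxy() { addCookie(this); }      // register when linking to death
    ~ServiceProxy() { removeCookie(this); }  // unregister before teardown
    void onServiceDied() { printf("service died\n"); }
};

// What a binder death callback would do with its opaque cookie.
static void binderDiedCallback(void* cookie) {
    std::lock_guard<std::mutex> lock(sLockCookies);
    if (sCookies.count(cookie)) {
        static_cast<ServiceProxy*>(cookie)->onServiceDied();
    }  // otherwise: the proxy is gone, drop the notification
}

int main() {
    auto* proxy = new ServiceProxy();
    binderDiedCallback(proxy);  // delivered: cookie is registered
    delete proxy;
    binderDiedCallback(proxy);  // dropped: cookie was removed in the destructor
    return 0;
}
```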
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 2f93d5d..92b2b09 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -34,6 +34,8 @@
 
 namespace android {
 
+const double JITTER_MULTIPLE = 1.5f;
+
 // static
 AAVCAssembler::AAVCAssembler(const sp<AMessage> &notify)
     : mNotifyMsg(notify),
@@ -123,22 +125,48 @@
 
     int64_t rtpTime = findRTPTime(firstRTPTime, buffer);
 
-    int64_t startTime = source->mFirstSysTime / 1000;
-    int64_t nowTime = ALooper::GetNowUs() / 1000;
-    int64_t playedTime = nowTime - startTime;
+    const int64_t startTimeMs = source->mFirstSysTime / 1000;
+    const int64_t nowTimeMs = ALooper::GetNowUs() / 1000;
+    const int64_t staticJbTimeMs = source->getStaticJitterTimeMs();
+    const int64_t dynamicJbTimeMs = source->getDynamicJitterTimeMs();
+    const int64_t clockRate = source->mClockRate;
 
-    int64_t playedTimeRtp = source->mFirstRtpTime + playedTime * (int64_t)source->mClockRate / 1000;
-    const int64_t jitterTime = source->mJbTimeMs * (int64_t)source->mClockRate / 1000;
+    int64_t playedTimeMs = nowTimeMs - startTimeMs;
+    int64_t playedTimeRtp = source->mFirstRtpTime + MsToRtp(playedTimeMs, clockRate);
 
-    int64_t expiredTimeInJb = rtpTime + jitterTime;
-    bool isExpired = expiredTimeInJb <= (playedTimeRtp);
-    bool isTooLate200 = expiredTimeInJb < (playedTimeRtp - jitterTime);
-    bool isTooLate300 = expiredTimeInJb < (playedTimeRtp - (jitterTime * 3 / 2));
+    /**
+     * Based on experience with real commercial network services,
+     * 300 ms is a practical maximum jitter buffer time for a video RTP service.
+     */
+
+    /**
+     * The static (base) jitter is the propagation delay we are prepared to wait for;
+     * packets that miss it can be dropped. A shorter value gives a faster response
+     * but risks losing packets.
+     * Expected range: 50 ms ~ 1000 ms (300 ms is a practical upper bound).
+     */
+    const int64_t baseJbTimeRtp = MsToRtp(staticJbTimeMs, clockRate);
+    /**
+     * The dynamic jitter is the variance of the interarrival time, as defined in
+     * section 6.4.1 of RFC 3550, and acts as a moment-to-moment tolerance.
+     * Expected range: 0 ms ~ 150 ms (practically not above 300 ms).
+     */
+    const int64_t dynamicJbTimeRtp =                        // capped at 150 ms
+            std::min(MsToRtp(dynamicJbTimeMs, clockRate), MsToRtp(150, clockRate));
+    const int64_t jitterTimeRtp = baseJbTimeRtp + dynamicJbTimeRtp; // total jitter time
+
+    int64_t expiredTimeRtp = rtpTime + jitterTimeRtp;       // when this buffer expires (T)
+    int64_t diffTimeRtp = playedTimeRtp - expiredTimeRtp;
+    bool isExpired = (diffTimeRtp >= 0);                    // expired once T has passed
+    bool isFirstLineBroken = (diffTimeRtp > jitterTimeRtp); // (T + jitter) is the standard tolerance
+
+    int64_t finalMargin = dynamicJbTimeRtp * JITTER_MULTIPLE;
+    bool isSecondLineBroken = (diffTimeRtp > jitterTimeRtp + finalMargin); // the hard limit
 
     if (mShowQueue && mShowQueueCnt < 20) {
         showCurrentQueue(queue);
-        printNowTimeUs(startTime, nowTime, playedTime);
-        printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
+        printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
+        printRTPTime(rtpTime, playedTimeRtp, expiredTimeRtp, isExpired);
         mShowQueueCnt++;
     }
 
@@ -149,17 +177,23 @@
         return NOT_ENOUGH_DATA;
     }
 
-    if (isTooLate200) {
-        ALOGW("=== WARNING === buffer arrived 200ms late. === WARNING === ");
-    }
+    if (isFirstLineBroken) {
+        if (isSecondLineBroken) {
+            ALOGW("buffer too late ... \t Diff in Jb=%lld \t "
+                    "Seq# %d \t ExpSeq# %d \t"
+                    "JitterMs %lld + (%lld * %.3f)",
+                    (long long)(diffTimeRtp),
+                    buffer->int32Data(), mNextExpectedSeqNo,
+                    (long long)staticJbTimeMs, (long long)dynamicJbTimeMs, JITTER_MULTIPLE + 1);
+            printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
+            printRTPTime(rtpTime, playedTimeRtp, expiredTimeRtp, isExpired);
 
-    if (isTooLate300) {
-        ALOGW("buffer arrived after 300ms ... \t Diff in Jb=%lld \t Seq# %d",
-                (long long)(playedTimeRtp - expiredTimeInJb), buffer->int32Data());
-        printNowTimeUs(startTime, nowTime, playedTime);
-        printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
-
-        mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTime);
+            mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTimeRtp);
+        }  else {
+            ALOGW("=== WARNING === buffer arrived after %lld + %lld = %lld ms === WARNING === ",
+                    (long long)staticJbTimeMs, (long long)dynamicJbTimeMs,
+                    (long long)RtpToMs(jitterTimeRtp, clockRate));
+        }
     }
 
     if (mNextExpectedSeqNoValid) {
@@ -170,6 +204,7 @@
             source->noticeAbandonBuffer(cntRemove);
             ALOGW("delete %d of %d buffers", cntRemove, size);
         }
+
         if (queue->empty()) {
             return NOT_ENOUGH_DATA;
         }
@@ -565,17 +600,6 @@
     msg->post();
 }
 
-inline int64_t AAVCAssembler::findRTPTime(
-        const uint32_t& firstRTPTime, const sp<ABuffer>& buffer) {
-    /* If you want to +, -, * rtpTime, recommend to declare rtpTime as int64_t.
-       Because rtpTime can be near UINT32_MAX. Beware the overflow. */
-    int64_t rtpTime = 0;
-    CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
-    // If the first overs 2^31 and rtp unders 2^31, the rtp value is overflowed one.
-    int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
-    return rtpTime | overflowMask;
-}
-
 int32_t AAVCAssembler::pickProperSeq(const Queue *queue,
         uint32_t first, int64_t play, int64_t jit) {
     sp<ABuffer> buffer = *(queue->begin());
@@ -620,16 +644,6 @@
     return initSize - queue->size();
 }
 
-inline void AAVCAssembler::printNowTimeUs(int64_t start, int64_t now, int64_t play) {
-    ALOGD("start=%lld, now=%lld, played=%lld",
-            (long long)start, (long long)now, (long long)play);
-}
-
-inline void AAVCAssembler::printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp) {
-    ALOGD("rtp-time(JB)=%lld, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%lld expired=%d",
-            (long long)rtp, (long long)play, (long long)exp, isExp);
-}
-
 ARTPAssembler::AssemblyStatus AAVCAssembler::assembleMore(
         const sp<ARTPSource> &source) {
     AssemblyStatus status = addNALUnit(source);
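
Editor's note: the assembler now works in RTP clock units via the shared MsToRtp()/RtpToMs() helpers and splits the tolerance into a static part and a capped dynamic part. A worked example with a 90 kHz video clock and illustrative jitter values (the numbers are for exposition only):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// The conversions the assemblers now share via ARTPAssembler.
static int64_t MsToRtp(int64_t ms, int64_t clockRate) { return ms * clockRate / 1000; }
static int64_t RtpToMs(int64_t rtp, int64_t clockRate) { return rtp * 1000 / clockRate; }

int main() {
    const int64_t clockRate = 90000;   // typical video RTP clock
    const int64_t staticJbMs = 300;    // static (base) jitter-buffer time
    const int64_t dynamicJbMs = 50;    // measured interarrival jitter (illustrative)
    const double jitterMultiple = 1.5;

    const int64_t baseRtp = MsToRtp(staticJbMs, clockRate);               // 27000 ticks
    const int64_t dynRtp = std::min(MsToRtp(dynamicJbMs, clockRate),
                                    MsToRtp(150, clockRate));             // 4500 ticks
    const int64_t jitterRtp = baseRtp + dynRtp;                           // 31500 ticks

    // A buffer expires once playedTimeRtp reaches rtpTime + jitterRtp.
    // The first line breaks when it is a further jitterRtp late (warning log),
    // the second when it is jitterRtp + 1.5 * dynRtp late (sequence reset).
    printf("jitter budget %lld ms, warn >%lld ms past expiry, reset >%lld ms past expiry\n",
           (long long)RtpToMs(jitterRtp, clockRate),                                 // 350
           (long long)RtpToMs(jitterRtp, clockRate),                                 // 350
           (long long)RtpToMs(jitterRtp + (int64_t)(dynRtp * jitterMultiple),
                              clockRate));                                           // 425
    return 0;
}
```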
diff --git a/media/libstagefright/rtsp/AAVCAssembler.h b/media/libstagefright/rtsp/AAVCAssembler.h
index 9d71e2f..954086c 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.h
+++ b/media/libstagefright/rtsp/AAVCAssembler.h
@@ -63,13 +63,10 @@
 
     void submitAccessUnit();
 
-    inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
     int32_t pickProperSeq(const Queue *q, uint32_t first, int64_t play, int64_t jit);
     bool recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
             size_t avail, float goodRatio);
     int32_t deleteUnitUnderSeq(Queue *q, uint32_t seq);
-    void printNowTimeUs(int64_t start, int64_t now, int64_t play);
-    void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
 
     DISALLOW_EVIL_CONSTRUCTORS(AAVCAssembler);
 };
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.cpp b/media/libstagefright/rtsp/AHEVCAssembler.cpp
index 553ea08..cd60203 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AHEVCAssembler.cpp
@@ -41,6 +41,8 @@
 
 namespace android {
 
+const double JITTER_MULTIPLE = 1.5f;
+
 // static
 AHEVCAssembler::AHEVCAssembler(const sp<AMessage> &notify)
     : mNotifyMsg(notify),
@@ -130,23 +132,51 @@
 
     sp<ABuffer> buffer = *queue->begin();
     buffer->meta()->setObject("source", source);
+
     int64_t rtpTime = findRTPTime(firstRTPTime, buffer);
 
-    int64_t startTime = source->mFirstSysTime / 1000;
-    int64_t nowTime = ALooper::GetNowUs() / 1000;
-    int64_t playedTime = nowTime - startTime;
-    int64_t playedTimeRtp = source->mFirstRtpTime + playedTime * (int64_t)source->mClockRate / 1000;
-    const int64_t jitterTime = source->mJbTimeMs * (int64_t)source->mClockRate / 1000;
+    const int64_t startTimeMs = source->mFirstSysTime / 1000;
+    const int64_t nowTimeMs = ALooper::GetNowUs() / 1000;
+    const int64_t staticJbTimeMs = source->getStaticJitterTimeMs();
+    const int64_t dynamicJbTimeMs = source->getDynamicJitterTimeMs();
+    const int64_t clockRate = source->mClockRate;
 
-    int64_t expiredTimeInJb = rtpTime + jitterTime;
-    bool isExpired = expiredTimeInJb <= (playedTimeRtp);
-    bool isTooLate200 = expiredTimeInJb < (playedTimeRtp - jitterTime);
-    bool isTooLate300 = expiredTimeInJb < (playedTimeRtp - (jitterTime * 3 / 2));
+    int64_t playedTimeMs = nowTimeMs - startTimeMs;
+    int64_t playedTimeRtp = source->mFirstRtpTime + MsToRtp(playedTimeMs, clockRate);
+
+    /**
+     * Based on experience with real commercial network services,
+     * 300 ms is a practical maximum jitter buffer time for a video RTP service.
+     */
+
+    /**
+     * The static (base) jitter is the propagation delay we are prepared to wait for;
+     * packets that miss it can be dropped. A shorter value gives a faster response
+     * but risks losing packets.
+     * Expected range: 50 ms ~ 1000 ms (300 ms is a practical upper bound).
+     */
+    const int64_t baseJbTimeRtp = MsToRtp(staticJbTimeMs, clockRate);
+    /**
+     * The dynamic jitter is the variance of the interarrival time, as defined in
+     * section 6.4.1 of RFC 3550, and acts as a moment-to-moment tolerance.
+     * Expected range: 0 ms ~ 150 ms (practically not above 300 ms).
+     */
+    const int64_t dynamicJbTimeRtp =                        // capped at 150 ms
+            std::min(MsToRtp(dynamicJbTimeMs, clockRate), MsToRtp(150, clockRate));
+    const int64_t jitterTimeRtp = baseJbTimeRtp + dynamicJbTimeRtp; // total jitter time
+
+    int64_t expiredTimeRtp = rtpTime + jitterTimeRtp;       // when this buffer expires (T)
+    int64_t diffTimeRtp = playedTimeRtp - expiredTimeRtp;
+    bool isExpired = (diffTimeRtp >= 0);                    // expired once T has passed
+    bool isFirstLineBroken = (diffTimeRtp > jitterTimeRtp); // (T + jitter) is the standard tolerance
+
+    int64_t finalMargin = dynamicJbTimeRtp * JITTER_MULTIPLE;
+    bool isSecondLineBroken = (diffTimeRtp > jitterTimeRtp + finalMargin); // the hard limit
 
     if (mShowQueueCnt < 20) {
         showCurrentQueue(queue);
-        printNowTimeUs(startTime, nowTime, playedTime);
-        printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
+        printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
+        printRTPTime(rtpTime, playedTimeRtp, expiredTimeRtp, isExpired);
         mShowQueueCnt++;
     }
 
@@ -157,17 +187,23 @@
         return NOT_ENOUGH_DATA;
     }
 
-    if (isTooLate200) {
-        ALOGW("=== WARNING === buffer arrived 200ms late. === WARNING === ");
-    }
+    if (isFirstLineBroken) {
+        if (isSecondLineBroken) {
+            ALOGW("buffer too late ... \t Diff in Jb=%lld \t "
+                    "Seq# %d \t ExpSeq# %d \t"
+                    "JitterMs %lld + (%lld * %.3f)",
+                    (long long)(diffTimeRtp),
+                    buffer->int32Data(), mNextExpectedSeqNo,
+                    (long long)staticJbTimeMs, (long long)dynamicJbTimeMs, JITTER_MULTIPLE + 1);
+            printNowTimeMs(startTimeMs, nowTimeMs, playedTimeMs);
+            printRTPTime(rtpTime, playedTimeRtp, expiredTimeRtp, isExpired);
 
-    if (isTooLate300) {
-        ALOGW("buffer arrived after 300ms ... \t Diff in Jb=%lld \t Seq# %d",
-                (long long)(playedTimeRtp - expiredTimeInJb), buffer->int32Data());
-        printNowTimeUs(startTime, nowTime, playedTime);
-        printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
-
-        mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTime);
+            mNextExpectedSeqNo = pickProperSeq(queue, firstRTPTime, playedTimeRtp, jitterTimeRtp);
+        }  else {
+            ALOGW("=== WARNING === buffer arrived after %lld + %lld = %lld ms === WARNING === ",
+                    (long long)staticJbTimeMs, (long long)dynamicJbTimeMs,
+                    (long long)RtpToMs(jitterTimeRtp, clockRate));
+        }
     }
 
     if (mNextExpectedSeqNoValid) {
@@ -578,17 +614,6 @@
     msg->post();
 }
 
-inline int64_t AHEVCAssembler::findRTPTime(
-        const uint32_t& firstRTPTime, const sp<ABuffer>& buffer) {
-    /* If you want to +, -, * rtpTime, recommend to declare rtpTime as int64_t.
-       Because rtpTime can be near UINT32_MAX. Beware the overflow. */
-    int64_t rtpTime = 0;
-    CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
-    // If the first overs 2^31 and rtp unders 2^31, the rtp value is overflowed one.
-    int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
-    return rtpTime | overflowMask;
-}
-
 int32_t AHEVCAssembler::pickProperSeq(const Queue *queue,
         uint32_t first, int64_t play, int64_t jit) {
     sp<ABuffer> buffer = *(queue->begin());
@@ -633,16 +658,6 @@
     return initSize - queue->size();
 }
 
-inline void AHEVCAssembler::printNowTimeUs(int64_t start, int64_t now, int64_t play) {
-    ALOGD("start=%lld, now=%lld, played=%lld",
-            (long long)start, (long long)now, (long long)play);
-}
-
-inline void AHEVCAssembler::printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp) {
-    ALOGD("rtp-time(JB)=%lld, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%lld expired=%d",
-            (long long)rtp, (long long)play, (long long)exp, isExp);
-}
-
 ARTPAssembler::AssemblyStatus AHEVCAssembler::assembleMore(
         const sp<ARTPSource> &source) {
     AssemblyStatus status = addNALUnit(source);
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.h b/media/libstagefright/rtsp/AHEVCAssembler.h
index bf1cded..e64b661 100644
--- a/media/libstagefright/rtsp/AHEVCAssembler.h
+++ b/media/libstagefright/rtsp/AHEVCAssembler.h
@@ -64,13 +64,10 @@
 
     void submitAccessUnit();
 
-    inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
     int32_t pickProperSeq(const Queue *q, uint32_t first, int64_t play, int64_t jit);
     bool recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
              size_t avail, float goodRatio);
     int32_t deleteUnitUnderSeq(Queue *queue, uint32_t seq);
-    void printNowTimeUs(int64_t start, int64_t now, int64_t play);
-    void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
 
     DISALLOW_EVIL_CONSTRUCTORS(AHEVCAssembler);
 };
diff --git a/media/libstagefright/rtsp/ARTPAssembler.h b/media/libstagefright/rtsp/ARTPAssembler.h
index 191f08e..f959c40 100644
--- a/media/libstagefright/rtsp/ARTPAssembler.h
+++ b/media/libstagefright/rtsp/ARTPAssembler.h
@@ -19,6 +19,9 @@
 #define A_RTP_ASSEMBLER_H_
 
 #include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <utils/List.h>
 #include <utils/RefBase.h>
 
@@ -61,12 +64,47 @@
     bool mShowQueue;
     int32_t mShowQueueCnt;
 
+    // Utility functions
+    inline int64_t findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer);
+    inline int64_t MsToRtp(int64_t ms, int64_t clockRate);
+    inline int64_t RtpToMs(int64_t rtp, int64_t clockRate);
+    inline void printNowTimeMs(int64_t start, int64_t now, int64_t play);
+    inline void printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp);
+
 private:
     int64_t mFirstFailureTimeUs;
 
     DISALLOW_EVIL_CONSTRUCTORS(ARTPAssembler);
 };
 
+inline int64_t ARTPAssembler::findRTPTime(const uint32_t& firstRTPTime, const sp<ABuffer>& buffer) {
+    /* rtpTime is handled as int64_t because it can be close to UINT32_MAX;
+       doing +, -, or * on it as a 32-bit value could overflow. */
+    int64_t rtpTime = 0;
+    CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+    // If firstRTPTime is above 2^31 and rtpTime is below 2^31, rtpTime has wrapped around.
+    int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
+    return rtpTime | overflowMask;
+}
+
+inline int64_t ARTPAssembler::MsToRtp(int64_t ms, int64_t clockRate) {
+    return ms * clockRate / 1000;
+}
+
+inline int64_t ARTPAssembler::RtpToMs(int64_t rtp, int64_t clockRate) {
+    return rtp * 1000 / clockRate;
+}
+
+inline void ARTPAssembler::printNowTimeMs(int64_t start, int64_t now, int64_t play) {
+    ALOGD("start=%lld, now=%lld, played=%lld",
+            (long long)start, (long long)now, (long long)play);
+}
+
+inline void ARTPAssembler::printRTPTime(int64_t rtp, int64_t play, int64_t exp, bool isExp) {
+    ALOGD("rtp-time(JB)=%lld, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%lld expired=%d",
+            (long long)rtp, (long long)play, (long long)exp, isExp);
+}
+
 }  // namespace android
 
 #endif  // A_RTP_ASSEMBLER_H_
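
Editor's note: findRTPTime() above corrects for 32-bit RTP timestamp wrap-around relative to the stream's first timestamp. A standalone check of that mask trick with a wrapped value; it simply exercises the expression shown in the header.

```cpp
#include <cstdint>
#include <cstdio>

// The same trick as findRTPTime(), isolated: if the stream's first RTP timestamp
// is above 2^31 and the current 32-bit timestamp is below 2^31, the timestamp has
// wrapped, so OR in bit 32 to keep the 64-bit value monotonic.
static int64_t unwrapRtpTime(uint32_t firstRTPTime, uint32_t rtpTime32) {
    int64_t rtpTime = rtpTime32;  // zero-extended, as the assembler effectively does
    int64_t overflowMask = (firstRTPTime & 0x80000000 & ~rtpTime) << 1;
    return rtpTime | overflowMask;
}

int main() {
    uint32_t first = 0xFFFFFF00u;    // stream started just below UINT32_MAX
    uint32_t wrapped = 0x00000100u;  // a later packet, after the 32-bit wrap
    // Prints 0x100000100 (= 0x100 + 2^32), so it still orders after `first`.
    printf("unwrapped = 0x%llx\n", (unsigned long long)unwrapRtpTime(first, wrapped));
    return 0;
}
```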
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 61c06d1..9509377 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -70,6 +70,8 @@
 
     bool mIsInjected;
 
+    // Timestamp of the most recent socket poll for this stream
+    int64_t mLastPollTimeUs;
     // RTCP Extension for CVO
     int mCVOExtMap; // will be set to 0 if cvo is not negotiated in sdp
 };
@@ -80,7 +82,7 @@
       mLastReceiverReportTimeUs(-1),
       mLastBitrateReportTimeUs(-1),
       mTargetBitrate(-1),
-      mJbTimeMs(300) {
+      mStaticJitterTimeMs(kStaticJitterTimeMs) {
 }
 
 ARTPConnection::~ARTPConnection() {
@@ -416,6 +418,7 @@
         return;
     }
 
+    int64_t nowUs = ALooper::GetNowUs();
     int res = select(maxSocket + 1, &rs, NULL, NULL, &tv);
 
     if (res > 0) {
@@ -425,6 +428,7 @@
                 ++it;
                 continue;
             }
+            it->mLastPollTimeUs = nowUs;
 
             status_t err = OK;
             if (FD_ISSET(it->mRTPSocket, &rs)) {
@@ -486,7 +490,6 @@
         }
     }
 
-    int64_t nowUs = ALooper::GetNowUs();
     checkRxBitrate(nowUs);
 
     if (mLastReceiverReportTimeUs <= 0
@@ -720,6 +723,7 @@
     buffer->setInt32Data(u16at(&data[2]));
     buffer->setRange(payloadOffset, size - payloadOffset);
 
+    source->putDynamicJitterData(rtpTime, s->mLastPollTimeUs);
     source->processRTPPacket(buffer);
 
     return OK;
@@ -1066,7 +1070,7 @@
         }
 
         source->setSelfID(mSelfID);
-        source->setJbTime(mJbTimeMs > 0 ? mJbTimeMs : 300);
+        source->setStaticJitterTimeMs(mStaticJitterTimeMs);
         info->mSources.add(srcId, source);
     } else {
         source = info->mSources.valueAt(index);
@@ -1086,8 +1090,8 @@
     mSelfID = selfID;
 }
 
-void ARTPConnection::setJbTime(const uint32_t jbTimeMs) {
-    mJbTimeMs = jbTimeMs;
+void ARTPConnection::setStaticJitterTimeMs(const uint32_t jbTimeMs) {
+    mStaticJitterTimeMs = jbTimeMs;
 }
 
 void ARTPConnection::setTargetBitrate(int32_t targetBitrate) {
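
ARTPConnection now timestamps each select() wakeup once and reuses that value as the arrival
time of every packet read in that iteration; the receive path then passes it to
putDynamicJitterData() together with the packet's RTP timestamp. A minimal sketch of the same
pattern, using a hypothetical Stream type in place of the real StreamInfo:

    // One timestamp per poll, stamped onto every readable stream.
    #include <sys/select.h>
    #include <sys/time.h>
    #include <cstdint>
    #include <vector>

    struct Stream {
        int rtpSocket;
        int64_t lastPollTimeUs;  // analogous to mLastPollTimeUs
    };

    static int64_t nowUs() {
        struct timeval tv;
        gettimeofday(&tv, nullptr);
        return tv.tv_sec * 1000000LL + tv.tv_usec;
    }

    void pollOnce(std::vector<Stream>& streams) {
        fd_set rs;
        FD_ZERO(&rs);
        int maxSocket = -1;
        for (const Stream& s : streams) {
            FD_SET(s.rtpSocket, &rs);
            if (s.rtpSocket > maxSocket) maxSocket = s.rtpSocket;
        }
        struct timeval tv = {0, 1000};   // 1 ms timeout
        int64_t pollTimeUs = nowUs();    // taken once, before select()
        if (select(maxSocket + 1, &rs, nullptr, nullptr, &tv) <= 0) return;
        for (Stream& s : streams) {
            if (FD_ISSET(s.rtpSocket, &rs)) {
                s.lastPollTimeUs = pollTimeUs;
                // a real receive path would now call
                // source->putDynamicJitterData(rtpTime, s.lastPollTimeUs)
            }
        }
    }
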
diff --git a/media/libstagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/ARTPConnection.h
index a37ac0e..ea0a374 100644
--- a/media/libstagefright/rtsp/ARTPConnection.h
+++ b/media/libstagefright/rtsp/ARTPConnection.h
@@ -46,7 +46,7 @@
     void injectPacket(int index, const sp<ABuffer> &buffer);
 
     void setSelfID(const uint32_t selfID);
-    void setJbTime(const uint32_t jbTimeMs);
+    void setStaticJitterTimeMs(const uint32_t jbTimeMs);
     void setTargetBitrate(int32_t targetBitrate);
 
     // Creates a pair of UDP datagram sockets bound to adjacent ports
@@ -89,7 +89,7 @@
     int32_t mSelfID;
     int32_t mTargetBitrate;
 
-    uint32_t mJbTimeMs;
+    uint32_t mStaticJitterTimeMs;
 
     int32_t mCumulativeBytes;
 
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index 3fdf8e4..402dc27 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -48,7 +48,6 @@
       mFirstRtpTime(0),
       mFirstSysTime(0),
       mClockRate(0),
-      mJbTimeMs(300), // default jitter buffer time is 300ms.
       mFirstSsrc(0),
       mHighestNackNumber(0),
       mID(id),
@@ -59,6 +58,7 @@
       mPrevNumBuffersReceived(0),
       mPrevExpectedForRR(0),
       mPrevNumBuffersReceivedForRR(0),
+      mStaticJbTimeMs(kStaticJitterTimeMs),
       mLastNTPTime(0),
       mLastNTPTimeUpdateUs(0),
       mIssueFIRRequests(false),
@@ -102,6 +102,11 @@
     if (mAssembler != NULL && !mAssembler->initCheck()) {
         mAssembler.clear();
     }
+
+    int32_t clockRate, numChannels;
+    ASessionDescription::ParseFormatDesc(desc.c_str(), &clockRate, &numChannels);
+    mClockRate = clockRate;
+    mJitterCalc = new JitterCalc(mClockRate);
 }
 
 static uint32_t AbsDiff(uint32_t seq1, uint32_t seq2) {
@@ -139,9 +144,8 @@
         mBaseSeqNumber = seqNum;
         mFirstRtpTime = firstRtpTime;
         mFirstSsrc = ssrc;
-        ALOGD("first-rtp arrived: first-rtp-time=%d, sys-time=%lld, seq-num=%u, ssrc=%d",
+        ALOGD("first-rtp arrived: first-rtp-time=%u, sys-time=%lld, seq-num=%u, ssrc=%d",
                 mFirstRtpTime, (long long)mFirstSysTime, mHighestSeqNumber, mFirstSsrc);
-        mClockRate = 90000;
         mQueue.push_back(buffer);
         return true;
     }
@@ -327,10 +331,11 @@
     data[18] = (mHighestSeqNumber >> 8) & 0xff;
     data[19] = mHighestSeqNumber & 0xff;
 
-    data[20] = 0x00;  // Interarrival jitter
-    data[21] = 0x00;
-    data[22] = 0x00;
-    data[23] = 0x00;
+    uint32_t jitterTime = getDynamicJitterTimeMs() * mClockRate / 1000;
+    data[20] = jitterTime >> 24;    // Interarrival jitter
+    data[21] = (jitterTime >> 16) & 0xff;
+    data[22] = (jitterTime >> 8) & 0xff;
+    data[23] = jitterTime & 0xff;
 
     uint32_t LSR = 0;
     uint32_t DLSR = 0;
@@ -508,15 +513,27 @@
     kSourceID = selfID;
 }
 
-void ARTPSource::setJbTime(const uint32_t jbTimeMs) {
-    mJbTimeMs = jbTimeMs;
-}
-
 void ARTPSource::setPeriodicFIR(bool enable) {
     ALOGD("setPeriodicFIR %d", enable);
     mIssueFIRRequests = enable;
 }
 
+uint32_t ARTPSource::getStaticJitterTimeMs() {
+    return mStaticJbTimeMs;
+}
+
+uint32_t ARTPSource::getDynamicJitterTimeMs() {
+    return mJitterCalc->getJitterMs();
+}
+
+void ARTPSource::setStaticJitterTimeMs(const uint32_t jbTimeMs) {
+    mStaticJbTimeMs = jbTimeMs;
+}
+
+void ARTPSource::putDynamicJitterData(uint32_t timeStamp, int64_t arrivalTime) {
+    mJitterCalc->putData(timeStamp, arrivalTime);
+}
+
 bool ARTPSource::isNeedToEarlyNotify() {
     uint32_t expected = mHighestSeqNumber - mBaseSeqNumber + 1;
     int32_t intervalExpectedInNow = expected - mPrevExpected;
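
The receiver report previously hard-coded the interarrival jitter field to zero; it now converts
the dynamic estimate from milliseconds to RTP clock units and stores it big-endian in octets
20..23 of the report block. A small sketch of that conversion and packing, with
writeInterarrivalJitter() as a hypothetical helper:

    // Pack a jitter estimate (ms) into an RTCP receiver report block
    // (RFC 3550, Section 6.4.1): convert to RTP timestamp units, big-endian.
    #include <cstdint>

    void writeInterarrivalJitter(uint8_t* data, uint32_t jitterMs, uint32_t clockRate) {
        uint32_t jitterTime = jitterMs * clockRate / 1000;  // ms -> RTP clock units
        data[20] = jitterTime >> 24;
        data[21] = (jitterTime >> 16) & 0xff;
        data[22] = (jitterTime >> 8) & 0xff;
        data[23] = jitterTime & 0xff;
    }
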
diff --git a/media/libstagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/ARTPSource.h
index c51fd8a..56011d3 100644
--- a/media/libstagefright/rtsp/ARTPSource.h
+++ b/media/libstagefright/rtsp/ARTPSource.h
@@ -27,8 +27,12 @@
 
 #include <map>
 
+#include "JitterCalculator.h"
+
 namespace android {
 
+const uint32_t kStaticJitterTimeMs = 50;   // default static jitter buffer time
+
 struct ABuffer;
 struct AMessage;
 struct ARTPAssembler;
@@ -64,8 +68,13 @@
     void setSeqNumToNACK(uint16_t seqNum, uint16_t mask, uint16_t nowJitterHeadSeqNum);
     uint32_t getSelfID();
     void setSelfID(const uint32_t selfID);
-    void setJbTime(const uint32_t jbTimeMs);
     void setPeriodicFIR(bool enable);
+
+    uint32_t getStaticJitterTimeMs();
+    uint32_t getDynamicJitterTimeMs();
+    void setStaticJitterTimeMs(const uint32_t jbTimeMs);
+    void putDynamicJitterData(uint32_t timeStamp, int64_t arrivalTime);
+
     bool isNeedToEarlyNotify();
     void notifyPktInfo(int32_t bitrate, bool isRegular);
     // FIR needs to be sent by missing packet or broken video image.
@@ -78,7 +87,6 @@
     int64_t mFirstSysTime;
     int32_t mClockRate;
 
-    uint32_t mJbTimeMs;
     int32_t mFirstSsrc;
     int32_t mHighestNackNumber;
 
@@ -96,6 +104,9 @@
     List<sp<ABuffer> > mQueue;
     sp<ARTPAssembler> mAssembler;
 
+    uint32_t mStaticJbTimeMs;
+    sp<JitterCalc> mJitterCalc;
+
     typedef struct infoNACK {
         uint16_t seqNum;
         uint16_t mask;
diff --git a/media/libstagefright/rtsp/Android.bp b/media/libstagefright/rtsp/Android.bp
index dcadbaf..34d1788 100644
--- a/media/libstagefright/rtsp/Android.bp
+++ b/media/libstagefright/rtsp/Android.bp
@@ -36,6 +36,7 @@
         "ARTPWriter.cpp",
         "ARTSPConnection.cpp",
         "ASessionDescription.cpp",
+        "JitterCalculator.cpp",
         "SDPLoader.cpp",
     ],
 
diff --git a/media/libstagefright/rtsp/JitterCalculator.cpp b/media/libstagefright/rtsp/JitterCalculator.cpp
new file mode 100644
index 0000000..466171c
--- /dev/null
+++ b/media/libstagefright/rtsp/JitterCalculator.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "JitterCalc"
+#include <utils/Log.h>
+
+#include "JitterCalculator.h"
+
+#include <stdlib.h>
+
+namespace android {
+
+JitterCalc::JitterCalc(int32_t clockRate)
+    : mClockRate(clockRate) {
+    init();
+}
+
+void JitterCalc::init() {
+    mJitterValueUs = 0;
+    mLastTimeStamp = 0;
+    mLastArrivalTimeUs = 0;
+}
+
+void JitterCalc::putData(int64_t rtpTime, int64_t arrivalTimeUs) {
+    if (mLastTimeStamp == 0) {
+        mLastTimeStamp = rtpTime;
+        mLastArrivalTimeUs = arrivalTimeUs;
+    }
+
+    const int64_t UINT32_MSB = 0x80000000;
+    int64_t tempLastTimeStamp = mLastTimeStamp;
+    // An RTP timestamp wraps around after UINT32_MAX, so account for a possible wrap here.
+    int64_t overflowMask = (mLastTimeStamp ^ rtpTime) & UINT32_MSB;
+    rtpTime |= ((overflowMask & ~rtpTime) << 1);
+    tempLastTimeStamp |= ((overflowMask & ~mLastTimeStamp) << 1);
+    ALOGV("Raw stamp \t\t now %llx \t\t last %llx",
+            (long long)rtpTime, (long long)tempLastTimeStamp);
+
+    int64_t diffTimeStampUs = abs(rtpTime - tempLastTimeStamp) * 1000000ll / mClockRate;
+    int64_t diffArrivalUs = abs(arrivalTimeUs - mLastArrivalTimeUs);
+    ALOGV("diffTimeStampus %lld \t\t diffArrivalUs %lld",
+            (long long)diffTimeStampUs, (long long)diffArrivalUs);
+
+    // Interarrival jitter estimate as defined in RFC 3550, Section 6.4.1.
+    mJitterValueUs = (mJitterValueUs * 15 + abs(diffTimeStampUs - diffArrivalUs)) / 16;
+    ALOGV("JitterUs %lld", (long long)mJitterValueUs);
+
+    mLastTimeStamp = (uint32_t)rtpTime;
+    mLastArrivalTimeUs = arrivalTimeUs;
+}
+
+uint32_t JitterCalc::getJitterMs() {
+    return mJitterValueUs / 1000;
+}
+
+}   // namespace android
+
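
JitterCalc::putData() is the integer form of the RFC 3550 interarrival jitter filter,
J = J + (|D(i-1,i)| - J) / 16. A standalone usage sketch with made-up packet timings
(90 kHz clock, 33 ms sender spacing, uneven arrival times); the numbers are illustrative only:

    // Feed (rtpTime, arrivalTimeUs) pairs through the RFC 3550 filter and
    // watch the running estimate converge.
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
        const int32_t clockRate = 90000;  // 90 kHz video clock
        int64_t jitterUs = 0, lastRtp = 0, lastArrivalUs = 0;
        bool first = true;

        const int64_t pkts[][2] = {  // {rtpTime, arrivalTimeUs}
            {3000, 0}, {6000, 34000}, {9000, 70000}, {12000, 99000},
        };
        for (const auto& p : pkts) {
            if (first) { lastRtp = p[0]; lastArrivalUs = p[1]; first = false; }
            int64_t dTsUs = llabs(p[0] - lastRtp) * 1000000LL / clockRate;
            int64_t dArrUs = llabs(p[1] - lastArrivalUs);
            jitterUs = (jitterUs * 15 + llabs(dTsUs - dArrUs)) / 16;  // J += (|D| - J)/16
            lastRtp = p[0];
            lastArrivalUs = p[1];
            printf("jitter = %lld us\n", (long long)jitterUs);
        }
        return 0;
    }
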
diff --git a/media/libstagefright/rtsp/JitterCalculator.h b/media/libstagefright/rtsp/JitterCalculator.h
new file mode 100644
index 0000000..03e43ff
--- /dev/null
+++ b/media/libstagefright/rtsp/JitterCalculator.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_JITTER_CALCULATOR_H_
+
+#define A_JITTER_CALCULATOR_H_
+
+#include <stdint.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class JitterCalc : public RefBase {
+private:
+    // RTP timestamp units per second (the media clock rate)
+    const int32_t mClockRate;
+
+    uint32_t mJitterValueUs;
+    uint32_t mLastTimeStamp;
+    int64_t mLastArrivalTimeUs;
+
+    void init();
+public:
+    JitterCalc(int32_t clockRate);
+    void putData(int64_t rtpTime, int64_t arrivalTime);
+    uint32_t getJitterMs();
+};
+
+}   // namespace android
+
+#endif  // A_JITTER_CALCULATOR_H_
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 1522746..590b379 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -799,7 +799,7 @@
             if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
             if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
             if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
-            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
+            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low-frequency, ");
             if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
             if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
             if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
@@ -812,12 +812,16 @@
             if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
             if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
             if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
-            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
-            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
-            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, " );
-            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, " );
-            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, " );
-            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, " );
+            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT) s.append("bottom-front-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER) s.append("bottom-front-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT) s.append("bottom-front-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) s.append("low-frequency-2, ");
+            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, ");
+            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, ");
             if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown,  ");
         } else {
             if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
@@ -835,9 +839,9 @@
             if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
             if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
             if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
-            if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low freq, ");
-            if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, " );
-            if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, " );
+            if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low-frequency, ");
+            if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, ");
+            if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, ");
             if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
             if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
             if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown,  ");
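
The channel-mask dump above is a bit-by-bit decode into comma-separated names. A compact sketch
of the same pattern, with a hypothetical name table standing in for the AUDIO_CHANNEL_OUT_*
constants:

    // Decode a channel bitmask into "name, name, ..." form.
    #include <cstdint>
    #include <string>

    std::string channelMaskToString(uint32_t mask) {
        static const struct { uint32_t bit; const char* name; } kNames[] = {
            {1u << 0, "front-left"}, {1u << 1, "front-right"},
            {1u << 2, "front-center"}, {1u << 3, "low-frequency"},
        };
        std::string s;
        uint32_t known = 0;
        for (const auto& e : kNames) {
            known |= e.bit;
            if (mask & e.bit) { s += e.name; s += ", "; }
        }
        if (mask & ~known) s += "unknown, ";
        if (s.size() >= 2) s.erase(s.size() - 2);  // drop trailing ", "
        return s;
    }
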