Merge "Speed up SimpleDecodingSource"
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 25a81eb..d24cb81 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -28,6 +28,10 @@
 
 #include "ACameraCaptureSession.inc"
 
+ACameraDevice::~ACameraDevice() {
+    mDevice->stopLooper();
+}
+
 namespace android {
 namespace acam {
 
@@ -116,14 +120,10 @@
         if (!isClosed()) {
             disconnectLocked(session);
         }
+        LOG_ALWAYS_FATAL_IF(mCbLooper != nullptr,
+                "CameraDevice looper should've been stopped before ~CameraDevice");
         mCurrentSession = nullptr;
-        if (mCbLooper != nullptr) {
-            mCbLooper->unregisterHandler(mHandler->id());
-            mCbLooper->stop();
-        }
     }
-    mCbLooper.clear();
-    mHandler.clear();
 }
 
 void
@@ -892,6 +892,16 @@
     return;
 }
 
+void CameraDevice::stopLooper() {
+    Mutex::Autolock _l(mDeviceLock);
+    if (mCbLooper != nullptr) {
+      mCbLooper->unregisterHandler(mHandler->id());
+      mCbLooper->stop();
+    }
+    mCbLooper.clear();
+    mHandler.clear();
+}
+
 CameraDevice::CallbackHandler::CallbackHandler(const char* id) : mId(id) {
 }
 
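This hunk moves looper teardown out of the CameraDevice destructor: the ACameraDevice wrapper now calls stopLooper() before the inner device is torn down, and the inner destructor merely asserts that this already happened. Below is a minimal sketch of the ordering this enforces, using simplified standard-C++ types and hypothetical names (the real code uses sp<>, ALooper, AHandler, and mDeviceLock); it is illustrative only, not framework code.

```cpp
#include <memory>

// Sketch: wrapper-owned teardown ordering, with hypothetical names.
struct InnerDevice {
    bool looperRunning = true;
    // Mirrors CameraDevice::stopLooper(): unregister the handler and stop
    // the callback thread before any destructor runs.
    void stopLooper() { looperRunning = false; }
    ~InnerDevice() {
        // The real destructor LOG_ALWAYS_FATAL_IFs if the looper is still
        // running, so callbacks can never race with destruction.
    }
};

struct Wrapper {  // stands in for ACameraDevice
    std::unique_ptr<InnerDevice> mDevice = std::make_unique<InnerDevice>();
    ~Wrapper() {
        mDevice->stopLooper();  // 1) stop callback delivery first
    }                           // 2) mDevice is destroyed after this body
};
```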
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index c92a95f..7a35bf0 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -109,6 +109,9 @@
 
     inline ACameraDevice* getWrapper() const { return mWrapper; };
 
+    // Stop the looper thread and unregister the handler
+    void stopLooper();
+
   private:
     friend ACameraCaptureSession;
     camera_status_t checkCameraClosedOrErrorLocked() const;
@@ -354,7 +357,7 @@
                   sp<ACameraMetadata> chars) :
             mDevice(new android::acam::CameraDevice(id, cb, chars, this)) {}
 
-    ~ACameraDevice() {};
+    ~ACameraDevice();
 
     /*******************
      * NDK public APIs *
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 9ec64e1..4a801a7 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -7696,14 +7696,14 @@
      * case, when the application configures a RAW stream, the camera device will make sure
      * the active physical camera will remain active to ensure consistent RAW output
      * behavior, and not switch to other physical cameras.</p>
-     * <p>To maintain backward compatibility, the capture request and result metadata tags
-     * required for basic camera functionalities will be solely based on the
-     * logical camera capabiltity. Other request and result metadata tags, on the other
-     * hand, will be based on current active physical camera. For example, the physical
-     * cameras' sensor sensitivity and lens capability could be different from each other.
-     * So when the application manually controls sensor exposure time/gain, or does manual
-     * focus control, it must checks the current active physical camera's exposure, gain,
-     * and focus distance range.</p>
+     * <p>The capture request and result metadata tags required for backward compatible camera
+     * functionalities will be solely based on the logical camera capability. On the other
+     * hand, the use of manual capture controls (sensor or post-processing) with a
+     * logical camera may result in unexpected behavior when the HAL decides to switch
+     * between physical cameras with different characteristics under the hood. For example,
+     * when the application manually sets exposure time and sensitivity while zooming in,
+     * the brightness of the camera images may suddenly change because HAL switches from one
+     * physical camera to the other.</p>
      *
      * @see ACAMERA_LENS_DISTORTION
      * @see ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
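The rewritten paragraph above warns that manual sensor controls on a logical multi-camera can behave unexpectedly when the HAL switches physical cameras (for example while zooming). As a hedged illustration of the kind of request it describes, here is a sketch that sets manual exposure through the public NDK request API; the tag and setter names are the standard NDK ones, the numeric values are arbitrary examples, and error handling is omitted.

```cpp
#include <cstdint>
#include <camera/NdkCaptureRequest.h>
#include <camera/NdkCameraMetadataTags.h>

// Sketch only: apply manual exposure settings to a capture request.
void setManualExposure(ACaptureRequest* request) {
    uint8_t aeMode = ACAMERA_CONTROL_AE_MODE_OFF;  // turn auto-exposure off
    int64_t exposureNs = 10000000;                 // 10 ms, example value
    int32_t sensitivity = 400;                     // ISO 400, example value
    ACaptureRequest_setEntry_u8(request, ACAMERA_CONTROL_AE_MODE, 1, &aeMode);
    ACaptureRequest_setEntry_i64(request, ACAMERA_SENSOR_EXPOSURE_TIME, 1, &exposureNs);
    ACaptureRequest_setEntry_i32(request, ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity);
    // On a logical multi-camera the HAL may switch physical sensors under
    // the hood, so these fixed values can render with different brightness.
}
```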
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index 8a3bb46..35c8355 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -45,13 +45,7 @@
 using namespace android;
 
 ACameraDevice::~ACameraDevice() {
-    Mutex::Autolock _l(mDevice->mDeviceLock);
-    if (mDevice->mCbLooper != nullptr) {
-      mDevice->mCbLooper->unregisterHandler(mDevice->mHandler->id());
-      mDevice->mCbLooper->stop();
-    }
-    mDevice->mCbLooper.clear();
-    mDevice->mHandler.clear();
+    mDevice->stopLooper();
 }
 
 namespace android {
@@ -140,13 +134,9 @@
             disconnectLocked(session);
         }
         mCurrentSession = nullptr;
-        if (mCbLooper != nullptr) {
-            mCbLooper->unregisterHandler(mHandler->id());
-            mCbLooper->stop();
-        }
+        LOG_ALWAYS_FATAL_IF(mCbLooper != nullptr,
+            "CameraDevice looper should've been stopped before ~CameraDevice");
     }
-    mCbLooper.clear();
-    mHandler.clear();
 }
 
 void
@@ -1410,6 +1400,16 @@
     }
 }
 
+void CameraDevice::stopLooper() {
+    Mutex::Autolock _l(mDeviceLock);
+    if (mCbLooper != nullptr) {
+      mCbLooper->unregisterHandler(mHandler->id());
+      mCbLooper->stop();
+    }
+    mCbLooper.clear();
+    mHandler.clear();
+}
+
 /**
   * Camera service callback implementation
   */
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index d8df568..9e034c4 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -133,6 +133,9 @@
     bool setDeviceMetadataQueues();
     inline ACameraDevice* getWrapper() const { return mWrapper; };
 
+    // Stop the looper thread and unregister the handler
+    void stopLooper();
+
   private:
     friend ACameraCaptureSession;
     friend ACameraDevice;
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 769895c..0cf277f 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -340,6 +340,7 @@
     aom_codec_flags_t flags;
     memset(&flags, 0, sizeof(aom_codec_flags_t));
 
+    ALOGV("Using libaom AV1 software decoder.");
     aom_codec_err_t err;
     if ((err = aom_codec_dec_init(mCodecCtx, aom_codec_av1_dx(), &cfg, 0))) {
         ALOGE("av1 decoder failed to initialize. (%d)", err);
diff --git a/media/codec2/components/gav1/Android.bp b/media/codec2/components/gav1/Android.bp
index da76e9d..0a0545d 100644
--- a/media/codec2/components/gav1/Android.bp
+++ b/media/codec2/components/gav1/Android.bp
@@ -6,4 +6,9 @@
     ],
 
     srcs: ["C2SoftGav1Dec.cpp"],
+    static_libs: ["libgav1"],
+
+    include_dirs: [
+        "external/libgav1/libgav1/",
+    ],
 }
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index f6dd14a..f5321ba 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -27,8 +27,6 @@
 
 namespace android {
 
-// TODO(vigneshv): This will be changed to c2.android.av1.decoder once this
-// component is fully functional.
 constexpr char COMPONENT_NAME[] = "c2.android.gav1.decoder";
 
 class C2SoftGav1Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -256,20 +254,497 @@
                              const std::shared_ptr<IntfImpl> &intfImpl)
     : SimpleC2Component(
           std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
-      mIntf(intfImpl) {}
+      mIntf(intfImpl),
+      mCodecCtx(nullptr) {
+  gettimeofday(&mTimeStart, nullptr);
+  gettimeofday(&mTimeEnd, nullptr);
+}
 
-c2_status_t C2SoftGav1Dec::onInit() { return C2_OK; }
-c2_status_t C2SoftGav1Dec::onStop() { return C2_OK; }
-void C2SoftGav1Dec::onReset() {}
-void C2SoftGav1Dec::onRelease(){};
-c2_status_t C2SoftGav1Dec::onFlush_sm() { return C2_OK; }
-void C2SoftGav1Dec::process(const std::unique_ptr<C2Work> & /*work*/,
-                            const std::shared_ptr<C2BlockPool> & /*pool*/) {}
-c2_status_t C2SoftGav1Dec::drain(
-    uint32_t /*drainMode*/, const std::shared_ptr<C2BlockPool> & /*pool*/) {
+C2SoftGav1Dec::~C2SoftGav1Dec() { onRelease(); }
+
+c2_status_t C2SoftGav1Dec::onInit() {
+  return initDecoder() ? C2_OK : C2_CORRUPTED;
+}
+
+c2_status_t C2SoftGav1Dec::onStop() {
+  mSignalledError = false;
+  mSignalledOutputEos = false;
   return C2_OK;
 }
 
+void C2SoftGav1Dec::onReset() {
+  (void)onStop();
+  c2_status_t err = onFlush_sm();
+  if (err != C2_OK) {
+    ALOGW("Failed to flush the av1 decoder. Trying to hard reset.");
+    destroyDecoder();
+    if (!initDecoder()) {
+      ALOGE("Hard reset failed.");
+    }
+  }
+}
+
+void C2SoftGav1Dec::onRelease() { destroyDecoder(); }
+
+c2_status_t C2SoftGav1Dec::onFlush_sm() {
+  Libgav1StatusCode status =
+      mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
+                              /*user_private_data=*/0);
+  if (status != kLibgav1StatusOk) {
+    ALOGE("Failed to flush av1 decoder. status: %d.", status);
+    return C2_CORRUPTED;
+  }
+
+  // Dequeue frame (if any) that was enqueued previously.
+  const libgav1::DecoderBuffer *buffer;
+  status = mCodecCtx->DequeueFrame(&buffer);
+  if (status != kLibgav1StatusOk) {
+    ALOGE("Failed to dequeue frame after flushing the av1 decoder. status: %d",
+          status);
+    return C2_CORRUPTED;
+  }
+
+  mSignalledError = false;
+  mSignalledOutputEos = false;
+
+  return C2_OK;
+}
+
+static int GetCPUCoreCount() {
+  int cpuCoreCount = 1;
+#if defined(_SC_NPROCESSORS_ONLN)
+  cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
+#else
+  // _SC_NPROC_ONLN must be defined...
+  cpuCoreCount = sysconf(_SC_NPROC_ONLN);
+#endif
+  CHECK(cpuCoreCount >= 1);
+  ALOGV("Number of CPU cores: %d", cpuCoreCount);
+  return cpuCoreCount;
+}
+
+bool C2SoftGav1Dec::initDecoder() {
+  mSignalledError = false;
+  mSignalledOutputEos = false;
+  mCodecCtx.reset(new libgav1::Decoder());
+
+  if (mCodecCtx == nullptr) {
+    ALOGE("mCodecCtx is null");
+    return false;
+  }
+
+  libgav1::DecoderSettings settings = {};
+  settings.threads = GetCPUCoreCount();
+
+  ALOGV("Using libgav1 AV1 software decoder.");
+  Libgav1StatusCode status = mCodecCtx->Init(&settings);
+  if (status != kLibgav1StatusOk) {
+    ALOGE("av1 decoder failed to initialize. status: %d.", status);
+    return false;
+  }
+
+  return true;
+}
+
+void C2SoftGav1Dec::destroyDecoder() { mCodecCtx = nullptr; }
+
+void fillEmptyWork(const std::unique_ptr<C2Work> &work) {
+  uint32_t flags = 0;
+  if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+    flags |= C2FrameData::FLAG_END_OF_STREAM;
+    ALOGV("signalling eos");
+  }
+  work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+  work->worklets.front()->output.buffers.clear();
+  work->worklets.front()->output.ordinal = work->input.ordinal;
+  work->workletsProcessed = 1u;
+}
+
+void C2SoftGav1Dec::finishWork(uint64_t index,
+                               const std::unique_ptr<C2Work> &work,
+                               const std::shared_ptr<C2GraphicBlock> &block) {
+  std::shared_ptr<C2Buffer> buffer =
+      createGraphicBuffer(block, C2Rect(mWidth, mHeight));
+  auto fillWork = [buffer, index](const std::unique_ptr<C2Work> &work) {
+    uint32_t flags = 0;
+    if ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
+        (c2_cntr64_t(index) == work->input.ordinal.frameIndex)) {
+      flags |= C2FrameData::FLAG_END_OF_STREAM;
+      ALOGV("signalling eos");
+    }
+    work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.buffers.push_back(buffer);
+    work->worklets.front()->output.ordinal = work->input.ordinal;
+    work->workletsProcessed = 1u;
+  };
+  if (work && c2_cntr64_t(index) == work->input.ordinal.frameIndex) {
+    fillWork(work);
+  } else {
+    finish(index, fillWork);
+  }
+}
+
+void C2SoftGav1Dec::process(const std::unique_ptr<C2Work> &work,
+                            const std::shared_ptr<C2BlockPool> &pool) {
+  work->result = C2_OK;
+  work->workletsProcessed = 0u;
+  work->worklets.front()->output.configUpdate.clear();
+  work->worklets.front()->output.flags = work->input.flags;
+  if (mSignalledError || mSignalledOutputEos) {
+    work->result = C2_BAD_VALUE;
+    return;
+  }
+
+  size_t inOffset = 0u;
+  size_t inSize = 0u;
+  C2ReadView rView = mDummyReadView;
+  if (!work->input.buffers.empty()) {
+    rView = work->input.buffers[0]->data().linearBlocks().front().map().get();
+    inSize = rView.capacity();
+    if (inSize && rView.error()) {
+      ALOGE("read view map failed %d", rView.error());
+      work->result = C2_CORRUPTED;
+      return;
+    }
+  }
+
+  bool codecConfig =
+      ((work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) != 0);
+  bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
+
+  ALOGV("in buffer attr. size %zu timestamp %d frameindex %d, flags %x", inSize,
+        (int)work->input.ordinal.timestamp.peeku(),
+        (int)work->input.ordinal.frameIndex.peeku(), work->input.flags);
+
+  if (codecConfig) {
+    fillEmptyWork(work);
+    return;
+  }
+
+  int64_t frameIndex = work->input.ordinal.frameIndex.peekll();
+  if (inSize) {
+    uint8_t *bitstream = const_cast<uint8_t *>(rView.data() + inOffset);
+    int32_t decodeTime = 0;
+    int32_t delay = 0;
+
+    GETTIME(&mTimeStart, nullptr);
+    TIME_DIFF(mTimeEnd, mTimeStart, delay);
+
+    const Libgav1StatusCode status =
+        mCodecCtx->EnqueueFrame(bitstream, inSize, frameIndex);
+
+    GETTIME(&mTimeEnd, nullptr);
+    TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
+    ALOGV("decodeTime=%4d delay=%4d\n", decodeTime, delay);
+
+    if (status != kLibgav1StatusOk) {
+      ALOGE("av1 decoder failed to decode frame. status: %d.", status);
+      work->result = C2_CORRUPTED;
+      work->workletsProcessed = 1u;
+      mSignalledError = true;
+      return;
+    }
+
+  } else {
+    const Libgav1StatusCode status =
+        mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
+                                /*user_private_data=*/0);
+    if (status != kLibgav1StatusOk) {
+      ALOGE("Failed to flush av1 decoder. status: %d.", status);
+      work->result = C2_CORRUPTED;
+      work->workletsProcessed = 1u;
+      mSignalledError = true;
+      return;
+    }
+  }
+
+  (void)outputBuffer(pool, work);
+
+  if (eos) {
+    drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
+    mSignalledOutputEos = true;
+  } else if (!inSize) {
+    fillEmptyWork(work);
+  }
+}
+
+static void copyOutputBufferToYV12Frame(uint8_t *dst, const uint8_t *srcY,
+                                        const uint8_t *srcU,
+                                        const uint8_t *srcV, size_t srcYStride,
+                                        size_t srcUStride, size_t srcVStride,
+                                        uint32_t width, uint32_t height) {
+  const size_t dstYStride = align(width, 16);
+  const size_t dstUVStride = align(dstYStride / 2, 16);
+  uint8_t *const dstStart = dst;
+
+  for (size_t i = 0; i < height; ++i) {
+    memcpy(dst, srcY, width);
+    srcY += srcYStride;
+    dst += dstYStride;
+  }
+
+  dst = dstStart + dstYStride * height;
+  for (size_t i = 0; i < height / 2; ++i) {
+    memcpy(dst, srcV, width / 2);
+    srcV += srcVStride;
+    dst += dstUVStride;
+  }
+
+  dst = dstStart + (dstYStride * height) + (dstUVStride * height / 2);
+  for (size_t i = 0; i < height / 2; ++i) {
+    memcpy(dst, srcU, width / 2);
+    srcU += srcUStride;
+    dst += dstUVStride;
+  }
+}
+
+static void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY,
+                                        const uint16_t *srcU,
+                                        const uint16_t *srcV, size_t srcYStride,
+                                        size_t srcUStride, size_t srcVStride,
+                                        size_t dstStride, size_t width,
+                                        size_t height) {
+  // Converting two lines at a time, slightly faster
+  for (size_t y = 0; y < height; y += 2) {
+    uint32_t *dstTop = (uint32_t *)dst;
+    uint32_t *dstBot = (uint32_t *)(dst + dstStride);
+    uint16_t *ySrcTop = (uint16_t *)srcY;
+    uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
+    uint16_t *uSrc = (uint16_t *)srcU;
+    uint16_t *vSrc = (uint16_t *)srcV;
+
+    uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
+    size_t x = 0;
+    for (; x < width - 3; x += 4) {
+      u01 = *((uint32_t *)uSrc);
+      uSrc += 2;
+      v01 = *((uint32_t *)vSrc);
+      vSrc += 2;
+
+      y01 = *((uint32_t *)ySrcTop);
+      ySrcTop += 2;
+      y23 = *((uint32_t *)ySrcTop);
+      ySrcTop += 2;
+      y45 = *((uint32_t *)ySrcBot);
+      ySrcBot += 2;
+      y67 = *((uint32_t *)ySrcBot);
+      ySrcBot += 2;
+
+      uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+      uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
+
+      *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
+      *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
+      *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
+      *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
+
+      *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
+      *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
+      *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
+      *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
+    }
+
+    // There should be at most 2 more pixels to process. Note that we don't
+    // need to consider the odd case since the buffer width is always even.
+    if (x < width) {
+      u01 = *uSrc;
+      v01 = *vSrc;
+      y01 = *((uint32_t *)ySrcTop);
+      y45 = *((uint32_t *)ySrcBot);
+      uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+      *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
+      *dstTop++ = ((y01 >> 16) << 10) | uv0;
+      *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
+      *dstBot++ = ((y45 >> 16) << 10) | uv0;
+    }
+
+    srcY += srcYStride * 2;
+    srcU += srcUStride;
+    srcV += srcVStride;
+    dst += dstStride * 2;
+  }
+}
+
+static void convertYUV420Planar16ToYUV420Planar(
+    uint8_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+    const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+    size_t srcVStride, size_t dstStride, size_t width, size_t height) {
+  uint8_t *dstY = (uint8_t *)dst;
+  size_t dstYSize = dstStride * height;
+  size_t dstUVStride = align(dstStride / 2, 16);
+  size_t dstUVSize = dstUVStride * height / 2;
+  uint8_t *dstV = dstY + dstYSize;
+  uint8_t *dstU = dstV + dstUVSize;
+
+  for (size_t y = 0; y < height; ++y) {
+    for (size_t x = 0; x < width; ++x) {
+      dstY[x] = (uint8_t)(srcY[x] >> 2);
+    }
+
+    srcY += srcYStride;
+    dstY += dstStride;
+  }
+
+  for (size_t y = 0; y < (height + 1) / 2; ++y) {
+    for (size_t x = 0; x < (width + 1) / 2; ++x) {
+      dstU[x] = (uint8_t)(srcU[x] >> 2);
+      dstV[x] = (uint8_t)(srcV[x] >> 2);
+    }
+
+    srcU += srcUStride;
+    srcV += srcVStride;
+    dstU += dstUVStride;
+    dstV += dstUVStride;
+  }
+}
+
+bool C2SoftGav1Dec::outputBuffer(const std::shared_ptr<C2BlockPool> &pool,
+                                 const std::unique_ptr<C2Work> &work) {
+  if (!(work && pool)) return false;
+
+  const libgav1::DecoderBuffer *buffer;
+  const Libgav1StatusCode status = mCodecCtx->DequeueFrame(&buffer);
+
+  if (status != kLibgav1StatusOk) {
+    ALOGE("av1 decoder DequeueFrame failed. status: %d.", status);
+    return false;
+  }
+
+  // |buffer| can be NULL even when the status is kLibgav1StatusOk. This is
+  // not an error. It could mean one of two things:
+  //  - The EnqueueFrame() call was a flush (called with nullptr).
+  //  - The enqueued frame did not produce any displayable frames.
+  if (!buffer) {
+    return false;
+  }
+
+  const int width = buffer->displayed_width[0];
+  const int height = buffer->displayed_height[0];
+  if (width != mWidth || height != mHeight) {
+    mWidth = width;
+    mHeight = height;
+
+    C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
+    std::vector<std::unique_ptr<C2SettingResult>> failures;
+    c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
+    if (err == C2_OK) {
+      work->worklets.front()->output.configUpdate.push_back(
+          C2Param::Copy(size));
+    } else {
+      ALOGE("Config update size failed");
+      mSignalledError = true;
+      work->result = C2_CORRUPTED;
+      work->workletsProcessed = 1u;
+      return false;
+    }
+  }
+
+  // TODO(vigneshv): Add support for monochrome videos since AV1 supports it.
+  CHECK(buffer->image_format == libgav1::kImageFormatYuv420);
+
+  std::shared_ptr<C2GraphicBlock> block;
+  uint32_t format = HAL_PIXEL_FORMAT_YV12;
+  if (buffer->bitdepth == 10) {
+    IntfImpl::Lock lock = mIntf->lock();
+    std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects =
+        mIntf->getDefaultColorAspects_l();
+
+    if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
+        defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
+        defaultColorAspects->transfer == C2Color::TRANSFER_ST2084) {
+      format = HAL_PIXEL_FORMAT_RGBA_1010102;
+    }
+  }
+  C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+
+  c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16), mHeight, format,
+                                            usage, &block);
+
+  if (err != C2_OK) {
+    ALOGE("fetchGraphicBlock for Output failed with status %d", err);
+    work->result = err;
+    return false;
+  }
+
+  C2GraphicView wView = block->map().get();
+
+  if (wView.error()) {
+    ALOGE("graphic view map failed %d", wView.error());
+    work->result = C2_CORRUPTED;
+    return false;
+  }
+
+  ALOGV("provided (%dx%d) required (%dx%d), out frameindex %d", block->width(),
+        block->height(), mWidth, mHeight, (int)buffer->user_private_data);
+
+  uint8_t *dst = const_cast<uint8_t *>(wView.data()[C2PlanarLayout::PLANE_Y]);
+  size_t srcYStride = buffer->stride[0];
+  size_t srcUStride = buffer->stride[1];
+  size_t srcVStride = buffer->stride[2];
+
+  if (buffer->bitdepth == 10) {
+    const uint16_t *srcY = (const uint16_t *)buffer->plane[0];
+    const uint16_t *srcU = (const uint16_t *)buffer->plane[1];
+    const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
+
+    if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
+      convertYUV420Planar16ToY410(
+          (uint32_t *)dst, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
+          srcVStride / 2, align(mWidth, 16), mWidth, mHeight);
+    } else {
+      convertYUV420Planar16ToYUV420Planar(dst, srcY, srcU, srcV, srcYStride / 2,
+                                          srcUStride / 2, srcVStride / 2,
+                                          align(mWidth, 16), mWidth, mHeight);
+    }
+  } else {
+    const uint8_t *srcY = (const uint8_t *)buffer->plane[0];
+    const uint8_t *srcU = (const uint8_t *)buffer->plane[1];
+    const uint8_t *srcV = (const uint8_t *)buffer->plane[2];
+    copyOutputBufferToYV12Frame(dst, srcY, srcU, srcV, srcYStride, srcUStride,
+                                srcVStride, mWidth, mHeight);
+  }
+  finishWork(buffer->user_private_data, work, std::move(block));
+  block = nullptr;
+  return true;
+}
+
+c2_status_t C2SoftGav1Dec::drainInternal(
+    uint32_t drainMode, const std::shared_ptr<C2BlockPool> &pool,
+    const std::unique_ptr<C2Work> &work) {
+  if (drainMode == NO_DRAIN) {
+    ALOGW("drain with NO_DRAIN: no-op");
+    return C2_OK;
+  }
+  if (drainMode == DRAIN_CHAIN) {
+    ALOGW("DRAIN_CHAIN not supported");
+    return C2_OMITTED;
+  }
+
+  Libgav1StatusCode status =
+      mCodecCtx->EnqueueFrame(/*data=*/nullptr, /*size=*/0,
+                              /*user_private_data=*/0);
+  if (status != kLibgav1StatusOk) {
+    ALOGE("Failed to flush av1 decoder. status: %d.", status);
+    return C2_CORRUPTED;
+  }
+
+  while (outputBuffer(pool, work)) {
+  }
+
+  if (drainMode == DRAIN_COMPONENT_WITH_EOS && work &&
+      work->workletsProcessed == 0u) {
+    fillEmptyWork(work);
+  }
+
+  return C2_OK;
+}
+
+c2_status_t C2SoftGav1Dec::drain(uint32_t drainMode,
+                                 const std::shared_ptr<C2BlockPool> &pool) {
+  return drainInternal(drainMode, pool, nullptr);
+}
+
 class C2SoftGav1Factory : public C2ComponentFactory {
  public:
   C2SoftGav1Factory()
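The new C2SoftGav1Dec component drives libgav1 through the Init/EnqueueFrame/DequeueFrame calls shown in the hunks above. The following standalone sketch condenses that decode loop; it assumes a decoder already initialized as in initDecoder(), mirrors only the calls this patch makes (including the three-argument EnqueueFrame), and skips the YUV output conversion.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include "libgav1/src/decoder.h"
#include "libgav1/src/decoder_settings.h"

// Sketch of the enqueue/dequeue pattern used by process()/outputBuffer().
// The third EnqueueFrame argument is caller private data (the frame index).
bool decodeOneAccessUnit(libgav1::Decoder* decoder, const uint8_t* data,
                         size_t size, int64_t frameIndex) {
    if (decoder->EnqueueFrame(data, size, frameIndex) != kLibgav1StatusOk) {
        fprintf(stderr, "EnqueueFrame failed\n");
        return false;
    }
    const libgav1::DecoderBuffer* buffer = nullptr;
    if (decoder->DequeueFrame(&buffer) != kLibgav1StatusOk) {
        fprintf(stderr, "DequeueFrame failed\n");
        return false;
    }
    // A null buffer with an OK status is legitimate: the enqueue was a flush
    // or produced no displayable frame yet.
    if (buffer != nullptr) {
        printf("decoded %dx%d, bitdepth %d\n", buffer->displayed_width[0],
               buffer->displayed_height[0], buffer->bitdepth);
    }
    return true;
}
```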
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index 40b217c..a7c08bb 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -18,6 +18,13 @@
 #define ANDROID_C2_SOFT_GAV1_DEC_H_
 
 #include <SimpleC2Component.h>
+#include "libgav1/src/decoder.h"
+#include "libgav1/src/decoder_settings.h"
+
+#define GETTIME(a, b) gettimeofday(a, b);
+#define TIME_DIFF(start, end, diff)     \
+    diff = (((end).tv_sec - (start).tv_sec) * 1000000) + \
+            ((end).tv_usec - (start).tv_usec);
 
 namespace android {
 
@@ -26,6 +33,7 @@
 
   C2SoftGav1Dec(const char* name, c2_node_id_t id,
                 const std::shared_ptr<IntfImpl>& intfImpl);
+  ~C2SoftGav1Dec();
 
   // Begin SimpleC2Component overrides.
   c2_status_t onInit() override;
@@ -41,6 +49,7 @@
 
  private:
   std::shared_ptr<IntfImpl> mIntf;
+  std::unique_ptr<libgav1::Decoder> mCodecCtx;
 
   uint32_t mWidth;
   uint32_t mHeight;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 6509a88..6dab70b 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -127,14 +127,14 @@
     }
 
     switch (mBitrateMode->value) {
-        case C2Config::BITRATE_VARIABLE:
-            mBitrateControlMode = VPX_VBR;
-            break;
         case C2Config::BITRATE_CONST:
-        default:
             mBitrateControlMode = VPX_CBR;
             break;
-        break;
+        case C2Config::BITRATE_VARIABLE:
+        [[fallthrough]];
+        default:
+            mBitrateControlMode = VPX_VBR;
+            break;
     }
 
     setCodecSpecificInterface();
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 90758f9..62ccd1b 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -275,7 +275,7 @@
         addParameter(
             DefineParam(mBitrateMode, C2_PARAMKEY_BITRATE_MODE)
                 .withDefault(new C2StreamBitrateModeTuning::output(
-                        0u, C2Config::BITRATE_CONST))
+                        0u, C2Config::BITRATE_VARIABLE))
                 .withFields({
                     C2F(mBitrateMode, value).oneOf({
                         C2Config::BITRATE_CONST, C2Config::BITRATE_VARIABLE })
diff --git a/media/codec2/components/xaac/C2SoftXaacDec.cpp b/media/codec2/components/xaac/C2SoftXaacDec.cpp
index a3ebadb..60ae93c 100644
--- a/media/codec2/components/xaac/C2SoftXaacDec.cpp
+++ b/media/codec2/components/xaac/C2SoftXaacDec.cpp
@@ -1309,69 +1309,84 @@
                                 &ui_exec_done);
     RETURN_IF_FATAL(err_code,  "IA_CMD_TYPE_DONE_QUERY");
 
-    if (ui_exec_done != 1) {
-        VOID* p_array;        // ITTIAM:buffer to handle gain payload
-        WORD32 buf_size = 0;  // ITTIAM:gain payload length
-        WORD32 bit_str_fmt = 1;
-        WORD32 gain_stream_flag = 1;
-
-        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
-                                    IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
-        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
-
-        err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
-                                    IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
-        RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
-
-        if (buf_size > 0) {
-            /*Set bitstream_split_format */
-            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
-                                      IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
-            RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
-            memcpy(mDrcInBuf, p_array, buf_size);
-            /* Set number of bytes to be processed */
-            err_code =
-                ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
-            RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
-            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
-                                      IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
-            RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
-            /* Execute process */
-            err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
-                                      IA_CMD_TYPE_INIT_CPY_BSF_BUFF, nullptr);
-            RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
-
-            mMpegDDRCPresent = 1;
-        }
-    }
-
-    /* How much buffer is used in input buffers */
+    int32_t num_preroll = 0;
     err_code = ixheaacd_dec_api(mXheaacCodecHandle,
-                                IA_API_CMD_GET_CURIDX_INPUT_BUF,
-                                0,
-                                bytesConsumed);
-    RETURN_IF_FATAL(err_code,  "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+                                IA_API_CMD_GET_CONFIG_PARAM,
+                                IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES,
+                                &num_preroll);
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES");
 
-    /* Get the output bytes */
-    err_code = ixheaacd_dec_api(mXheaacCodecHandle,
-                                IA_API_CMD_GET_OUTPUT_BYTES,
-                                0,
-                                outBytes);
-    RETURN_IF_FATAL(err_code,  "IA_API_CMD_GET_OUTPUT_BYTES");
+    {
+      int32_t preroll_frame_offset = 0;
 
-    if (mMpegDDRCPresent == 1) {
-        memcpy(mDrcInBuf, mOutputBuffer, *outBytes);
-        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
-        RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+        do {
+            if (ui_exec_done != 1) {
+                VOID* p_array;        // ITTIAM:buffer to handle gain payload
+                WORD32 buf_size = 0;  // ITTIAM:gain payload length
+                WORD32 bit_str_fmt = 1;
+                WORD32 gain_stream_flag = 1;
 
-        err_code =
-            ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, nullptr);
-        RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+                err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                            IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+                RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
 
-        memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+                err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                            IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+                RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
+
+                if (buf_size > 0) {
+                    /*Set bitstream_split_format */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                            IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    memcpy(mDrcInBuf, p_array, buf_size);
+                    /* Set number of bytes to be processed */
+                    err_code =
+                        ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                            IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    /* Execute process */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                            IA_CMD_TYPE_INIT_CPY_BSF_BUFF, nullptr);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+
+                    mMpegDDRCPresent = 1;
+                }
+            }
+
+            /* How much buffer is used in input buffers */
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+                                        IA_API_CMD_GET_CURIDX_INPUT_BUF,
+                                        0,
+                                        bytesConsumed);
+            RETURN_IF_FATAL(err_code,  "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+
+            /* Get the output bytes */
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+                                        IA_API_CMD_GET_OUTPUT_BYTES,
+                                        0,
+                                        outBytes);
+            RETURN_IF_FATAL(err_code,  "IA_API_CMD_GET_OUTPUT_BYTES");
+
+            if (mMpegDDRCPresent == 1) {
+                memcpy(mDrcInBuf, mOutputBuffer + preroll_frame_offset, *outBytes);
+                preroll_frame_offset += *outBytes;
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
+                RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+
+                err_code =
+                    ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, nullptr);
+                RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+
+                memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+            }
+            num_preroll--;
+        } while (num_preroll > 0);
     }
     return IA_NO_ERROR;
 }
diff --git a/media/codec2/core/include/C2Param.h b/media/codec2/core/include/C2Param.h
index cc8c17a..51d417a 100644
--- a/media/codec2/core/include/C2Param.h
+++ b/media/codec2/core/include/C2Param.h
@@ -176,9 +176,9 @@
             DIR_INPUT      = 0x00000000,
             DIR_OUTPUT     = 0x10000000,
 
-            IS_STREAM_FLAG  = 0x00100000,
-            STREAM_ID_MASK  = 0x03E00000,
-            STREAM_ID_SHIFT = 21,
+            IS_STREAM_FLAG  = 0x02000000,
+            STREAM_ID_MASK  = 0x01F00000,
+            STREAM_ID_SHIFT = 20,
             MAX_STREAM_ID   = STREAM_ID_MASK >> STREAM_ID_SHIFT,
             STREAM_MASK     = IS_STREAM_FLAG | STREAM_ID_MASK,
 
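The hunk above repacks the stream bits in the C2Param index: the 5-bit stream ID now sits in bits 20-24 and the is-stream flag moves to bit 25. A quick compile-time check of the derived constants, written as a hypothetical standalone sketch with the values copied from the new header:

```cpp
#include <cstdint>

constexpr uint32_t IS_STREAM_FLAG  = 0x02000000;  // bit 25
constexpr uint32_t STREAM_ID_MASK  = 0x01F00000;  // bits 20-24
constexpr uint32_t STREAM_ID_SHIFT = 20;

// MAX_STREAM_ID = STREAM_ID_MASK >> STREAM_ID_SHIFT = 0x1F = 31.
static_assert((STREAM_ID_MASK >> STREAM_ID_SHIFT) == 31, "5-bit stream ID");
// The flag and the ID field are disjoint and together form STREAM_MASK.
static_assert((IS_STREAM_FLAG & STREAM_ID_MASK) == 0, "fields do not overlap");
static_assert((IS_STREAM_FLAG | STREAM_ID_MASK) == 0x03F00000, "STREAM_MASK value");
```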
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/Android.bp b/media/codec2/hidl/1.0/vts/functional/audio/Android.bp
index 687754b..65f0d09 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/Android.bp
@@ -15,19 +15,19 @@
  */
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetAudioDecTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
+    name: "VtsHalMediaC2V1_0TargetAudioDecTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
     srcs: [
-        "VtsHidlC2V1_0TargetAudioDecTest.cpp",
+        "VtsHalMediaC2V1_0TargetAudioDecTest.cpp",
         //"media_audio_hidl_test_common.cpp"
     ],
 }
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetAudioEncTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
+    name: "VtsHalMediaC2V1_0TargetAudioEncTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
     srcs: [
-        "VtsHidlC2V1_0TargetAudioEncTest.cpp",
+        "VtsHalMediaC2V1_0TargetAudioEncTest.cpp",
         //"media_audio_hidl_test_common.cpp"
     ],
 }
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioEncTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/functional/common/Android.bp b/media/codec2/hidl/1.0/vts/functional/common/Android.bp
index da0061a..a011ba3 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/common/Android.bp
@@ -1,5 +1,5 @@
 cc_library_static {
-    name: "VtsMediaC2V1_0CommonUtil",
+    name: "VtsHalMediaC2V1_0CommonUtil",
     defaults: [
         "VtsHalTargetTestDefaults",
         "libcodec2-hidl-client-defaults",
@@ -17,14 +17,14 @@
 }
 
 cc_defaults {
-    name: "VtsMediaC2V1_0Defaults",
+    name: "VtsHalMediaC2V1_0Defaults",
     defaults: [
         "VtsHalTargetTestDefaults",
         "libcodec2-hidl-client-defaults",
     ],
 
     static_libs: [
-        "VtsMediaC2V1_0CommonUtil",
+        "VtsHalMediaC2V1_0CommonUtil",
     ],
 
     shared_libs: [
diff --git a/media/codec2/hidl/1.0/vts/functional/common/README.md b/media/codec2/hidl/1.0/vts/functional/common/README.md
index 3deab10..50e8356 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/README.md
+++ b/media/codec2/hidl/1.0/vts/functional/common/README.md
@@ -3,29 +3,29 @@
 #### master :
 Functionality of master is to enumerate all the Codec2 components available in C2 media service.
 
-usage: VtsHidlC2V1\_0TargetMasterTest -I default
+usage: VtsHalMediaC2V1\_0TargetMasterTest -I default
 
 #### component :
 Functionality of component test is to validate common functionality across all the Codec2 components available in C2 media service. For a standard C2 component, these tests are expected to pass.
 
-usage: VtsHidlC2V1\_0TargetComponentTest -I software -C <comp name>
-example: VtsHidlC2V1\_0TargetComponentTest -I software -C c2.android.vorbis.decoder
+usage: VtsHalMediaC2V1\_0TargetComponentTest -I software -C <comp name>
+example: VtsHalMediaC2V1\_0TargetComponentTest -I software -C c2.android.vorbis.decoder
 
 #### audio :
 Functionality of audio test is to validate audio specific functionality Codec2 components. The resource files for this test are taken from media/codec2/hidl/1.0/vts/functional/res. The path to these files on the device is required to be given for bitstream tests.
 
-usage: VtsHidlC2V1\_0TargetAudioDecTest -I default -C <comp name> -P /sdcard/res/
-usage: VtsHidlC2V1\_0TargetAudioEncTest -I software -C <comp name> -P /sdcard/res/
+usage: VtsHalMediaC2V1\_0TargetAudioDecTest -I default -C <comp name> -P /sdcard/media/
+usage: VtsHalMediaC2V1\_0TargetAudioEncTest -I software -C <comp name> -P /sdcard/media/
 
-example: VtsHidlC2V1\_0TargetAudioDecTest -I software -C c2.android.flac.decoder -P /sdcard/res/
-example: VtsHidlC2V1\_0TargetAudioEncTest -I software -C c2.android.opus.encoder -P /sdcard/res/
+example: VtsHalMediaC2V1\_0TargetAudioDecTest -I software -C c2.android.flac.decoder -P /sdcard/media/
+example: VtsHalMediaC2V1\_0TargetAudioEncTest -I software -C c2.android.opus.encoder -P /sdcard/media/
 
 #### video :
 Functionality of video test is to validate video specific functionality Codec2 components. The resource files for this test are taken from media/codec2/hidl/1.0/vts/functional/res. The path to these files on the device is required to be given for bitstream tests.
 
-usage: VtsHidlC2V1\_0TargetVideoDecTest -I default -C <comp name> -P /sdcard/res/
-usage: VtsHidlC2V1\_0TargetVideoEncTest -I software -C <comp name> -P /sdcard/res/
+usage: VtsHalMediaC2V1\_0TargetVideoDecTest -I default -C <comp name> -P /sdcard/media/
+usage: VtsHalMediaC2V1\_0TargetVideoEncTest -I software -C <comp name> -P /sdcard/media/
 
-example: VtsHidlC2V1\_0TargetVideoDecTest -I software -C c2.android.avc.decoder -P /sdcard/res/
-example: VtsHidlC2V1\_0TargetVideoEncTest -I software -C c2.android.vp9.encoder -P /sdcard/res/
+example: VtsHalMediaC2V1\_0TargetVideoDecTest -I software -C c2.android.avc.decoder -P /sdcard/media/
+example: VtsHalMediaC2V1\_0TargetVideoEncTest -I software -C c2.android.vp9.encoder -P /sdcard/media/
 
diff --git a/media/codec2/hidl/1.0/vts/functional/component/Android.bp b/media/codec2/hidl/1.0/vts/functional/component/Android.bp
index 4b913b6..7ec64ee 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/component/Android.bp
@@ -15,8 +15,8 @@
  */
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetComponentTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
-    srcs: ["VtsHidlC2V1_0TargetComponentTest.cpp"],
+    name: "VtsHalMediaC2V1_0TargetComponentTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
+    srcs: ["VtsHalMediaC2V1_0TargetComponentTest.cpp"],
 }
 
diff --git a/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
similarity index 95%
rename from media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
index 74548b5..9dc541c 100644
--- a/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/component/VtsHalMediaC2V1_0TargetComponentTest.cpp
@@ -144,8 +144,7 @@
 
     // Queueing an empty WorkBundle
     std::list<std::unique_ptr<C2Work>> workList;
-    err = mComponent->queue(&workList);
-    ASSERT_EQ(err, C2_OK);
+    mComponent->queue(&workList);
 
     err = mComponent->reset();
     ASSERT_EQ(err, C2_OK);
@@ -183,33 +182,23 @@
 // Test Multiple Start Stop Reset Test
 TEST_F(Codec2ComponentHidlTest, MultipleStartStopReset) {
     ALOGV("Multiple Start Stop and Reset Test");
-    c2_status_t err = C2_OK;
 
     for (size_t i = 0; i < MAX_RETRY; i++) {
-        err = mComponent->start();
-        ASSERT_EQ(err, C2_OK);
-
-        err = mComponent->stop();
-        ASSERT_EQ(err, C2_OK);
+        mComponent->start();
+        mComponent->stop();
     }
 
-    err = mComponent->start();
-    ASSERT_EQ(err, C2_OK);
+    ASSERT_EQ(mComponent->start(), C2_OK);
 
     for (size_t i = 0; i < MAX_RETRY; i++) {
-        err = mComponent->reset();
-        ASSERT_EQ(err, C2_OK);
+        mComponent->reset();
     }
 
-    err = mComponent->start();
-    ASSERT_EQ(err, C2_OK);
-
-    err = mComponent->stop();
-    ASSERT_EQ(err, C2_OK);
+    ASSERT_EQ(mComponent->start(), C2_OK);
+    ASSERT_EQ(mComponent->stop(), C2_OK);
 
     // Second stop should return error
-    err = mComponent->stop();
-    ASSERT_NE(err, C2_OK);
+    ASSERT_NE(mComponent->stop(), C2_OK);
 }
 
 // Test Component Release API
@@ -233,8 +222,7 @@
     ASSERT_EQ(failures.size(), 0u);
 
     for (size_t i = 0; i < MAX_RETRY; i++) {
-        err = mComponent->release();
-        ASSERT_EQ(err, C2_OK);
+        mComponent->release();
     }
 }
 
@@ -332,14 +320,12 @@
     timeConsumed = getNowUs() - startTime;
     ALOGV("mComponent->queue() timeConsumed=%" PRId64 " us", timeConsumed);
     CHECK_TIMEOUT(timeConsumed, QUEUE_TIME_OUT, "queue()");
-    ASSERT_EQ(err, C2_OK);
 
     startTime = getNowUs();
     err = mComponent->flush(C2Component::FLUSH_COMPONENT, &workList);
     timeConsumed = getNowUs() - startTime;
     ALOGV("mComponent->flush() timeConsumed=%" PRId64 " us", timeConsumed);
     CHECK_TIMEOUT(timeConsumed, FLUSH_TIME_OUT, "flush()");
-    ASSERT_EQ(err, C2_OK);
 
     startTime = getNowUs();
     err = mComponent->stop();
diff --git a/media/codec2/hidl/1.0/vts/functional/master/Android.bp b/media/codec2/hidl/1.0/vts/functional/master/Android.bp
index e164d68..53e323e 100644
--- a/media/codec2/hidl/1.0/vts/functional/master/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/master/Android.bp
@@ -15,8 +15,8 @@
  */
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetMasterTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
-    srcs: ["VtsHidlC2V1_0TargetMasterTest.cpp"],
+    name: "VtsHalMediaC2V1_0TargetMasterTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
+    srcs: ["VtsHalMediaC2V1_0TargetMasterTest.cpp"],
 }
 
diff --git a/media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp b/media/codec2/hidl/1.0/vts/functional/master/VtsHalMediaC2V1_0TargetMasterTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/master/VtsHalMediaC2V1_0TargetMasterTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/functional/video/Android.bp b/media/codec2/hidl/1.0/vts/functional/video/Android.bp
index 6e57ee7..be35b02 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/Android.bp
+++ b/media/codec2/hidl/1.0/vts/functional/video/Android.bp
@@ -15,14 +15,14 @@
  */
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetVideoDecTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
-    srcs: ["VtsHidlC2V1_0TargetVideoDecTest.cpp"],
+    name: "VtsHalMediaC2V1_0TargetVideoDecTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
+    srcs: ["VtsHalMediaC2V1_0TargetVideoDecTest.cpp"],
 }
 
 cc_test {
-    name: "VtsHidlC2V1_0TargetVideoEncTest",
-    defaults: ["VtsMediaC2V1_0Defaults"],
-    srcs: ["VtsHidlC2V1_0TargetVideoEncTest.cpp"],
+    name: "VtsHalMediaC2V1_0TargetVideoEncTest",
+    defaults: ["VtsHalMediaC2V1_0Defaults"],
+    srcs: ["VtsHalMediaC2V1_0TargetVideoEncTest.cpp"],
 }
 
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
similarity index 98%
rename from media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
index 33fa848..0e20b47 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoDecTest.cpp
@@ -46,6 +46,10 @@
     explicit LinearBuffer(const std::shared_ptr<C2LinearBlock>& block)
         : C2Buffer(
               {block->share(block->offset(), block->size(), ::C2Fence())}) {}
+
+    explicit LinearBuffer(const std::shared_ptr<C2LinearBlock>& block, size_t size)
+        : C2Buffer(
+              {block->share(block->offset(), size, ::C2Fence())}) {}
 };
 
 static ComponentTestEnvironment* gEnv = nullptr;
@@ -371,11 +375,12 @@
         ASSERT_EQ(eleStream.gcount(), size);
 
         work->input.buffers.clear();
+        auto alignedSize = ALIGN(size, PAGE_SIZE);
         if (size) {
             std::shared_ptr<C2LinearBlock> block;
             ASSERT_EQ(C2_OK,
                     linearPool->fetchLinearBlock(
-                        size, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
+                        alignedSize, {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE},
                         &block));
             ASSERT_TRUE(block);
 
@@ -385,13 +390,13 @@
                 fprintf(stderr, "C2LinearBlock::map() failed : %d", view.error());
                 break;
             }
-            ASSERT_EQ((size_t)size, view.capacity());
+            ASSERT_EQ((size_t)alignedSize, view.capacity());
             ASSERT_EQ(0u, view.offset());
-            ASSERT_EQ((size_t)size, view.size());
+            ASSERT_EQ((size_t)alignedSize, view.size());
 
             memcpy(view.base(), data, size);
 
-            work->input.buffers.emplace_back(new LinearBuffer(block));
+            work->input.buffers.emplace_back(new LinearBuffer(block, size));
             free(data);
         }
         work->worklets.clear();
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
similarity index 89%
rename from media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
index 6bcf840..c1f5a92 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHalMediaC2V1_0TargetVideoEncTest.cpp
@@ -284,15 +284,16 @@
                    std::list<std::unique_ptr<C2Work>>& workQueue,
                    std::list<uint64_t>& flushedIndices,
                    std::shared_ptr<C2BlockPool>& graphicPool,
-                   std::ifstream& eleStream, uint32_t frameID,
-                   uint32_t nFrames, uint32_t nWidth, int32_t nHeight,
-                   bool flushed = false,bool signalEOS = true) {
+                   std::ifstream& eleStream, bool& disableTest,
+                   uint32_t frameID, uint32_t nFrames, uint32_t nWidth,
+                   int32_t nHeight, bool flushed = false, bool signalEOS = true) {
     typedef std::unique_lock<std::mutex> ULock;
 
     uint32_t maxRetry = 0;
     int bytesCount = nWidth * nHeight * 3 >> 1;
     int32_t timestampIncr = ENCODER_TIMESTAMP_INCREMENT;
     uint64_t timestamp = 0;
+    c2_status_t err = C2_OK;
     while (1) {
         if (nFrames == 0) break;
         uint32_t flags = 0;
@@ -333,16 +334,21 @@
             ASSERT_EQ(eleStream.gcount(), bytesCount);
         }
         std::shared_ptr<C2GraphicBlock> block;
-        ASSERT_EQ(
-            C2_OK,
-            graphicPool->fetchGraphicBlock(
+        err = graphicPool->fetchGraphicBlock(
                 nWidth, nHeight, HAL_PIXEL_FORMAT_YV12,
-                {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block));
+                {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE}, &block);
+        if (err != C2_OK) {
+            fprintf(stderr, "fetchGraphicBlock failed : %d\n", err);
+            disableTest = true;
+            break;
+        }
+
         ASSERT_TRUE(block);
         // Graphic View
         C2GraphicView view = block->map().get();
         if (view.error() != C2_OK) {
             fprintf(stderr, "C2GraphicBlock::map() failed : %d", view.error());
+            disableTest = true;
             break;
         }
 
@@ -420,8 +426,16 @@
     ASSERT_EQ(mComponent->start(), C2_OK);
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       0, ENC_NUM_FRAMES, nWidth, nHeight, false, signalEOS));
+    // mDisableTest will be set if the buffer was not fetched properly.
+    // This may happen when the resolution is not supported but config succeeded.
+    // In these cases, we skip encoding the input stream.
+    if (mDisableTest) {
+        std::cout << "[   WARN   ] Test Disabled \n";
+        ASSERT_EQ(mComponent->stop(), C2_OK);
+        return;
+    }
 
     // If EOS is not sent, sending empty input with EOS flag
     inputFrames = ENC_NUM_FRAMES;
@@ -531,8 +545,17 @@
     ALOGV("mURL : %s", mURL);
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       0, numFramesFlushed, nWidth, nHeight));
+    // mDisableTest will be set if the buffer was not fetched properly.
+    // This may happen when the resolution is not supported but config succeeded.
+    // In these cases, we skip encoding the input stream.
+    if (mDisableTest) {
+        std::cout << "[   WARN   ] Test Disabled \n";
+        ASSERT_EQ(mComponent->stop(), C2_OK);
+        return;
+    }
+
     std::list<std::unique_ptr<C2Work>> flushedWork;
     c2_status_t err =
         mComponent->flush(C2Component::FLUSH_COMPONENT, &flushedWork);
@@ -561,10 +584,19 @@
     mFlushedIndices.clear();
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       numFramesFlushed, numFrames - numFramesFlushed,
                       nWidth, nHeight, true));
     eleStream.close();
+    // mDisableTest will be set if the buffer was not fetched properly.
+    // This may happen when the resolution is not supported but config succeeded.
+    // In these cases, we skip encoding the input stream.
+    if (mDisableTest) {
+        std::cout << "[   WARN   ] Test Disabled \n";
+        ASSERT_EQ(mComponent->stop(), C2_OK);
+        return;
+    }
+
     err = mComponent->flush(C2Component::FLUSH_COMPONENT, &flushedWork);
     ASSERT_EQ(err, C2_OK);
     ASSERT_NO_FATAL_FAILURE(
@@ -607,19 +639,19 @@
 
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       0, 1, nWidth, nHeight, false, false));
 
     // Feed larger input buffer.
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       1, 1, nWidth*2, nHeight*2, false, false));
 
     // Feed smaller input buffer.
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream,
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
                       2, 1, nWidth/2, nHeight/2, false, true));
 
     // blocking call to ensures application to Wait till all the inputs are
@@ -629,15 +661,13 @@
         waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue));
 
     if (mFramesReceived != 3) {
-        ALOGE("Input buffer count and Output buffer count mismatch");
-        ALOGE("framesReceived : %d inputFrames : 3", mFramesReceived);
-        ASSERT_TRUE(false);
+        std::cout << "[   WARN   ] Component didn't receive all buffers back \n";
+        ALOGW("framesReceived : %d inputFrames : 3", mFramesReceived);
     }
 
     if (mFailedWorkReceived == 0) {
-        ALOGE("Expected failed frame count mismatch");
-        ALOGE("failedFramesReceived : %d", mFailedWorkReceived);
-        ASSERT_TRUE(false);
+        std::cout << "[   WARN   ] Expected failed frame count mismatch \n";
+        ALOGW("failedFramesReceived : %d", mFailedWorkReceived);
     }
 
     ASSERT_EQ(mComponent->stop(), C2_OK);
@@ -665,8 +695,17 @@
 
     ASSERT_NO_FATAL_FAILURE(
         encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
-                      mFlushedIndices, mGraphicPool, eleStream, 0,
-                      MAX_INPUT_BUFFERS, nWidth, nHeight));
+                      mFlushedIndices, mGraphicPool, eleStream, mDisableTest,
+                      0, MAX_INPUT_BUFFERS, nWidth, nHeight, false, true));
+
+    // mDisableTest will be set if the buffer was not fetched properly.
+    // This may happen when the resolution is not supported but config succeeded.
+    // In these cases, we skip encoding the input stream.
+    if (mDisableTest) {
+        std::cout << "[   WARN   ] Test Disabled \n";
+        ASSERT_EQ(mComponent->stop(), C2_OK);
+        return;
+    }
 
     ALOGD("Waiting for input consumption");
     ASSERT_NO_FATAL_FAILURE(
@@ -676,6 +715,7 @@
     ASSERT_EQ(mComponent->stop(), C2_OK);
     ASSERT_EQ(mComponent->reset(), C2_OK);
 }
+
 INSTANTIATE_TEST_CASE_P(NonStdSizes, Codec2VideoEncResolutionTest, ::testing::Values(
     std::make_pair(52, 18),
     std::make_pair(365, 365),
diff --git a/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h
index dd45557..e37ca38 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h
@@ -23,6 +23,8 @@
 #define ENC_DEFAULT_FRAME_HEIGHT 288
 #define MAX_ITERATIONS 128
 
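+// Rounds _sz up to the next multiple of _align (assumes _align is a power of two).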
+#define ALIGN(_sz, _align) (((_sz) + ((_align) - 1)) & ~((_align) - 1))
+
 /*
  * Common video utils
  */
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index 3a93c2a..78d221e 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -36,6 +36,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <ui/Fence.h>
 #include <ui/GraphicBuffer.h>
+#include <utils/Thread.h>
 
 #include "C2OMXNode.h"
 
@@ -50,16 +51,128 @@
 
 }  // namespace
 
+class C2OMXNode::QueueThread : public Thread {
+public:
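+    // Batches emptyBuffer() work per component and submits it from this thread,
+    // so fence waits and Codec2 queue() calls happen off the caller's thread.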
+    QueueThread() : Thread(false) {}
+    ~QueueThread() override = default;
+    void queue(
+            const std::shared_ptr<Codec2Client::Component> &comp,
+            int fenceFd,
+            std::unique_ptr<C2Work> &&work,
+            android::base::unique_fd &&fd0,
+            android::base::unique_fd &&fd1) {
+        Mutexed<Jobs>::Locked jobs(mJobs);
+        auto it = jobs->queues.try_emplace(comp, comp, systemTime()).first;
+        it->second.workList.emplace_back(
+                std::move(work), fenceFd, std::move(fd0), std::move(fd1));
+        jobs->cond.broadcast();
+    }
+
+protected:
+    bool threadLoop() override {
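+        // For each component whose work has been pending for at least kIntervalNs,
+        // collect the batch, wait for its fences, and queue it to the component in one call.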
+        constexpr nsecs_t kIntervalNs = nsecs_t(10) * 1000 * 1000;  // 10ms
+        constexpr nsecs_t kWaitNs = kIntervalNs * 2;
+        for (int i = 0; i < 2; ++i) {
+            Mutexed<Jobs>::Locked jobs(mJobs);
+            nsecs_t nowNs = systemTime();
+            bool queued = false;
+            for (auto it = jobs->queues.begin(); it != jobs->queues.end(); ) {
+                Queue &queue = it->second;
+                if (queue.workList.empty()
+                        || nowNs - queue.lastQueuedTimestampNs < kIntervalNs) {
+                    ++it;
+                    continue;
+                }
+                std::shared_ptr<Codec2Client::Component> comp = queue.component.lock();
+                if (!comp) {
+                    it = jobs->queues.erase(it);
+                    continue;
+                }
+                std::list<std::unique_ptr<C2Work>> items;
+                std::vector<int> fenceFds;
+                std::vector<android::base::unique_fd> uniqueFds;
+                while (!queue.workList.empty()) {
+                    items.push_back(std::move(queue.workList.front().work));
+                    fenceFds.push_back(queue.workList.front().fenceFd);
+                    uniqueFds.push_back(std::move(queue.workList.front().fd0));
+                    uniqueFds.push_back(std::move(queue.workList.front().fd1));
+                    queue.workList.pop_front();
+                }
+
+                jobs.unlock();
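+                // Wait for fences and queue the batch without holding the lock,
+                // so callers of queue() are not blocked in the meantime.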
+                for (int fenceFd : fenceFds) {
+                    sp<Fence> fence(new Fence(fenceFd));
+                    fence->waitForever(LOG_TAG);
+                }
+                comp->queue(&items);
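+                // Release ownership of the fds now that the work has been queued.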
+                for (android::base::unique_fd &ufd : uniqueFds) {
+                    (void)ufd.release();
+                }
+                jobs.lock();
+
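+                // The queue map may have changed while unlocked; resume iteration after this component.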
+                it = jobs->queues.upper_bound(comp);
+                queued = true;
+            }
+            if (queued) {
+                return true;
+            }
+            if (i == 0) {
+                jobs.waitForConditionRelative(jobs->cond, kWaitNs);
+            }
+        }
+        return true;
+    }
+
+private:
+    struct WorkFence {
+        WorkFence(std::unique_ptr<C2Work> &&w, int fd) : work(std::move(w)), fenceFd(fd) {}
+
+        WorkFence(
+                std::unique_ptr<C2Work> &&w,
+                int fd,
+                android::base::unique_fd &&uniqueFd0,
+                android::base::unique_fd &&uniqueFd1)
+            : work(std::move(w)),
+              fenceFd(fd),
+              fd0(std::move(uniqueFd0)),
+              fd1(std::move(uniqueFd1)) {}
+
+        std::unique_ptr<C2Work> work;
+        int fenceFd;
+        android::base::unique_fd fd0;
+        android::base::unique_fd fd1;
+    };
+    struct Queue {
+        Queue(const std::shared_ptr<Codec2Client::Component> &comp, nsecs_t timestamp)
+            : component(comp), lastQueuedTimestampNs(timestamp) {}
+        Queue(const Queue &) = delete;
+        Queue &operator =(const Queue &) = delete;
+
+        std::weak_ptr<Codec2Client::Component> component;
+        std::list<WorkFence> workList;
+        nsecs_t lastQueuedTimestampNs;
+    };
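+    // Pending work per component; std::owner_less lets weak_ptrs serve as map keys.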
+    struct Jobs {
+        std::map<std::weak_ptr<Codec2Client::Component>,
+                 Queue,
+                 std::owner_less<std::weak_ptr<Codec2Client::Component>>> queues;
+        Condition cond;
+    };
+    Mutexed<Jobs> mJobs;
+};
+
 C2OMXNode::C2OMXNode(const std::shared_ptr<Codec2Client::Component> &comp)
     : mComp(comp), mFrameIndex(0), mWidth(0), mHeight(0), mUsage(0),
-      mAdjustTimestampGapUs(0), mFirstInputFrame(true) {
+      mAdjustTimestampGapUs(0), mFirstInputFrame(true),
+      mQueueThread(new QueueThread) {
     android_fdsan_set_error_level(ANDROID_FDSAN_ERROR_LEVEL_WARN_ALWAYS);
+    mQueueThread->run("C2OMXNode", PRIORITY_AUDIO);
 }
 
 status_t C2OMXNode::freeNode() {
     mComp.reset();
     android_fdsan_set_error_level(ANDROID_FDSAN_ERROR_LEVEL_WARN_ONCE);
-    return OK;
+    return mQueueThread->requestExitAndWait();
 }
 
 status_t C2OMXNode::sendCommand(OMX_COMMANDTYPE cmd, OMX_S32 param) {
@@ -216,11 +329,6 @@
 status_t C2OMXNode::emptyBuffer(
         buffer_id buffer, const OMXBuffer &omxBuf,
         OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
-    // TODO: better fence handling
-    if (fenceFd >= 0) {
-        sp<Fence> fence = new Fence(fenceFd);
-        fence->waitForever(LOG_TAG);
-    }
     std::shared_ptr<Codec2Client::Component> comp = mComp.lock();
     if (!comp) {
         return NO_INIT;
@@ -299,22 +407,8 @@
     }
     work->worklets.clear();
     work->worklets.emplace_back(new C2Worklet);
-    std::list<std::unique_ptr<C2Work>> items;
-    uint64_t index = work->input.ordinal.frameIndex.peeku();
-    items.push_back(std::move(work));
-
-    c2_status_t err = comp->queue(&items);
-    if (err != C2_OK) {
-        (void)fd0.release();
-        (void)fd1.release();
-        return UNKNOWN_ERROR;
-    }
-
-    mBufferIdsInUse.lock()->emplace(index, buffer);
-
-    // release ownership of the fds
-    (void)fd0.release();
-    (void)fd1.release();
+    mBufferIdsInUse.lock()->emplace(work->input.ordinal.frameIndex.peeku(), buffer);
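+    // The queue thread waits on the fence and submits the work to the component asynchronously.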
+    mQueueThread->queue(comp, fenceFd, std::move(work), std::move(fd0), std::move(fd1));
 
     return OK;
 }
diff --git a/media/codec2/sfplugin/C2OMXNode.h b/media/codec2/sfplugin/C2OMXNode.h
index 3ca6c0a..1717c96 100644
--- a/media/codec2/sfplugin/C2OMXNode.h
+++ b/media/codec2/sfplugin/C2OMXNode.h
@@ -113,6 +113,9 @@
     c2_cntr64_t mPrevCodecTimestamp; // adjusted (codec) timestamp for previous frame
 
     Mutexed<std::map<uint64_t, buffer_id>> mBufferIdsInUse;
+
+    class QueueThread;
+    sp<QueueThread> mQueueThread;
 };
 
 }  // namespace android
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 8be9a1d..0b4c2d7 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -220,7 +220,6 @@
         const std::shared_ptr<CCodecCallback> &callback)
     : mHeapSeqNum(-1),
       mCCodecCallback(callback),
-      mDelay(0),
       mFrameIndex(0u),
       mFirstValidFrameIndex(0u),
       mMetaMode(MODE_NONE),
@@ -814,7 +813,6 @@
 
     size_t numInputSlots = inputDelayValue + pipelineDelayValue + kSmoothnessFactor;
     size_t numOutputSlots = outputDelayValue + kSmoothnessFactor;
-    mDelay = inputDelayValue + pipelineDelayValue + outputDelayValue;
 
     // TODO: get this from input format
     bool secure = mComponent->getName().find(".secure") != std::string::npos;
@@ -888,6 +886,8 @@
 
         bool forceArrayMode = false;
         Mutexed<Input>::Locked input(mInput);
+        input->inputDelay = inputDelayValue;
+        input->pipelineDelay = pipelineDelayValue;
         input->numSlots = numInputSlots;
         input->extraBuffers.flush();
         input->numExtraSlots = 0u;
@@ -1054,6 +1054,7 @@
         }
 
         Mutexed<Output>::Locked output(mOutput);
+        output->outputDelay = outputDelayValue;
         output->numSlots = numOutputSlots;
         if (graphic) {
             if (outputSurface) {
@@ -1075,8 +1076,7 @@
                     outputGeneration);
         }
 
-        if (oStreamFormat.value == C2BufferData::LINEAR
-                && mComponentName.find("c2.qti.") == std::string::npos) {
+        if (oStreamFormat.value == C2BufferData::LINEAR) {
             // WORKAROUND: if we're using early CSD workaround we convert to
             //             array mode, to appease apps assuming the output
             //             buffers to be of the same size.
@@ -1128,8 +1128,9 @@
     }
 
     C2StreamBufferTypeSetting::output oStreamFormat(0u);
-    c2_status_t err = mComponent->query({ &oStreamFormat }, {}, C2_DONT_BLOCK, nullptr);
-    if (err != C2_OK) {
+    C2PrependHeaderModeSetting prepend(PREPEND_HEADER_TO_NONE);
+    c2_status_t err = mComponent->query({ &oStreamFormat, &prepend }, {}, C2_DONT_BLOCK, nullptr);
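+    // C2_BAD_INDEX is tolerated: the component may not support C2PrependHeaderModeSetting.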
+    if (err != C2_OK && err != C2_BAD_INDEX) {
         return UNKNOWN_ERROR;
     }
     size_t numInputSlots = mInput.lock()->numSlots;
@@ -1169,7 +1170,7 @@
                             mName, buffer->capacity(), config->size());
                 }
             } else if (oStreamFormat.value == C2BufferData::LINEAR && i == 0
-                    && mComponentName.find("c2.qti.") == std::string::npos) {
+                        && (!prepend || prepend.value == PREPEND_HEADER_TO_NONE)) {
                 // WORKAROUND: Some apps expect CSD available without queueing
                 //             any input. Queue an empty buffer to get the CSD.
                 buffer->setRange(0, 0);
@@ -1377,25 +1378,35 @@
                         (void)mPipelineWatcher.lock()->outputDelay(outputDelay.value);
 
                         bool outputBuffersChanged = false;
-                        Mutexed<Output>::Locked output(mOutput);
-                        output->outputDelay = outputDelay.value;
-                        size_t numOutputSlots = outputDelay.value + kSmoothnessFactor;
-                        if (output->numSlots < numOutputSlots) {
-                            output->numSlots = numOutputSlots;
-                            if (output->buffers->isArrayMode()) {
-                                OutputBuffersArray *array =
-                                    (OutputBuffersArray *)output->buffers.get();
-                                ALOGV("[%s] onWorkDone: growing output buffer array to %zu",
-                                      mName, numOutputSlots);
-                                array->grow(numOutputSlots);
-                                outputBuffersChanged = true;
+                        size_t numOutputSlots = 0;
+                        {
+                            Mutexed<Output>::Locked output(mOutput);
+                            output->outputDelay = outputDelay.value;
+                            numOutputSlots = outputDelay.value + kSmoothnessFactor;
+                            if (output->numSlots < numOutputSlots) {
+                                output->numSlots = numOutputSlots;
+                                if (output->buffers->isArrayMode()) {
+                                    OutputBuffersArray *array =
+                                        (OutputBuffersArray *)output->buffers.get();
+                                    ALOGV("[%s] onWorkDone: growing output buffer array to %zu",
+                                          mName, numOutputSlots);
+                                    array->grow(numOutputSlots);
+                                    outputBuffersChanged = true;
+                                }
                             }
+                            numOutputSlots = output->numSlots;
                         }
-                        output.unlock();
 
                         if (outputBuffersChanged) {
                             mCCodecCallback->onOutputBuffersChanged();
                         }
+
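+                        // The surface must allow dequeuing enough buffers to cover the
+                        // output slots plus the reorder and rendering depths.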
+                        uint32_t depth = mReorderStash.lock()->depth();
+                        Mutexed<OutputSurface>::Locked output(mOutputSurface);
+                        output->maxDequeueBuffers = numOutputSlots + depth + kRenderingDepth;
+                        if (output->surface) {
+                            output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+                        }
                     }
                 }
                 break;
@@ -1620,7 +1631,12 @@
     // When client pushed EOS, we want all the work to be done quickly.
     // Otherwise, component may have stalled work due to input starvation up to
     // the sum of the delay in the pipeline.
-    size_t n = mInputMetEos ? 0 : mDelay;
+    size_t n = 0;
+    if (!mInputMetEos) {
+        size_t outputDelay = mOutput.lock()->outputDelay;
+        Mutexed<Input>::Locked input(mInput);
+        n = input->inputDelay + input->pipelineDelay + outputDelay;
+    }
     return mPipelineWatcher.lock()->elapsed(PipelineWatcher::Clock::now(), n);
 }
 
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index ae57678..ee3455d 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -228,8 +228,6 @@
     QueueSync mQueueSync;
     std::vector<std::unique_ptr<C2Param>> mParamsToBeSet;
 
-    size_t mDelay;
-
     struct Input {
         Input();
 
@@ -306,6 +304,7 @@
                 const C2WorkOrdinalStruct &ordinal);
         void defer(const Entry &entry);
         bool hasPending() const;
+        uint32_t depth() const { return mDepth; }
 
     private:
         std::list<Entry> mPending;
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 5ebd5bd..26c702d 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -439,6 +439,10 @@
             });
 }
 
+size_t BuffersArrayImpl::arraySize() const {
+    return mBuffers.size();
+}
+
 // InputBuffersArray
 
 void InputBuffersArray::initialize(
@@ -883,11 +887,24 @@
             mAlloc = [format = mFormat, size] {
                 return new LocalLinearBuffer(format, new ABuffer(size));
             };
+            ALOGD("[%s] reallocating with linear buffer of size %u", mName, size);
             break;
         }
 
-        // TODO: add support
-        case C2BufferData::GRAPHIC:         [[fallthrough]];
+        case C2BufferData::GRAPHIC: {
+            // This is only called for RawGraphicOutputBuffers.
+            mAlloc = [format = mFormat,
+                      lbp = LocalBufferPool::Create(kMaxLinearBufferSize * mImpl.arraySize())] {
+                return ConstGraphicBlockBuffer::AllocateEmpty(
+                        format,
+                        [lbp](size_t capacity) {
+                            return lbp->newBuffer(capacity);
+                        });
+            };
+            ALOGD("[%s] reallocating with graphic buffer: format = %s",
+                  mName, mFormat->debugString().c_str());
+            break;
+        }
 
         case C2BufferData::INVALID:         [[fallthrough]];
         case C2BufferData::LINEAR_CHUNKS:   [[fallthrough]];
diff --git a/media/codec2/sfplugin/CCodecBuffers.h b/media/codec2/sfplugin/CCodecBuffers.h
index e4f2809..2cb6b81 100644
--- a/media/codec2/sfplugin/CCodecBuffers.h
+++ b/media/codec2/sfplugin/CCodecBuffers.h
@@ -478,6 +478,11 @@
      */
     size_t numClientBuffers() const;
 
+    /**
+     * Return the size of the array.
+     */
+    size_t arraySize() const;
+
 private:
     std::string mImplName; ///< name for debugging
     const char *mName; ///< C-string version of name
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 40160c7..7334834 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -629,7 +629,7 @@
 // static
 std::shared_ptr<C2Mapper::ProfileLevelMapper>
 C2Mapper::GetProfileLevelMapper(std::string mediaType) {
-    std::transform(mediaType.begin(), mediaType.begin(), mediaType.end(), ::tolower);
+    std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
     if (mediaType == MIMETYPE_AUDIO_AAC) {
         return std::make_shared<AacProfileLevelMapper>();
     } else if (mediaType == MIMETYPE_VIDEO_AVC) {
@@ -657,7 +657,7 @@
 // static
 std::shared_ptr<C2Mapper::ProfileLevelMapper>
 C2Mapper::GetHdrProfileLevelMapper(std::string mediaType, bool isHdr10Plus) {
-    std::transform(mediaType.begin(), mediaType.begin(), mediaType.end(), ::tolower);
+    std::transform(mediaType.begin(), mediaType.end(), mediaType.begin(), ::tolower);
     if (mediaType == MIMETYPE_VIDEO_HEVC) {
         return std::make_shared<HevcProfileLevelMapper>(true, isHdr10Plus);
     } else if (mediaType == MIMETYPE_VIDEO_VP9) {
diff --git a/media/codec2/vndk/platform/C2BqBuffer.cpp b/media/codec2/vndk/platform/C2BqBuffer.cpp
index 5fa48a8..8304f74 100644
--- a/media/codec2/vndk/platform/C2BqBuffer.cpp
+++ b/media/codec2/vndk/platform/C2BqBuffer.cpp
@@ -413,6 +413,7 @@
         }
 
         sp<GraphicBuffer> &slotBuffer = mBuffers[slot];
+        uint32_t outGeneration;
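+        // Generation number reported by requestBuffer(); used to seed mGeneration lazily below.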
         if (bufferNeedsReallocation || !slotBuffer) {
             if (!slotBuffer) {
                 slotBuffer = new GraphicBuffer();
@@ -421,7 +422,7 @@
             // instead of a new allocation.
             Return<void> transResult = mProducer->requestBuffer(
                     slot,
-                    [&status, &slotBuffer](
+                    [&status, &slotBuffer, &outGeneration](
                             HStatus hStatus,
                             HBuffer const& hBuffer,
                             uint32_t generationNumber){
@@ -429,17 +430,23 @@
                                 h2b(hBuffer, &slotBuffer) &&
                                 slotBuffer) {
                             slotBuffer->setGenerationNumber(generationNumber);
+                            outGeneration = generationNumber;
                         } else {
                             status = android::BAD_VALUE;
                         }
                     });
             if (!transResult.isOk()) {
+                slotBuffer.clear();
                 return C2_BAD_VALUE;
             } else if (status != android::NO_ERROR) {
                 slotBuffer.clear();
                 (void)mProducer->cancelBuffer(slot, hFenceWrapper.getHandle()).isOk();
                 return C2_BAD_VALUE;
             }
+            if (mGeneration == 0) {
+                // getting generation # lazily due to dequeue failure.
+                mGeneration = outGeneration;
+            }
         }
         if (slotBuffer) {
             ALOGV("buffer wraps %llu %d", (unsigned long long)mProducerId, slot);
@@ -563,6 +570,10 @@
             producerId = static_cast<uint64_t>(transResult);
             // TODO: provide generation number from parameter.
             haveGeneration = getGenerationNumber(producer, &generation);
+            if (!haveGeneration) {
+                ALOGW("get generationNumber failed %llu",
+                      (unsigned long long)producerId);
+            }
         }
         int migrated = 0;
         {
@@ -580,10 +591,10 @@
                 }
             }
             int32_t oldGeneration = mGeneration;
-            if (producer && haveGeneration) {
+            if (producer) {
                 mProducer = producer;
                 mProducerId = producerId;
-                mGeneration = generation;
+                mGeneration = haveGeneration ? generation : 0;
             } else {
                 mProducer = nullptr;
                 mProducerId = 0;
@@ -591,7 +602,7 @@
                 ALOGW("invalid producer producer(%d), generation(%d)",
                       (bool)producer, haveGeneration);
             }
-            if (mProducer) { // migrate buffers
+            if (mProducer && haveGeneration) { // migrate buffers
                 for (int i = 0; i < NUM_BUFFER_SLOTS; ++i) {
                     std::shared_ptr<C2BufferQueueBlockPoolData> data =
                             mPoolDatas[i].lock();
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 8004e75..36cab1d 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -5053,15 +5053,15 @@
     mCurrentSampleInfoCount = smplcnt;
     offset += 4;
     size -= 4;
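+    // A non-zero default sample info size means there is no per-sample size table to read here.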
+    if (mCurrentDefaultSampleInfoSize != 0) {
+        ALOGV("@@@@ using default sample info size of %d", mCurrentDefaultSampleInfoSize);
+        return OK;
+    }
     if(smplcnt > size) {
         ALOGW("b/124525515 - smplcnt(%u) > size(%ld)", (unsigned int)smplcnt, (unsigned long)size);
         android_errorWriteLog(0x534e4554, "124525515");
         return -EINVAL;
     }
-    if (mCurrentDefaultSampleInfoSize != 0) {
-        ALOGV("@@@@ using default sample info size of %d", mCurrentDefaultSampleInfoSize);
-        return OK;
-    }
     if (smplcnt > mCurrentSampleInfoAllocSize) {
         uint8_t * newPtr =  (uint8_t*) realloc(mCurrentSampleInfoSizes, smplcnt);
         if (newPtr == NULL) {
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index a6cc45b..366cc87 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -89,7 +89,11 @@
     if (mAudioEndpoint.isFreeRunning()) {
         //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
         // Update data queue based on the timing model.
-        int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
+        // Jitter in the DSP can cause late writes to the FIFO.
+        // This might be caused by resampling.
+        // We want to read the FIFO after the latest possible time
+        // that the DSP could have written the data.
+        int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
         // TODO refactor, maybe use setRemoteCounter()
         mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
     }
@@ -139,7 +143,7 @@
                 // the writeCounter might have just advanced in the background,
                 // causing us to sleep until a later burst.
                 int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
-                wakeTime = mClockModel.convertPositionToTime(nextPosition);
+                wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
             }
                 break;
             default:
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index d26b352..9abdf53 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -19,12 +19,11 @@
 #include <log/log.h>
 
 #include <stdint.h>
+#include <algorithm>
 
 #include "utility/AudioClock.h"
 #include "IsochronousClockModel.h"
 
-#define MIN_LATENESS_NANOS (10 * AAUDIO_NANOS_PER_MICROSECOND)
-
 using namespace aaudio;
 
 IsochronousClockModel::IsochronousClockModel()
@@ -32,7 +31,7 @@
         , mMarkerNanoTime(0)
         , mSampleRate(48000)
         , mFramesPerBurst(64)
-        , mMaxLatenessInNanos(0)
+        , mMaxMeasuredLatenessNanos(0)
         , mState(STATE_STOPPED)
 {
 }
@@ -41,8 +40,7 @@
 }
 
 void IsochronousClockModel::setPositionAndTime(int64_t framePosition, int64_t nanoTime) {
-    ALOGV("setPositionAndTime(%lld, %lld)",
-          (long long) framePosition, (long long) nanoTime);
+    ALOGV("setPositionAndTime, %lld, %lld", (long long) framePosition, (long long) nanoTime);
     mMarkerFramePosition = framePosition;
     mMarkerNanoTime = nanoTime;
 }
@@ -54,7 +52,9 @@
 }
 
 void IsochronousClockModel::stop(int64_t nanoTime) {
-    ALOGV("stop(nanos = %lld)\n", (long long) nanoTime);
+    ALOGD("stop(nanos = %lld) max lateness = %d micros\n",
+        (long long) nanoTime,
+        (int) (mMaxMeasuredLatenessNanos / 1000));
     setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
     // TODO should we set position?
     mState = STATE_STOPPED;
@@ -69,9 +69,10 @@
 }
 
 void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
-//    ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
-//         (long long)framePosition,
-//         (long long)nanoTime);
+    mTimestampCount++;
+    // Log position and time in CSV format so we can import it easily into spreadsheets.
+    //ALOGD("%s() CSV, %d, %lld, %lld", __func__,
+          //mTimestampCount, (long long)framePosition, (long long)nanoTime);
     int64_t framesDelta = framePosition - mMarkerFramePosition;
     int64_t nanosDelta = nanoTime - mMarkerNanoTime;
     if (nanosDelta < 1000) {
@@ -110,22 +111,54 @@
             // Earlier than expected timestamp.
             // This data is probably more accurate, so use it.
             // Or we may be drifting due to a fast HW clock.
-//            int microsDelta = (int) (nanosDelta / 1000);
-//            int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
-//            ALOGD("processTimestamp() - STATE_RUNNING - %7d < %7d so %4d micros EARLY",
-//                 microsDelta, expectedMicrosDelta, (expectedMicrosDelta - microsDelta));
+            //int microsDelta = (int) (nanosDelta / 1000);
+            //int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
+            //ALOGD("%s() - STATE_RUNNING - #%d, %4d micros EARLY",
+                //__func__, mTimestampCount, expectedMicrosDelta - microsDelta);
 
             setPositionAndTime(framePosition, nanoTime);
-        } else if (nanosDelta > (expectedNanosDelta + mMaxLatenessInNanos)) {
-            // Later than expected timestamp.
-//            int microsDelta = (int) (nanosDelta / 1000);
-//            int expectedMicrosDeadline = (int) ((expectedNanosDelta + mMaxLatenessInNanos) / 1000);
-//            ALOGD("processTimestamp() - STATE_RUNNING - %7d > %7d so %4d micros LATE",
-//                  microsDelta, expectedMicrosDeadline, (microsDelta - expectedMicrosDeadline));
+        } else if (nanosDelta > (expectedNanosDelta + (2 * mBurstPeriodNanos))) {
+            // In this case we do not update mMaxMeasuredLatenessNanos because it
+            // would force it too high.
+            // mMaxMeasuredLatenessNanos should stay between 1 and 2 times mBurstPeriodNanos.
+            //int32_t measuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
+            //ALOGD("%s() - STATE_RUNNING - #%d, lateness %d - max %d = %4d micros VERY LATE",
+                  //__func__,
+                  //mTimestampCount,
+                  //measuredLatenessNanos / 1000,
+                  //mMaxMeasuredLatenessNanos / 1000,
+                  //(measuredLatenessNanos - mMaxMeasuredLatenessNanos) / 1000
+                  //);
 
-            // When we are late it may be because of preemption in the kernel or
-            //  we may be drifting due to a slow HW clock.
-            setPositionAndTime(framePosition,  nanoTime - mMaxLatenessInNanos);
+            // This typically happens when we are modelling a service instead of a DSP.
+            setPositionAndTime(framePosition,  nanoTime - (2 * mBurstPeriodNanos));
+        } else if (nanosDelta > (expectedNanosDelta + mMaxMeasuredLatenessNanos)) {
+            //int32_t previousLatenessNanos = mMaxMeasuredLatenessNanos;
+            mMaxMeasuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
+
+            //ALOGD("%s() - STATE_RUNNING - #%d, newmax %d - oldmax %d = %4d micros LATE",
+                  //__func__,
+                  //mTimestampCount,
+                  //mMaxMeasuredLatenessNanos / 1000,
+                  //previousLatenessNanos / 1000,
+                  //(mMaxMeasuredLatenessNanos - previousLatenessNanos) / 1000
+                  //);
+
+            // When we are late, it may be because of preemption in the kernel,
+            // or timing jitter caused by resampling in the DSP,
+            // or we may be drifting due to a slow HW clock.
+            // We add a slight drift value just in case there is actual long-term drift
+            // forward caused by a slower clock.
+            // If the clock is faster, then the model will get pushed earlier
+            // by the code in the preceding branch.
+            // The two opposing forces should allow the model to track the real clock
+            // over a long time.
+            int64_t driftingTime = mMarkerNanoTime + expectedNanosDelta + kDriftNanos;
+            setPositionAndTime(framePosition,  driftingTime);
+            //ALOGD("%s() - #%d, max lateness = %d micros",
+                  //__func__,
+                  //mTimestampCount,
+                  //(int) (mMaxMeasuredLatenessNanos / 1000));
         }
         break;
     default:
@@ -145,9 +178,12 @@
     update();
 }
 
+// Update expected lateness based on sampleRate and framesPerBurst
 void IsochronousClockModel::update() {
-    int64_t nanosLate = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
-    mMaxLatenessInNanos = (nanosLate > MIN_LATENESS_NANOS) ? nanosLate : MIN_LATENESS_NANOS;
+    mBurstPeriodNanos = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
+    // Timestamps may be late by up to a burst because we are randomly sampling the time period
+    // after the DSP position is actually updated.
+    mMaxMeasuredLatenessNanos = mBurstPeriodNanos;
 }
 
 int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
@@ -190,11 +226,25 @@
     return position;
 }
 
+int32_t IsochronousClockModel::getLateTimeOffsetNanos() const {
+    // This will never be < 0 because mMaxMeasuredLatenessNanos starts at
+    // mBurstPeriodNanos and only gets bigger.
+    return (mMaxMeasuredLatenessNanos - mBurstPeriodNanos) + kExtraLatenessNanos;
+}
+
+int64_t IsochronousClockModel::convertPositionToLatestTime(int64_t framePosition) const {
+    return convertPositionToTime(framePosition) + getLateTimeOffsetNanos();
+}
+
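+// Treat nanoTime as the latest possible observation time: back off by the late offset
+// before converting, which yields a conservative (earlier) position estimate.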
+int64_t IsochronousClockModel::convertLatestTimeToPosition(int64_t nanoTime) const {
+    return convertTimeToPosition(nanoTime - getLateTimeOffsetNanos());
+}
+
 void IsochronousClockModel::dump() const {
     ALOGD("mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
     ALOGD("mMarkerNanoTime      = %lld", (long long) mMarkerNanoTime);
     ALOGD("mSampleRate          = %6d", mSampleRate);
     ALOGD("mFramesPerBurst      = %6d", mFramesPerBurst);
-    ALOGD("mMaxLatenessInNanos  = %6d", mMaxLatenessInNanos);
+    ALOGD("mMaxMeasuredLatenessNanos = %6d", mMaxMeasuredLatenessNanos);
     ALOGD("mState               = %6d", mState);
 }
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 46ca48e..582bf4e 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -18,6 +18,7 @@
 #define ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
 
 #include <stdint.h>
+#include "utility/AudioClock.h"
 
 namespace aaudio {
 
@@ -79,6 +80,15 @@
     int64_t convertPositionToTime(int64_t framePosition) const;
 
     /**
+     * Calculate the latest estimated time that the stream will reach the given position.
+     * The more jittery the clock is, the later this time will be.
+     *
+     * @param framePosition
+     * @return time in nanoseconds
+     */
+    int64_t convertPositionToLatestTime(int64_t framePosition) const;
+
+    /**
      * Calculate an estimated position where the stream will be at the specified time.
      *
      * @param nanoTime time of interest
@@ -87,6 +97,18 @@
     int64_t convertTimeToPosition(int64_t nanoTime) const;
 
     /**
+     * Calculate the corresponding estimated position based on the specified time being
+     * the latest possible time.
+     *
+     * For the same nanoTime, this may return an earlier position than
+     * convertTimeToPosition().
+     *
+     * @param nanoTime
+     * @return position in frames
+     */
+    int64_t convertLatestTimeToPosition(int64_t nanoTime) const;
+
+    /**
      * @param framesDelta difference in frames
      * @return duration in nanoseconds
      */
@@ -101,6 +123,9 @@
     void dump() const;
 
 private:
+
+    int32_t getLateTimeOffsetNanos() const;
+
     enum clock_model_state_t {
         STATE_STOPPED,
         STATE_STARTING,
@@ -108,13 +133,23 @@
         STATE_RUNNING
     };
 
+    // Amount of time to drift forward when we get a late timestamp.
+    // This value was calculated to allow tracking of a clock with 50 ppm error.
+    static constexpr int32_t   kDriftNanos         =  10 * 1000;
+    // TODO review value of kExtraLatenessNanos
+    static constexpr int32_t   kExtraLatenessNanos = 100 * 1000;
+
     int64_t             mMarkerFramePosition;
     int64_t             mMarkerNanoTime;
     int32_t             mSampleRate;
     int32_t             mFramesPerBurst;
-    int32_t             mMaxLatenessInNanos;
+    int32_t             mBurstPeriodNanos;
+    // Includes mBurstPeriodNanos because we sample randomly over time.
+    int32_t             mMaxMeasuredLatenessNanos;
     clock_model_state_t mState;
 
+    int32_t             mTimestampCount = 0;
+
     void update();
 };
 
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index c19fcf6..0a2850f 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -302,6 +302,8 @@
         for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
             pContext->pBundledContext->bandGaindB[i] = EQNB_5BandSoftPresets[i];
         }
+        pContext->pBundledContext->effectProcessCalled      = 0;
+        pContext->pBundledContext->effectInDrain            = 0;
 
         ALOGV("\tEffectCreate - Calling LvmBundle_init");
         ret = LvmBundle_init(pContext);
@@ -394,6 +396,8 @@
 
     // Clear the instantiated flag for the effect
     // protect against the case where an effect is un-instantiated without being disabled
+
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
     if(pContext->EffectType == LVM_BASS_BOOST) {
         ALOGV("\tEffectRelease LVM_BASS_BOOST Clearing global intstantiated flag");
         pSessionContext->bBassInstantiated = LVM_FALSE;
@@ -418,12 +422,16 @@
     } else if(pContext->EffectType == LVM_VOLUME) {
         ALOGV("\tEffectRelease LVM_VOLUME Clearing global intstantiated flag");
         pSessionContext->bVolumeInstantiated = LVM_FALSE;
-        if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE){
+        // There is no samplesToExitCount for volume so we also use the drain flag to check
+        // if we should decrement the effects enabled.
+        if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE
+                || (effectInDrain & 1 << LVM_VOLUME) != 0) {
             pContext->pBundledContext->NumberEffectsEnabled--;
         }
     } else {
         ALOGV("\tLVM_ERROR : EffectRelease : Unsupported effect\n\n\n\n\n\n\n");
     }
+    effectInDrain &= ~(1 << pContext->EffectType); // no need to drain if released
 
     // Disable effect, in this case ignore errors (return codes)
     // if an effect has already been disabled
@@ -3124,8 +3132,9 @@
 
 int Effect_setEnabled(EffectContext *pContext, bool enabled)
 {
-    ALOGV("\tEffect_setEnabled() type %d, enabled %d", pContext->EffectType, enabled);
-
+    ALOGV("%s effectType %d, enabled %d, currently enabled %d", __func__,
+            pContext->EffectType, enabled, pContext->pBundledContext->NumberEffectsEnabled);
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
     if (enabled) {
         // Bass boost or Virtualizer can be temporarily disabled if playing over device speaker due
         // to their nature.
@@ -3139,6 +3148,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountBb <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_BASS_BOOST);
                 pContext->pBundledContext->SamplesToExitCountBb =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bBassEnabled = LVM_TRUE;
@@ -3152,6 +3162,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountEq <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_EQUALIZER);
                 pContext->pBundledContext->SamplesToExitCountEq =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bEqualizerEnabled = LVM_TRUE;
@@ -3164,6 +3175,7 @@
                 if(pContext->pBundledContext->SamplesToExitCountVirt <= 0){
                     pContext->pBundledContext->NumberEffectsEnabled++;
                 }
+                effectInDrain &= ~(1 << LVM_VIRTUALIZER);
                 pContext->pBundledContext->SamplesToExitCountVirt =
                      (LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
                 pContext->pBundledContext->bVirtualizerEnabled = LVM_TRUE;
@@ -3174,7 +3186,10 @@
                     ALOGV("\tEffect_setEnabled() LVM_VOLUME is already enabled");
                     return -EINVAL;
                 }
-                pContext->pBundledContext->NumberEffectsEnabled++;
+                if ((effectInDrain & 1 << LVM_VOLUME) == 0) {
+                    pContext->pBundledContext->NumberEffectsEnabled++;
+                }
+                effectInDrain &= ~(1 << LVM_VOLUME);
                 pContext->pBundledContext->bVolumeEnabled = LVM_TRUE;
                 break;
             default:
@@ -3192,6 +3207,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bBassEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_BASS_BOOST;
                 break;
             case LVM_EQUALIZER:
                 if (pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE) {
@@ -3199,6 +3215,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_EQUALIZER;
                 break;
             case LVM_VIRTUALIZER:
                 if (pContext->pBundledContext->bVirtualizerEnabled == LVM_FALSE) {
@@ -3206,6 +3223,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_VIRTUALIZER;
                 break;
             case LVM_VOLUME:
                 if (pContext->pBundledContext->bVolumeEnabled == LVM_FALSE) {
@@ -3213,6 +3231,7 @@
                     return -EINVAL;
                 }
                 pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
+                effectInDrain |= 1 << LVM_VOLUME;
                 break;
             default:
                 ALOGV("\tEffect_setEnabled() invalid effect type");
@@ -3283,6 +3302,38 @@
         ALOGV("\tLVM_ERROR : Effect_process() ERROR NULL INPUT POINTER OR FRAME COUNT IS WRONG");
         return -EINVAL;
     }
+
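+    // If process() is called again for an effect before every enabled effect has run,
+    // some effect in the chain has stopped being called; finish draining any disabled
+    // effects that never got their final process() call so NumberEffectsEnabled stays consistent.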
+    int &effectProcessCalled = pContext->pBundledContext->effectProcessCalled;
+    int &effectInDrain = pContext->pBundledContext->effectInDrain;
+    if ((effectProcessCalled & 1 << pContext->EffectType) != 0) {
+        ALOGW("Effect %d already called", pContext->EffectType);
+        const int undrainedEffects = effectInDrain & ~effectProcessCalled;
+        if ((undrainedEffects & 1 << LVM_BASS_BOOST) != 0) {
+            ALOGW("Draining BASS_BOOST");
+            pContext->pBundledContext->SamplesToExitCountBb = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_BASS_BOOST);
+        }
+        if ((undrainedEffects & 1 << LVM_EQUALIZER) != 0) {
+            ALOGW("Draining EQUALIZER");
+            pContext->pBundledContext->SamplesToExitCountEq = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_EQUALIZER);
+        }
+        if ((undrainedEffects & 1 << LVM_VIRTUALIZER) != 0) {
+            ALOGW("Draining VIRTUALIZER");
+            pContext->pBundledContext->SamplesToExitCountVirt = 0;
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_VIRTUALIZER);
+        }
+        if ((undrainedEffects & 1 << LVM_VOLUME) != 0) {
+            ALOGW("Draining VOLUME");
+            --pContext->pBundledContext->NumberEffectsEnabled;
+            effectInDrain &= ~(1 << LVM_VOLUME);
+        }
+    }
+    effectProcessCalled |= 1 << pContext->EffectType;
+
     if ((pContext->pBundledContext->bBassEnabled == LVM_FALSE)&&
         (pContext->EffectType == LVM_BASS_BOOST)){
         //ALOGV("\tEffect_process() LVM_BASS_BOOST Effect is not enabled");
@@ -3291,9 +3342,12 @@
             //ALOGV("\tEffect_process: Waiting to turn off BASS_BOOST, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountBb);
         }
-        if(pContext->pBundledContext->SamplesToExitCountBb <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountBb <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_BASS_BOOST) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_BASS_BOOST);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_BASS_BOOST");
         }
     }
@@ -3301,7 +3355,10 @@
         (pContext->EffectType == LVM_VOLUME)){
         //ALOGV("\tEffect_process() LVM_VOLUME Effect is not enabled");
         status = -ENODATA;
-        pContext->pBundledContext->NumberEffectsEnabled--;
+        if ((effectInDrain & 1 << LVM_VOLUME) != 0) {
+            pContext->pBundledContext->NumberEffectsEnabled--;
+            effectInDrain &= ~(1 << LVM_VOLUME);
+        }
     }
     if ((pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE)&&
         (pContext->EffectType == LVM_EQUALIZER)){
@@ -3311,9 +3368,12 @@
             //ALOGV("\tEffect_process: Waiting to turn off EQUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountEq);
         }
-        if(pContext->pBundledContext->SamplesToExitCountEq <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountEq <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_EQUALIZER) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_EQUALIZER);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_EQUALIZER");
         }
     }
@@ -3326,9 +3386,12 @@
             //ALOGV("\tEffect_process: Waiting for to turn off VIRTUALIZER, %d samples left",
             //    pContext->pBundledContext->SamplesToExitCountVirt);
         }
-        if(pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
+        if (pContext->pBundledContext->SamplesToExitCountVirt <= 0) {
             status = -ENODATA;
-            pContext->pBundledContext->NumberEffectsEnabled--;
+            if ((effectInDrain & 1 << LVM_VIRTUALIZER) != 0) {
+                pContext->pBundledContext->NumberEffectsEnabled--;
+                effectInDrain &= ~(1 << LVM_VIRTUALIZER);
+            }
             ALOGV("\tEffect_process() this is the last frame for LVM_VIRTUALIZER");
         }
     }
@@ -3337,8 +3400,18 @@
         pContext->pBundledContext->NumberEffectsCalled++;
     }
 
-    if(pContext->pBundledContext->NumberEffectsCalled ==
-       pContext->pBundledContext->NumberEffectsEnabled){
+    if (pContext->pBundledContext->NumberEffectsCalled >=
+            pContext->pBundledContext->NumberEffectsEnabled) {
+
+        // We expect the # effects called to be equal to # effects enabled in sequence (including
+        // draining effects).  Warn if this is not the case due to inconsistent calls.
+        ALOGW_IF(pContext->pBundledContext->NumberEffectsCalled >
+                pContext->pBundledContext->NumberEffectsEnabled,
+                "%s Number of effects called %d is greater than number of effects enabled %d",
+                __func__, pContext->pBundledContext->NumberEffectsCalled,
+                pContext->pBundledContext->NumberEffectsEnabled);
+        effectProcessCalled = 0; // reset our consistency check.
+
         //ALOGV("\tEffect_process     Calling process with %d effects enabled, %d called: Effect %d",
         //pContext->pBundledContext->NumberEffectsEnabled,
         //pContext->pBundledContext->NumberEffectsCalled, pContext->EffectType);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index 6af4554..e4aacd0 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -110,6 +110,14 @@
 #ifdef SUPPORT_MC
     LVM_INT32                       ChMask;
 #endif
+
+    /* Bitmask indicating whether a drain is in progress due to disabling the effect.
+       The bit corresponding to an effect is set by 1 << lvm_effect_en. */
+    int                             effectInDrain;
+
+    /* Bitmask indicating whether process() was called for a particular effect.
+       The bit corresponding to an effect is set by 1 << lvm_effect_en. */
+    int                             effectProcessCalled;
 };
 
 /* SessionContext : One session */
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 2cd920a..4653711 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1578,7 +1578,7 @@
         }
 
         if (mPreparing) {
-            notifyPreparedAndCleanup(finalStatus);
+            notifyPreparedAndCleanup(finalStatus == ERROR_END_OF_STREAM ? OK : finalStatus);
             mPreparing = false;
         } else if (mSentPauseOnBuffering) {
             sendCacheStats();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 2f0da2d..ee463ce 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -106,16 +106,17 @@
     releaseAndResetMediaBuffers();
 }
 
-sp<AMessage> NuPlayer::Decoder::getStats() const {
+sp<AMessage> NuPlayer::Decoder::getStats() {
 
+    Mutex::Autolock autolock(mStatsLock);
     mStats->setInt64("frames-total", mNumFramesTotal);
     mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
     mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
     mStats->setFloat("frame-rate-total", mFrameRateTotal);
 
-    // i'm mutexed right now.
     // make our own copy, so we aren't victim to any later changes.
     sp<AMessage> copiedStats = mStats->dup();
+
     return copiedStats;
 }
 
@@ -362,13 +363,17 @@
     CHECK_EQ((status_t)OK, mCodec->getOutputFormat(&mOutputFormat));
     CHECK_EQ((status_t)OK, mCodec->getInputFormat(&mInputFormat));
 
-    mStats->setString("mime", mime.c_str());
-    mStats->setString("component-name", mComponentName.c_str());
+    {
+        Mutex::Autolock autolock(mStatsLock);
+        mStats->setString("mime", mime.c_str());
+        mStats->setString("component-name", mComponentName.c_str());
+    }
 
     if (!mIsAudio) {
         int32_t width, height;
         if (mOutputFormat->findInt32("width", &width)
                 && mOutputFormat->findInt32("height", &height)) {
+            Mutex::Autolock autolock(mStatsLock);
             mStats->setInt32("width", width);
             mStats->setInt32("height", height);
         }
@@ -799,6 +804,7 @@
         int32_t width, height;
         if (format->findInt32("width", &width)
                 && format->findInt32("height", &height)) {
+            Mutex::Autolock autolock(mStatsLock);
             mStats->setInt32("width", width);
             mStats->setInt32("height", height);
         }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 3da2f0b..4a52b0c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -34,7 +34,7 @@
             const sp<Surface> &surface = NULL,
             const sp<CCDecoder> &ccDecoder = NULL);
 
-    virtual sp<AMessage> getStats() const;
+    virtual sp<AMessage> getStats();
 
     // sets the output surface of video decoders.
     virtual status_t setVideoSurface(const sp<Surface> &surface);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index d44c396..a3e0046 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -47,7 +47,7 @@
     void signalResume(bool notifyComplete);
     void initiateShutdown();
 
-    virtual sp<AMessage> getStats() const {
+    virtual sp<AMessage> getStats() {
         return mStats;
     }
 
@@ -88,6 +88,7 @@
     int32_t mBufferGeneration;
     bool mPaused;
     sp<AMessage> mStats;
+    Mutex mStatsLock;
 
 private:
     enum {
diff --git a/media/libstagefright/codecs/m4v_h263/dec/src/packet_util.cpp b/media/libstagefright/codecs/m4v_h263/dec/src/packet_util.cpp
index 48414d7..5880e32 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/src/packet_util.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/src/packet_util.cpp
@@ -52,7 +52,11 @@
         PV_BitstreamByteAlign(stream);
         BitstreamReadBits32(stream, resync_marker_length);
 
-        *next_MB = (int) BitstreamReadBits16(stream, nbits);
+        int mbnum = (int) BitstreamReadBits16(stream, nbits);
+        if (mbnum < 0) {
+            return PV_FAIL;
+        }
+        *next_MB = mbnum;
 //      if (*next_MB <= video->mbnum)   /*  needs more investigation */
 //          *next_MB = video->mbnum+1;
 
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index da86758..87e8fd4 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -1426,75 +1426,90 @@
     RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
 
     UWORD32 ui_exec_done;
+    WORD32 i_num_preroll = 0;
     /* Checking for end of processing */
     err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DONE_QUERY,
                                 &ui_exec_done);
     RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DONE_QUERY");
 
-#ifdef ENABLE_MPEG_D_DRC
+    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                              IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES,
+                              &i_num_preroll);
+
+    RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GET_NUM_PRE_ROLL_FRAMES");
     {
-        if (ui_exec_done != 1) {
-            VOID* p_array;        // ITTIAM:buffer to handle gain payload
-            WORD32 buf_size = 0;  // ITTIAM:gain payload length
-            WORD32 bit_str_fmt = 1;
-            WORD32 gain_stream_flag = 1;
+        int32_t pi_preroll_frame_offset = 0;
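+        /* Loop over every pre-roll frame returned for this access unit;
+           pi_preroll_frame_offset advances by the output size of each frame so the
+           MPEG-D DRC input is copied from the correct offset in mOutputBuffer. */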
+        do {
+#ifdef ENABLE_MPEG_D_DRC
+            if (ui_exec_done != 1) {
+                VOID* p_array;        // ITTIAM:buffer to handle gain payload
+                WORD32 buf_size = 0;  // ITTIAM:gain payload length
+                WORD32 bit_str_fmt = 1;
+                WORD32 gain_stream_flag = 1;
 
-            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
-                                        IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
-            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
+                err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                            IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN, &buf_size);
+                RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_LEN");
 
-            err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
-                                        IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
-            RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
+                err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CONFIG_PARAM,
+                                            IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF, &p_array);
+                RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_GAIN_PAYLOAD_BUF");
 
-            if (buf_size > 0) {
-                /*Set bitstream_split_format */
-                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
-                                          IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
-                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+                if (buf_size > 0) {
+                    /*Set bitstream_split_format */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                              IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT, &bit_str_fmt);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
 
-                memcpy(mDrcInBuf, p_array, buf_size);
-                /* Set number of bytes to be processed */
-                err_code =
-                    ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS, 0, &buf_size);
-                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+                    memcpy(mDrcInBuf, p_array, buf_size);
+                    /* Set number of bytes to be processed */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES_BS,
+                                              0, &buf_size);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
 
-                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
-                                          IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG, &gain_stream_flag);
-                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
+                                              IA_DRC_DEC_CONFIG_GAIN_STREAM_FLAG,
+                                              &gain_stream_flag);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
 
-                /* Execute process */
-                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
-                                          IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
-                RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
+                    /* Execute process */
+                    err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
+                                              IA_CMD_TYPE_INIT_CPY_BSF_BUFF, NULL);
+                    RETURN_IF_FATAL(err_code, "IA_DRC_DEC_CONFIG_PARAM_BITS_FORMAT");
 
-                mMpegDDRCPresent = 1;
+                    mMpegDDRCPresent = 1;
+                }
             }
-        }
-    }
 #endif
-    /* How much buffer is used in input buffers */
-    err_code =
-        ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CURIDX_INPUT_BUF, 0, bytesConsumed);
-    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
+            /* How much buffer is used in input buffers */
+            err_code =
+                ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_CURIDX_INPUT_BUF,
+                                 0, bytesConsumed);
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_CURIDX_INPUT_BUF");
 
-    /* Get the output bytes */
-    err_code = ixheaacd_dec_api(mXheaacCodecHandle, IA_API_CMD_GET_OUTPUT_BYTES, 0, outBytes);
-    RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
+            /* Get the output bytes */
+            err_code = ixheaacd_dec_api(mXheaacCodecHandle,
+                                        IA_API_CMD_GET_OUTPUT_BYTES, 0, outBytes);
+            RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_OUTPUT_BYTES");
 #ifdef ENABLE_MPEG_D_DRC
 
-    if (mMpegDDRCPresent == 1) {
-        memcpy(mDrcInBuf, mOutputBuffer, *outBytes);
-        err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES, 0, outBytes);
-        RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
+            if (mMpegDDRCPresent == 1) {
+                memcpy(mDrcInBuf, mOutputBuffer + pi_preroll_frame_offset, *outBytes);
+                pi_preroll_frame_offset += *outBytes;
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_INPUT_BYTES,
+                                          0, outBytes);
+                RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_INPUT_BYTES");
 
-        err_code =
-            ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE, IA_CMD_TYPE_DO_EXECUTE, NULL);
-        RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
+                err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_EXECUTE,
+                                          IA_CMD_TYPE_DO_EXECUTE, NULL);
+                RETURN_IF_FATAL(err_code, "IA_CMD_TYPE_DO_EXECUTE");
 
-        memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
-    }
+                memcpy(mOutputBuffer, mDrcOutBuf, *outBytes);
+            }
 #endif
+            i_num_preroll--;
+        } while (i_num_preroll > 0);
+    }
     return IA_NO_ERROR;
 }
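
Note on the pre-roll change above: the decoder now drains i_num_preroll frames in one call, and pi_preroll_frame_offset advances so each pre-roll frame's PCM is appended after the previous one instead of overwriting it. A minimal standalone sketch of that accumulation pattern (decodeOneFrame is a hypothetical stand-in for the ixheaacd/DRC calls, not the real API):

#include <cstdint>
#include <functional>

int decodePrerollBurst(uint8_t* outputBuffer, int numPreroll,
                       const std::function<int(uint8_t*)>& decodeOneFrame) {
    int offset = 0;                          // running write position in outputBuffer
    do {
        int outBytes = decodeOneFrame(outputBuffer + offset);
        if (outBytes < 0) {
            return outBytes;                 // propagate a decoder error
        }
        offset += outBytes;                  // next pre-roll frame lands after this one
        --numPreroll;
    } while (numPreroll > 0);
    return offset;                           // total PCM bytes produced by the burst
}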
 
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index d685321..c7dc415 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -324,8 +324,8 @@
 }
 
 #define DECLARE_YUV2RGBFUNC(func, rgb) int (*func)(     \
-        const uint8*, int, const uint8*, int,           \
-        const uint8*, int, uint8*, int, int, int)       \
+        const uint8_t*, int, const uint8_t*, int,           \
+        const uint8_t*, int, uint8_t*, int, int, int)       \
         = mSrcColorSpace.isBt709() ? libyuv::H420To##rgb \
         : mSrcColorSpace.isJpeg() ? libyuv::J420To##rgb  \
         : libyuv::I420To##rgb
@@ -350,7 +350,7 @@
     {
         DECLARE_YUV2RGBFUNC(func, RGB565);
         (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
-                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+                (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
     }
 
@@ -358,7 +358,7 @@
     {
         DECLARE_YUV2RGBFUNC(func, ABGR);
         (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
-                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+                (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
     }
 
@@ -366,7 +366,7 @@
     {
         DECLARE_YUV2RGBFUNC(func, ARGB);
         (*func)(src_y, src.mStride, src_u, src.mStride / 2, src_v, src.mStride / 2,
-                (uint8 *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
+                (uint8_t *)dst_ptr, dst.mStride, src.cropWidth(), src.cropHeight());
         break;
     }
 
@@ -391,17 +391,17 @@
 
     switch (mDstFormat) {
     case OMX_COLOR_Format16bitRGB565:
-        libyuv::NV12ToRGB565(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+        libyuv::NV12ToRGB565(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
                 dst.mStride, src.cropWidth(), src.cropHeight());
         break;
 
     case OMX_COLOR_Format32bitBGRA8888:
-        libyuv::NV12ToARGB(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+        libyuv::NV12ToARGB(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
                 dst.mStride, src.cropWidth(), src.cropHeight());
         break;
 
     case OMX_COLOR_Format32BitRGBA8888:
-        libyuv::NV12ToABGR(src_y, src.mStride, src_u, src.mStride, (uint8 *)dst_ptr,
+        libyuv::NV12ToABGR(src_y, src.mStride, src_u, src.mStride, (uint8_t *)dst_ptr,
                 dst.mStride, src.cropWidth(), src.cropHeight());
         break;
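
The casts above track libyuv's move from its legacy uint8 typedef to the standard uint8_t in its public signatures. A minimal call against the modern signature might look like the following sketch (assumes libyuv's convert_argb.h and tightly packed I420 planes; strides are illustrative):

#include <cstdint>
#include "libyuv/convert_argb.h"

void convertI420ToARGB(const uint8_t* y, const uint8_t* u, const uint8_t* v,
                       uint8_t* argb, int width, int height) {
    // Y stride is the full width, U/V strides are half, ARGB is 4 bytes per pixel.
    libyuv::I420ToARGB(y, width,
                       u, width / 2,
                       v, width / 2,
                       argb, width * 4,
                       width, height);
}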
 
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index 04041eb..a07eb8c 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -77,7 +77,7 @@
             <Limit name="bitrate" range="1-40000000" />
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.android.av1.decoder" type="video/av01">
+        <MediaCodec name="c2.android.gav1.decoder" type="video/av01">
             <Limit name="size" min="96x96" max="1920x1080" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index 67d3f1a..9532ba6 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -182,7 +182,7 @@
             </Variant>
             <Feature name="adaptive-playback" />
         </MediaCodec>
-        <MediaCodec name="c2.android.av1.decoder" type="video/av01" variant="!slow-cpu">
+        <MediaCodec name="c2.android.gav1.decoder" type="video/av01" variant="!slow-cpu">
             <Limit name="size" min="2x2" max="1920x1080" />
             <Limit name="alignment" value="2x2" />
             <Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/exports.lds b/media/libstagefright/exports.lds
index 06c4f19..f5ddf1e 100644
--- a/media/libstagefright/exports.lds
+++ b/media/libstagefright/exports.lds
@@ -395,7 +395,6 @@
         ScaleFilterCols_NEON*;
         ScaleFilterReduce;
         ScaleFilterRows_NEON*;
-        ScaleOffset;
         ScalePlane;
         ScalePlane_16;
         ScalePlaneBilinearDown;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 6160c3c..6adf563 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -966,7 +966,7 @@
         (strcmp(name, "/") == 0) || (strcmp(basename(name), name) != 0)) {
         char errMsg[80];
 
-        sprintf(errMsg, "Invalid name: %s", (const char *) name);
+        snprintf(errMsg, sizeof(errMsg), "Invalid name: %s", (const char *) name);
         ALOGE("%s (b/130656917)", errMsg);
         android_errorWriteWithInfoLog(SN_EVENT_LOG_ID, "130656917", -1, errMsg,
                                       strlen(errMsg));
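
sprintf into the fixed 80-byte errMsg could overflow the stack when the supplied name is long; snprintf bounds the write and truncates instead. A generic sketch of the bounded-formatting pattern (not the MTP code itself):

#include <cstdio>

void logInvalidName(const char* name) {
    char errMsg[80];
    // Output is cut off at sizeof(errMsg) - 1 characters and always
    // NUL-terminated, so a long name can no longer overflow the buffer.
    std::snprintf(errMsg, sizeof(errMsg), "Invalid name: %s", name);
    std::fprintf(stderr, "%s\n", errMsg);
}
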
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index baa4fc7..830f752 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -113,12 +113,12 @@
 
 void
 AImageReader::FrameListener::onFrameAvailable(const BufferItem& /*item*/) {
-    Mutex::Autolock _l(mLock);
     sp<AImageReader> reader = mReader.promote();
     if (reader == nullptr) {
         ALOGW("A frame is available after AImageReader closed!");
         return; // reader has been closed
     }
+    Mutex::Autolock _l(mLock);
     if (mListener.onImageAvailable == nullptr) {
         return; // No callback registered
     }
@@ -143,12 +143,12 @@
 
 void
 AImageReader::BufferRemovedListener::onBufferFreed(const wp<GraphicBuffer>& graphicBuffer) {
-    Mutex::Autolock _l(mLock);
     sp<AImageReader> reader = mReader.promote();
     if (reader == nullptr) {
         ALOGW("A frame is available after AImageReader closed!");
         return; // reader has been closed
     }
+    Mutex::Autolock _l(mLock);
     if (mListener.onBufferRemoved == nullptr) {
         return; // No callback registered
     }
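
Both listeners now promote the weak reference to the reader before taking mLock, so the early return when the reader is gone never holds the lock, and the lock only guards the callback state. The same ordering expressed with standard types (std::weak_ptr and std::mutex stand in for Android's wp<> and Mutex purely for illustration):

#include <functional>
#include <memory>
#include <mutex>

struct Reader {};

struct Listener {
    std::weak_ptr<Reader> mReader;           // the owner may be destroyed at any time
    std::mutex mLock;                        // guards mCallback only
    std::function<void(Reader&)> mCallback;

    void onEvent() {
        // Promote first: if the reader is already gone, return without ever
        // touching the lock that protects the callback state.
        std::shared_ptr<Reader> reader = mReader.lock();
        if (reader == nullptr) {
            return;                          // reader has been closed
        }
        std::lock_guard<std::mutex> guard(mLock);
        if (mCallback) {
            mCallback(*reader);
        }
    }
};
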
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index e328cb1..19bd704 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -134,7 +134,7 @@
 
       private:
         AImageReader_ImageListener mListener = {nullptr, nullptr};
-        wp<AImageReader>           mReader;
+        const wp<AImageReader>     mReader;
         Mutex                      mLock;
     };
     sp<FrameListener> mFrameListener;
@@ -149,7 +149,7 @@
 
        private:
         AImageReader_BufferRemovedListener mListener = {nullptr, nullptr};
-        wp<AImageReader>           mReader;
+        const wp<AImageReader>     mReader;
         Mutex                      mLock;
     };
     sp<BufferRemovedListener> mBufferRemovedListener;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 28ad9dd..f81dacf 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -3956,6 +3956,32 @@
     return INVALID_OPERATION;
 }
 
+// For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
+// still applied by the mixer.
+// All tracks attached to a mixer with flag VOIP_RX are tied to the same
+// stream type STREAM_VOICE_CALL, so this will only change the HAL volume once even
+// if more than one track is active.
+status_t AudioFlinger::PlaybackThread::handleVoipVolume_l(float *volume)
+{
+    status_t result = NO_ERROR;
+    if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
+        if (*volume != mLeftVolFloat) {
+            result = mOutput->stream->setVolume(*volume, *volume);
+            ALOGE_IF(result != OK,
+                     "Error when setting output stream volume: %d", result);
+            if (result == NO_ERROR) {
+                mLeftVolFloat = *volume;
+            }
+        }
+        // If the stream volume was successfully sent to the HAL, mLeftVolFloat == *volume here
+        // and we remove the stream volume contribution from the software volume.
+        if (mLeftVolFloat == *volume) {
+            *volume = 1.0f;
+        }
+    }
+    return result;
+}
+
 status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
                                                           audio_patch_handle_t *handle)
 {
@@ -4758,22 +4784,25 @@
                     // no acknowledgement required for newly active tracks
                 }
                 sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
+                float volume;
+                if (track->isPlaybackRestricted() || mStreamTypes[track->streamType()].mute) {
+                    volume = 0.f;
+                } else {
+                    volume = masterVolume * mStreamTypes[track->streamType()].volume;
+                }
+
+                handleVoipVolume_l(&volume);
+
                 // cache the combined master volume and stream type volume for fast mixer; this
                 // lacks any synchronization or barrier so VolumeProvider may read a stale value
                 const float vh = track->getVolumeHandler()->getVolume(
-                        proxy->framesReleased()).first;
-                float volume;
-                if (track->isPlaybackRestricted()) {
-                    volume = 0.f;
-                } else {
-                    volume = masterVolume
-                        * mStreamTypes[track->streamType()].volume
-                        * vh;
-                }
+                    proxy->framesReleased()).first;
+                volume *= vh;
                 track->mCachedVolume = volume;
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
                 float vlf = volume * float_from_gain(gain_minifloat_unpack_left(vlr));
                 float vrf = volume * float_from_gain(gain_minifloat_unpack_right(vlr));
+
                 track->setFinalVolume((vlf + vrf) / 2.f);
                 ++fastTracks;
             } else {
@@ -4916,20 +4945,22 @@
             uint32_t vl, vr;       // in U8.24 integer format
             float vlf, vrf, vaf;   // in [0.0, 1.0] float format
             // read original volumes with volume control
-            float typeVolume = mStreamTypes[track->streamType()].volume;
-            float v = masterVolume * typeVolume;
+            float v = masterVolume * mStreamTypes[track->streamType()].volume;
             // Always fetch volumeshaper volume to ensure state is updated.
             const sp<AudioTrackServerProxy> proxy = track->mAudioTrackServerProxy;
             const float vh = track->getVolumeHandler()->getVolume(
                     track->mAudioTrackServerProxy->framesReleased()).first;
 
-            if (track->isPausing() || mStreamTypes[track->streamType()].mute
-                    || track->isPlaybackRestricted()) {
+            if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
+                v = 0;
+            }
+
+            handleVoipVolume_l(&v);
+
+            if (track->isPausing()) {
                 vl = vr = 0;
                 vlf = vrf = vaf = 0.;
-                if (track->isPausing()) {
-                    track->setPaused();
-                }
+                track->setPaused();
             } else {
                 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
                 vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
@@ -4981,25 +5012,6 @@
                 track->mHasVolumeController = false;
             }
 
-            // For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
-            // still applied by the mixer.
-            if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
-                v = mStreamTypes[track->streamType()].mute ? 0.0f : v;
-                if (v != mLeftVolFloat) {
-                    status_t result = mOutput->stream->setVolume(v, v);
-                    ALOGE_IF(result != OK, "Error when setting output stream volume: %d", result);
-                    if (result == OK) {
-                        mLeftVolFloat = v;
-                    }
-                }
-                // if stream volume was successfully sent to the HAL, mLeftVolFloat == v here and we
-                // remove stream volume contribution from software volume.
-                if (v != 0.0f && mLeftVolFloat == v) {
-                   vlf = min(1.0f, vlf / v);
-                   vrf = min(1.0f, vrf / v);
-                   vaf = min(1.0f, vaf / v);
-               }
-            }
             // XXX: these things DON'T need to be done each time
             mAudioMixer->setBufferProvider(trackId, track);
             mAudioMixer->enable(trackId);
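
handleVoipVolume_l centralizes the VoIP rule for both the fast-mixer and normal-mixer paths: the stream volume is pushed to the HAL once per change and, if the HAL accepted it, it is removed from the software volume so the same attenuation is not applied twice. A simplified standalone sketch of that rule (setHalVolume is a hypothetical stand-in for mOutput->stream->setVolume(); lastHalVolume plays the role of mLeftVolFloat):

#include <functional>

float applyVoipVolume(float streamVolume, float& lastHalVolume,
                      const std::function<bool(float)>& setHalVolume) {
    if (streamVolume != lastHalVolume && setHalVolume(streamVolume)) {
        lastHalVolume = streamVolume;        // remember what the HAL is applying
    }
    // If the HAL now owns the stream volume, drop it from the software volume.
    return (lastHalVolume == streamVolume) ? 1.0f : streamVolume;
}
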
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 336c2b4..fc8aa13 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -747,6 +747,7 @@
                 // is safe to do so. That will drop the final ref count and destroy the tracks.
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
                 void        removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
+                status_t    handleVoipVolume_l(float *volume);
 
     // StreamOutHalInterfaceCallback implementation
     virtual     void        onWriteReady();
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 96a8337..1f9b725 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -333,9 +333,10 @@
             if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
                 moduleDevice->setEncodedFormat(encodedFormat);
             }
-            moduleDevice->setAddress(devAddress);
             if (allowToCreate) {
                 moduleDevice->attach(hwModule);
+                moduleDevice->setAddress(devAddress);
+                moduleDevice->setName(String8(name));
             }
             return moduleDevice;
         }
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index a9e687c..5f92d24 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -472,6 +472,10 @@
     std::unordered_set<audio_format_t> formatSet;
     sp<HwModule> primaryModule =
             mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
+    if (primaryModule == nullptr) {
+        ALOGE("%s() unable to get primary module", __func__);
+        return NO_INIT;
+    }
     DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypeMask(
             AUDIO_DEVICE_OUT_ALL_A2DP);
     for (const auto& device : declaredDevices) {
@@ -836,7 +840,7 @@
         // if explicitly requested
         static const uint32_t kRelevantFlags =
                 (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
-                 AUDIO_OUTPUT_FLAG_VOIP_RX);
+                 AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ);
         flags =
             (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
     }
@@ -5845,11 +5849,11 @@
 
     if (isVoiceVolSrc || isBtScoVolSrc) {
         float voiceVolume;
-        // Force voice volume to max for bluetooth SCO as volume is managed by the headset
+        // Force voice volume to max or mute for Bluetooth SCO as other attenuations are managed by the headset
         if (isVoiceVolSrc) {
             voiceVolume = (float)index/(float)curves.getVolumeIndexMax();
         } else {
-            voiceVolume = 1.0;
+            voiceVolume = index == 0 ? 0.0 : 1.0;
         }
         if (voiceVolume != mLastVoiceVolume) {
             mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
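
For the SCO branch above, the headset applies its own attenuation, so the policy manager only needs to distinguish mute from unmuted rather than scaling by the index. A small sketch of that index-to-voice-volume mapping (illustrative helper, not an AudioPolicyManager method):

float voiceVolumeForIndex(int index, int maxIndex, bool isBtScoVolSrc) {
    if (isBtScoVolSrc) {
        // The SCO headset manages attenuation; only report mute vs. unmuted.
        return index == 0 ? 0.0f : 1.0f;
    }
    return static_cast<float>(index) / static_cast<float>(maxIndex);
}
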
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 3e62102..048d0e6 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1149,6 +1149,8 @@
                 clientPid,
                 states[states.size() - 1]);
 
+        resource_policy::ClientPriority clientPriority = clientDescriptor->getPriority();
+
         // Find clients that would be evicted
         auto evicted = mActiveClientManager.wouldEvict(clientDescriptor);
 
@@ -1166,8 +1168,7 @@
             String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
                     "(PID %d, score %d state %d) due to eviction policy", curTime.string(),
                     cameraId.string(), packageName.string(), clientPid,
-                    priorityScores[priorityScores.size() - 1],
-                    states[states.size() - 1]);
+                    clientPriority.getScore(), clientPriority.getState());
 
             for (auto& i : incompatibleClients) {
                 msg.appendFormat("\n   - Blocked by existing device %s client for package %s"
@@ -1212,9 +1213,8 @@
                     i->getKey().string(), String8{clientSp->getPackageName()}.string(),
                     i->getOwnerId(), i->getPriority().getScore(),
                     i->getPriority().getState(), cameraId.string(),
-                    packageName.string(), clientPid,
-                    priorityScores[priorityScores.size() - 1],
-                    states[states.size() - 1]));
+                    packageName.string(), clientPid, clientPriority.getScore(),
+                    clientPriority.getState()));
 
             // Notify the client of disconnection
             clientSp->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
@@ -1348,14 +1348,19 @@
     Status ret = Status::ok();
     String8 id = String8(cameraId);
     sp<CameraDeviceClient> client = nullptr;
-
+    String16 clientPackageNameAdj = clientPackageName;
+    if (hardware::IPCThreadState::self()->isServingCall()) {
+        std::string vendorClient =
+                StringPrintf("vendor.client.pid<%d>", CameraThreadState::getCallingPid());
+        clientPackageNameAdj = String16(vendorClient.c_str());
+    }
     ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
             /*api1CameraId*/-1,
-            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
+            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageNameAdj,
             clientUid, USE_CALLING_PID, API_2, /*shimUpdateOnly*/ false, /*out*/client);
 
     if(!ret.isOk()) {
-        logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageName),
+        logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageNameAdj),
                 ret.toString8());
         return ret;
     }
@@ -2368,11 +2373,7 @@
         }
         mClientPackageName = packages[0];
     }
-    if (hardware::IPCThreadState::self()->isServingCall()) {
-        std::string vendorClient =
-                StringPrintf("vendor.client.pid<%d>", CameraThreadState::getCallingPid());
-        mClientPackageName = String16(vendorClient.c_str());
-    } else {
+    if (!hardware::IPCThreadState::self()->isServingCall()) {
         mAppOpsManager = std::make_unique<AppOpsManager>();
     }
 }
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index 9525ad2..8ebaa2b 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -58,8 +58,8 @@
 
         entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
         if (entry.count == 5) {
-            mInstrinsicCalibration.reserve(5);
-            mInstrinsicCalibration.insert(mInstrinsicCalibration.end(), entry.data.f,
+            mIntrinsicCalibration.reserve(5);
+            mIntrinsicCalibration.insert(mIntrinsicCalibration.end(), entry.data.f,
                     entry.data.f + 5);
         } else {
             ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
@@ -323,12 +323,12 @@
     depthPhoto.mMaxJpegSize = maxDepthJpegSize;
     // The camera intrinsic calibration layout is as follows:
     // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
-    if (mInstrinsicCalibration.size() == 5) {
-        memcpy(depthPhoto.mInstrinsicCalibration, mInstrinsicCalibration.data(),
-                sizeof(depthPhoto.mInstrinsicCalibration));
-        depthPhoto.mIsInstrinsicCalibrationValid = 1;
+    if (mIntrinsicCalibration.size() == 5) {
+        memcpy(depthPhoto.mIntrinsicCalibration, mIntrinsicCalibration.data(),
+                sizeof(depthPhoto.mIntrinsicCalibration));
+        depthPhoto.mIsIntrinsicCalibrationValid = 1;
     } else {
-        depthPhoto.mIsInstrinsicCalibrationValid = 0;
+        depthPhoto.mIsIntrinsicCalibrationValid = 0;
     }
     // The camera lens distortion contains the following lens correction coefficients.
     // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
index 1bf31f4..975c59b 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.h
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -124,7 +124,7 @@
 
     ssize_t              mMaxJpegSize;
     std::vector<std::tuple<size_t, size_t>> mSupportedDepthSizes;
-    std::vector<float>   mInstrinsicCalibration, mLensDistortion;
+    std::vector<float>   mIntrinsicCalibration, mLensDistortion;
     bool                 mIsLogicalCamera;
     void*                mDepthPhotoLibHandle;
     process_depth_photo_frame mDepthPhotoProcess;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 09638d0..4201af3 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -29,6 +29,7 @@
 #include <future>
 #include <inttypes.h>
 #include <hardware/camera_common.h>
+#include <android/hidl/manager/1.2/IServiceManager.h>
 #include <hidl/ServiceManagement.h>
 #include <functional>
 #include <camera_metadata_hidden.h>
@@ -47,10 +48,6 @@
 using std::literals::chrono_literals::operator""s;
 
 namespace {
-// Hardcoded name for the passthrough HAL implementation, since it can't be discovered via the
-// service manager
-const std::string kLegacyProviderName("legacy/0");
-const std::string kExternalProviderName("external/0");
 const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
 } // anonymous namespace
 
@@ -62,6 +59,19 @@
 CameraProviderManager::~CameraProviderManager() {
 }
 
+hardware::hidl_vec<hardware::hidl_string>
+CameraProviderManager::HardwareServiceInteractionProxy::listServices() {
+    hardware::hidl_vec<hardware::hidl_string> ret;
+    auto manager = hardware::defaultServiceManager1_2();
+    if (manager != nullptr) {
+        manager->listManifestByInterface(provider::V2_4::ICameraProvider::descriptor,
+                [&ret](const hardware::hidl_vec<hardware::hidl_string> &registered) {
+                    ret = registered;
+                });
+    }
+    return ret;
+}
+
 status_t CameraProviderManager::initialize(wp<CameraProviderManager::StatusListener> listener,
         ServiceInteractionProxy* proxy) {
     std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -84,9 +94,10 @@
         return INVALID_OPERATION;
     }
 
-    // See if there's a passthrough HAL, but let's not complain if there's not
-    addProviderLocked(kLegacyProviderName, /*expected*/ false);
-    addProviderLocked(kExternalProviderName, /*expected*/ false);
+
+    for (const auto& instance : mServiceProxy->listServices()) {
+        this->addProviderLocked(instance);
+    }
 
     IPCThreadState::self()->flushCommands();
 
@@ -1087,7 +1098,7 @@
     return false;
 }
 
-status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
+status_t CameraProviderManager::addProviderLocked(const std::string& newProvider) {
     for (const auto& providerInfo : mProviders) {
         if (providerInfo->mProviderName == newProvider) {
             ALOGW("%s: Camera provider HAL with name '%s' already registered", __FUNCTION__,
@@ -1100,13 +1111,9 @@
     interface = mServiceProxy->getService(newProvider);
 
     if (interface == nullptr) {
-        if (expected) {
-            ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
-                    newProvider.c_str());
-            return BAD_VALUE;
-        } else {
-            return OK;
-        }
+        ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
+                newProvider.c_str());
+        return BAD_VALUE;
     }
 
     sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, this);
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index a42fb4d..8cdfc24 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -78,6 +78,7 @@
                 &notification) = 0;
         virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
                 const std::string &serviceName) = 0;
+        virtual hardware::hidl_vec<hardware::hidl_string> listServices() = 0;
         virtual ~ServiceInteractionProxy() {}
     };
 
@@ -95,6 +96,8 @@
                 const std::string &serviceName) override {
             return hardware::camera::provider::V2_4::ICameraProvider::getService(serviceName);
         }
+
+        virtual hardware::hidl_vec<hardware::hidl_string> listServices() override;
     };
 
     /**
@@ -567,7 +570,7 @@
             hardware::hidl_version minVersion = hardware::hidl_version{0,0},
             hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
 
-    status_t addProviderLocked(const std::string& newProvider, bool expected = true);
+    status_t addProviderLocked(const std::string& newProvider);
 
     status_t removeProvider(const std::string& provider);
     sp<StatusListener> getStatusListener() const;
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
index fc79150..3c90de0 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -61,6 +61,13 @@
 using dynamic_depth::Profile;
 using dynamic_depth::Profiles;
 
+template<>
+struct std::default_delete<jpeg_compress_struct> {
+    inline void operator()(jpeg_compress_struct* cinfo) const {
+        jpeg_destroy_compress(cinfo);
+    }
+};
+
 namespace android {
 namespace camera3 {
 
@@ -118,16 +125,16 @@
         bool mSuccess;
     } dmgr;
 
-    jpeg_compress_struct cinfo = {};
+    std::unique_ptr<jpeg_compress_struct> cinfo = std::make_unique<jpeg_compress_struct>();
     jpeg_error_mgr jerr;
 
     // Initialize error handling with standard callbacks, but
     // then override output_message (to print to ALOG) and
     // error_exit to set a flag and print a message instead
     // of killing the whole process.
-    cinfo.err = jpeg_std_error(&jerr);
+    cinfo->err = jpeg_std_error(&jerr);
 
-    cinfo.err->output_message = [](j_common_ptr cinfo) {
+    cinfo->err->output_message = [](j_common_ptr cinfo) {
         char buffer[JMSG_LENGTH_MAX];
 
         /* Create the message */
@@ -135,7 +142,7 @@
         ALOGE("libjpeg error: %s", buffer);
     };
 
-    cinfo.err->error_exit = [](j_common_ptr cinfo) {
+    cinfo->err->error_exit = [](j_common_ptr cinfo) {
         (*cinfo->err->output_message)(cinfo);
         if(cinfo->client_data) {
             auto & dmgr = *static_cast<CustomJpegDestMgr*>(cinfo->client_data);
@@ -144,12 +151,12 @@
     };
 
     // Now that we initialized some callbacks, let's create our compressor
-    jpeg_create_compress(&cinfo);
+    jpeg_create_compress(cinfo.get());
     dmgr.mBuffer = static_cast<JOCTET*>(out);
     dmgr.mBufferSize = maxOutSize;
     dmgr.mEncodedSize = 0;
     dmgr.mSuccess = true;
-    cinfo.client_data = static_cast<void*>(&dmgr);
+    cinfo->client_data = static_cast<void*>(&dmgr);
 
     // These lambdas become C-style function pointers and as per C++11 spec
     // may not capture anything.
@@ -171,28 +178,28 @@
         dmgr.mEncodedSize = dmgr.mBufferSize - dmgr.free_in_buffer;
         ALOGV("%s:%d Done with jpeg: %zu", __FUNCTION__, __LINE__, dmgr.mEncodedSize);
     };
-    cinfo.dest = reinterpret_cast<struct jpeg_destination_mgr*>(&dmgr);
-    cinfo.image_width = width;
-    cinfo.image_height = height;
-    cinfo.input_components = 1;
-    cinfo.in_color_space = JCS_GRAYSCALE;
+    cinfo->dest = static_cast<struct jpeg_destination_mgr*>(&dmgr);
+    cinfo->image_width = width;
+    cinfo->image_height = height;
+    cinfo->input_components = 1;
+    cinfo->in_color_space = JCS_GRAYSCALE;
 
     // Initialize defaults and then override what we want
-    jpeg_set_defaults(&cinfo);
+    jpeg_set_defaults(cinfo.get());
 
-    jpeg_set_quality(&cinfo, jpegQuality, 1);
-    jpeg_set_colorspace(&cinfo, JCS_GRAYSCALE);
-    cinfo.raw_data_in = 0;
-    cinfo.dct_method = JDCT_IFAST;
+    jpeg_set_quality(cinfo.get(), jpegQuality, 1);
+    jpeg_set_colorspace(cinfo.get(), JCS_GRAYSCALE);
+    cinfo->raw_data_in = 0;
+    cinfo->dct_method = JDCT_IFAST;
 
-    cinfo.comp_info[0].h_samp_factor = 1;
-    cinfo.comp_info[1].h_samp_factor = 1;
-    cinfo.comp_info[2].h_samp_factor = 1;
-    cinfo.comp_info[0].v_samp_factor = 1;
-    cinfo.comp_info[1].v_samp_factor = 1;
-    cinfo.comp_info[2].v_samp_factor = 1;
+    cinfo->comp_info[0].h_samp_factor = 1;
+    cinfo->comp_info[1].h_samp_factor = 1;
+    cinfo->comp_info[2].h_samp_factor = 1;
+    cinfo->comp_info[0].v_samp_factor = 1;
+    cinfo->comp_info[1].v_samp_factor = 1;
+    cinfo->comp_info[2].v_samp_factor = 1;
 
-    jpeg_start_compress(&cinfo, TRUE);
+    jpeg_start_compress(cinfo.get(), TRUE);
 
     if (exifOrientation != ExifOrientation::ORIENTATION_UNDEFINED) {
         std::unique_ptr<ExifUtils> utils(ExifUtils::create());
@@ -204,19 +211,19 @@
         if (utils->generateApp1()) {
             const uint8_t* exifBuffer = utils->getApp1Buffer();
             size_t exifBufferSize = utils->getApp1Length();
-            jpeg_write_marker(&cinfo, JPEG_APP0 + 1, static_cast<const JOCTET*>(exifBuffer),
+            jpeg_write_marker(cinfo.get(), JPEG_APP0 + 1, static_cast<const JOCTET*>(exifBuffer),
                     exifBufferSize);
         } else {
             ALOGE("%s: Unable to generate App1 buffer", __FUNCTION__);
         }
     }
 
-    for (size_t i = 0; i < cinfo.image_height; i++) {
+    for (size_t i = 0; i < cinfo->image_height; i++) {
         auto currentRow  = static_cast<JSAMPROW>(in + i*width);
-        jpeg_write_scanlines(&cinfo, &currentRow, /*num_lines*/1);
+        jpeg_write_scanlines(cinfo.get(), &currentRow, /*num_lines*/1);
     }
 
-    jpeg_finish_compress(&cinfo);
+    jpeg_finish_compress(cinfo.get());
 
     actualSize = dmgr.mEncodedSize;
     if (dmgr.mSuccess) {
@@ -430,12 +437,12 @@
         return BAD_VALUE;
     }
 
-    // It is not possible to generate an imaging model without instrinsic calibration.
-    if (inputFrame.mIsInstrinsicCalibrationValid) {
+    // It is not possible to generate an imaging model without intrinsic calibration.
+    if (inputFrame.mIsIntrinsicCalibrationValid) {
         // The camera intrinsic calibration layout is as follows:
         // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
-        const dynamic_depth::Point<double> focalLength(inputFrame.mInstrinsicCalibration[0],
-                inputFrame.mInstrinsicCalibration[1]);
+        const dynamic_depth::Point<double> focalLength(inputFrame.mIntrinsicCalibration[0],
+                inputFrame.mIntrinsicCalibration[1]);
         size_t width = inputFrame.mMainJpegWidth;
         size_t height = inputFrame.mMainJpegHeight;
         if (switchDimensions) {
@@ -444,9 +451,9 @@
         }
         const Dimension imageSize(width, height);
         ImagingModelParams imagingParams(focalLength, imageSize);
-        imagingParams.principal_point.x = inputFrame.mInstrinsicCalibration[2];
-        imagingParams.principal_point.y = inputFrame.mInstrinsicCalibration[3];
-        imagingParams.skew = inputFrame.mInstrinsicCalibration[4];
+        imagingParams.principal_point.x = inputFrame.mIntrinsicCalibration[2];
+        imagingParams.principal_point.y = inputFrame.mIntrinsicCalibration[3];
+        imagingParams.skew = inputFrame.mIntrinsicCalibration[4];
 
         // The camera lens distortion contains the following lens correction coefficients.
         // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
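
The std::default_delete specialization added at the top of this file is what lets a plain std::unique_ptr<jpeg_compress_struct> release the compressor with jpeg_destroy_compress on every exit path, including the early error returns. The same pattern applied to a hypothetical C-style handle (all names invented for illustration):

#include <memory>

// Hypothetical C-style API, for illustration only.
struct widget_handle { int id; };
widget_handle* widget_create() { return new widget_handle{1}; }
void widget_destroy(widget_handle* h) { delete h; }

// Teach unique_ptr how to release the handle without naming a deleter type at
// every use site, mirroring the jpeg_compress_struct specialization above.
template <>
struct std::default_delete<widget_handle> {
    void operator()(widget_handle* h) const { widget_destroy(h); }
};

int main() {
    std::unique_ptr<widget_handle> w(widget_create());
    // widget_destroy() runs automatically when w goes out of scope, even if an
    // early return or exception unwinds first.
    return 0;
}
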
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.h b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
index 6a2fbff..ba5ca9e 100644
--- a/services/camera/libcameraservice/common/DepthPhotoProcessor.h
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
@@ -39,8 +39,8 @@
     size_t                mMaxJpegSize;
     uint8_t               mJpegQuality;
     uint8_t               mIsLogical;
-    float                 mInstrinsicCalibration[5];
-    uint8_t               mIsInstrinsicCalibrationValid;
+    float                 mIntrinsicCalibration[5];
+    uint8_t               mIsIntrinsicCalibrationValid;
     float                 mLensDistortion[5];
     uint8_t               mIsLensDistortionValid;
     DepthPhotoOrientation mOrientation;
@@ -57,8 +57,8 @@
             mMaxJpegSize(0),
             mJpegQuality(100),
             mIsLogical(0),
-            mInstrinsicCalibration{0.f},
-            mIsInstrinsicCalibrationValid(0),
+            mIntrinsicCalibration{0.f},
+            mIsIntrinsicCalibrationValid(0),
             mLensDistortion{0.f},
             mIsLensDistortionValid(0),
             mOrientation(DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES) {}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 9771f9e..4227a3b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -29,6 +29,9 @@
 #define CLOGE(fmt, ...) ALOGE("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
             ##__VA_ARGS__)
 
+#define CLOGW(fmt, ...) ALOGW("Camera %s: %s: " fmt, mId.string(), __FUNCTION__, \
+            ##__VA_ARGS__)
+
 // Convenience macros for transitioning to the error state
 #define SET_ERR(fmt, ...) setErrorState(   \
     "%s: " fmt, __FUNCTION__,              \
@@ -3267,14 +3270,19 @@
         ALOGVV("%s: removed frame %d from InFlightMap", __FUNCTION__, frameNumber);
      }
 
-    // Sanity check - if we have too many in-flight frames, something has
-    // likely gone wrong
-    if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
-        CLOGE("In-flight list too large: %zu", mInFlightMap.size());
-    } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
-            kInFlightWarnLimitHighSpeed) {
-        CLOGE("In-flight list too large for high speed configuration: %zu",
-                mInFlightMap.size());
+    // Sanity check - if we have too many in-flight frames with a long total in-flight duration,
+    // something has likely gone wrong. This might still be legitimate only if the application
+    // sends in a long burst of long-exposure requests.
+    if (mExpectedInflightDuration > kMinWarnInflightDuration) {
+        if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
+            CLOGW("In-flight list too large: %zu, total inflight duration %" PRIu64,
+                    mInFlightMap.size(), mExpectedInflightDuration);
+        } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
+                kInFlightWarnLimitHighSpeed) {
+            CLOGW("In-flight list too large for high speed configuration: %zu, "
+                    "total inflight duration %" PRIu64,
+                    mInFlightMap.size(), mExpectedInflightDuration);
+        }
     }
 }
 
@@ -4364,7 +4372,7 @@
         int overrideFormat = mapToFrameworkFormat(src.v3_2.overrideFormat);
         android_dataspace overrideDataSpace = mapToFrameworkDataspace(src.overrideDataSpace);
 
-        if (dst->format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+        if (dstStream->getOriginalFormat() != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
             dstStream->setFormatOverride(false);
             dstStream->setDataSpaceOverride(false);
             if (dst->format != overrideFormat) {
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 6e8ac84..cae34ce 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -227,6 +227,7 @@
     static const size_t        kDumpLockAttempts  = 10;
     static const size_t        kDumpSleepDuration = 100000; // 0.10 sec
     static const nsecs_t       kActiveTimeout     = 500000000;  // 500 ms
+    static const nsecs_t       kMinWarnInflightDuration = 5000000000; // 5 s
     static const size_t        kInFlightWarnLimit = 30;
     static const size_t        kInFlightWarnLimitHighSpeed = 256; // batch size 32 * pipe depth 8
     static const nsecs_t       kDefaultExpectedDuration = 100000000; // 100 ms
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 2df084b..fd9b4b0 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -68,7 +68,7 @@
     mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX),
     mBufferLimitLatency(kBufferLimitLatencyBinSize),
     mFormatOverridden(false),
-    mOriginalFormat(-1),
+    mOriginalFormat(format),
     mDataSpaceOverridden(false),
     mOriginalDataSpace(HAL_DATASPACE_UNKNOWN),
     mPhysicalCameraId(physicalCameraId),
@@ -125,9 +125,6 @@
 
 void Camera3Stream::setFormatOverride(bool formatOverridden) {
     mFormatOverridden = formatOverridden;
-    if (formatOverridden && mOriginalFormat == -1) {
-        mOriginalFormat = camera3_stream::format;
-    }
 }
 
 bool Camera3Stream::isFormatOverridden() const {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 533318f..67afd0f 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -582,9 +582,9 @@
     static const int32_t kBufferLimitLatencyBinSize = 33; //in ms
     CameraLatencyHistogram mBufferLimitLatency;
 
-    //Keep track of original format in case it gets overridden
+    //Keep track of original format when the stream is created in case it gets overridden
     bool mFormatOverridden;
-    int mOriginalFormat;
+    const int mOriginalFormat;
 
     //Keep track of original dataSpace in case it gets overridden
     bool mDataSpaceOverridden;
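
Making mOriginalFormat const and initializing it from the constructor removes the -1 sentinel and the lazy capture that used to live in setFormatOverride. A minimal sketch of the pattern (illustrative class, not Camera3Stream):

class Stream {
  public:
    explicit Stream(int format)
        : mFormat(format),
          mOriginalFormat(format) {}       // captured once at construction

    void overrideFormat(int format) { mFormat = format; }
    int originalFormat() const { return mOriginalFormat; }

  private:
    int mFormat;
    const int mOriginalFormat;             // no sentinel, cannot drift later
};
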
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index f47e5a5..78d737d 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -205,6 +205,11 @@
         return mTestCameraProvider;
     }
 
+    virtual hardware::hidl_vec<hardware::hidl_string> listServices() override {
+        hardware::hidl_vec<hardware::hidl_string> ret = {"test/0"};
+        return ret;
+    }
+
 };
 
 struct TestStatusListener : public CameraProviderManager::StatusListener {
@@ -231,37 +236,24 @@
             vendorSection);
     serviceProxy.setProvider(provider);
 
+    int numProviders = static_cast<int>(serviceProxy.listServices().size());
+
     res = providerManager->initialize(statusListener, &serviceProxy);
     ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
     // Check that both "legacy" and "external" providers (really the same object) are called
     // once for all the init methods
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::SET_CALLBACK], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::SET_CALLBACK], numProviders) <<
             "Only one call to setCallback per provider expected during init";
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_VENDOR_TAGS], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_VENDOR_TAGS], numProviders) <<
             "Only one call to getVendorTags per provider expected during init";
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::IS_SET_TORCH_MODE_SUPPORTED], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::IS_SET_TORCH_MODE_SUPPORTED],
+            numProviders) <<
             "Only one call to isSetTorchModeSupported per provider expected during init";
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_CAMERA_ID_LIST], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_CAMERA_ID_LIST], numProviders) <<
             "Only one call to getCameraIdList per provider expected during init";
-    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::NOTIFY_DEVICE_STATE], 2) <<
+    EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::NOTIFY_DEVICE_STATE], numProviders) <<
             "Only one call to notifyDeviceState per provider expected during init";
 
-    std::string legacyInstanceName = "legacy/0";
-    std::string externalInstanceName = "external/0";
-    bool gotLegacy = false;
-    bool gotExternal = false;
-    EXPECT_EQ(2u, serviceProxy.mLastRequestedServiceNames.size()) <<
-            "Only two service queries expected to be seen by hardware service manager";
-
-    for (auto& serviceName : serviceProxy.mLastRequestedServiceNames) {
-        if (serviceName == legacyInstanceName) gotLegacy = true;
-        if (serviceName == externalInstanceName) gotExternal = true;
-    }
-    ASSERT_TRUE(gotLegacy) <<
-            "Legacy instance not requested from service manager";
-    ASSERT_TRUE(gotExternal) <<
-            "External instance not requested from service manager";
-
     hardware::hidl_string testProviderFqInterfaceName =
             "android.hardware.camera.provider@2.4::ICameraProvider";
     hardware::hidl_string testProviderInstanceName = "test/0";